Chapter 2

Sound synthesis techniques and methods

Spectral

  • Additive
  • Phase Vocoder
  • FFT

Abstract

  • FM
  • Waveshaping

Sampling

  • Sampling
  • Granular

Physical

  • Modal
  • Waveguide

Additive synthesis

Additive synthesis is a method in which a composite waveform is formed by summing sinusoidal components, for example, harmonics of a tone, to produce a sound (Moorer 1985). In additive synthesis, three control functions are needed for every sinusoidal oscillator: the amplitude, frequency and phase of each component. In many cases the phase is left out and only the amplitude and frequency functions are used (Tolonen et al. 1998).

/* Sound Synthesis techniques
BA 181902
Objective: create complex sounds using the additive synthesis technique
// Additive Synthesis
a + b + c + d
where a to d can be sinusoids at different frequencies, or any other waveforms
*/

// first boot the internal server and open a FreqScope and a Stethoscope

s = Server.internal;
s.boot
Stethoscope.new
FreqScope.new

//sum five partials above a 440 Hz A (approximate harmonics)

{Mix(SinOsc.ar([440, 880, 880*3/2, 1600*4/3, 2000], 0, [0.6, 0.1, 0.09, 0.08, 0.09]))}.play //or .scope

//Using Mix (Mix.ar / Mix.fill) to sum an array of channels

(
//This example is from The SuperCollider Book (Wilson 2011, p. 37)
{
	Mix.ar(
		Array.fill(12, { |count|
			var harm;
			harm = count + 1 * 110; // remember precedence: count + 1, then * 110
			SinOsc.ar(harm, mul: max(0, SinOsc.kr(count + 1 / 4))) * 1 / (count + 1)
		})
	) * 0.7
}.play;
)


//Also add different waveforms, e.g. sawtooth waves

{Mix.fill(10, {SinOsc.ar(440+440.rand, 0, 0.03) + Saw.ar(440+440.rand, 0.01)})}.play;


// a more complex sound

{Mix.fill(10, {SinOsc.ar(440+640.rand, 0, 0.03) + Saw.ar(440+460.rand, 0.01)})}.play;
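
A sketch of the per-oscillator amplitude control functions described above: each partial gets its own envelope, so the timbre changes as the partials decay at different rates (the 110 Hz fundamental and the randomised percussive envelopes are assumed values for illustration).

(
{
	Mix.fill(6, { |i|
		var freq, env;
		freq = 110 * (i + 1);
		env = EnvGen.kr(Env.perc(0.01 + 0.1.rand, 2 + 2.0.rand)); // each partial decays differently
		SinOsc.ar(freq, 0, env / (i + 1)) // higher partials are quieter
	}) * 0.3
}.play;
)
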
Phase Vocoder

The phase vocoder was developed at Bell Laboratories and was first described by Flanagan and Golden (1966). Vocoders represent the input signal in separate frequency bands; they simplify the complex spectral information and reduce the amount of data needed to represent the signal (Tolonen et al. 1998).


// alloc a buffer for the FFT
b = Buffer.alloc(s,2048,1);
// read a sound
c = Buffer.read(s, Platform.resourceDir +/+ "sounds/a11wlk01.wav");


(
// do nothing
{
    var in, chain;
    in = PlayBuf.ar(1,c, BufRateScale.kr(c), loop:1);
    chain = FFT(b, in);
    0.5 * IFFT(chain);
}.scope(1);
)

(
// pass only magnitudes above a threshold
{
    var in, chain;
    in = PlayBuf.ar(1,c, BufRateScale.kr(c), loop:1);
    chain = FFT(b, in);
    chain = PV_MagAbove(chain, MouseX.kr(0.1,512,1));
    0.5 * IFFT(chain);
}.scope(1);
)

More examples of spectral processing:
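
A further sketch in the same style, reusing the FFT buffer b and the sound file c from above: PV_BrickWall clears bins above or below a cutoff, so MouseX sweeps from a brick-wall low-pass (left) to a brick-wall high-pass (right).

(
{
    var in, chain;
    in = PlayBuf.ar(1, c, BufRateScale.kr(c), loop: 1);
    chain = FFT(b, in);
    chain = PV_BrickWall(chain, MouseX.kr(-1, 1)); // negative = low-pass, positive = high-pass
    0.5 * IFFT(chain);
}.scope(1);
)
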

Modulation synthesis

FM synthesis (frequency modulation) is a fundamental digital sound synthesis technique employing a nonlinear oscillating function. The theory of FM was well established for radio frequencies by the mid-twentieth century, but its use at audio frequencies for sound synthesis was not studied until the late 1960s. John Chowning, the inventor of FM synthesis (Miranda 2002), was the first to study it systematically, at Stanford University. Chowning observed that complex audio spectra can be achieved with just two sinusoidal oscillators (Tolonen et al. 1998).
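
As a quick illustration of Chowning's observation, here is a minimal two-oscillator FM sketch (the 440 Hz carrier, 110 Hz modulator and index of 3 are assumed values); on the FreqScope the sidebands appear at carrfreq ± n*modfreq.

(
{
    var carrfreq = 440, modfreq = 110, index = 3;
    // the modulator deviates the carrier frequency by index * modfreq
    SinOsc.ar(carrfreq + (index * modfreq * SinOsc.ar(modfreq)), 0, 0.25)
}.scope;
)
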


/*
Modulation synthesis
Objective: modulate the frequency, the phase and the amplitude of a simple waveform using other waveforms as the modulators
*/

/*From the Modulation Synthesis tutorial (Collins et al. 2009)
//Use MouseX and MouseY to control the modulation

//Ring Modulation

A straight multiplication of two signals.

carrier * modulator
*/

{
//2009 Collins et al.
var carrier, modulator, carrfreq, modfreq;

carrfreq= MouseX.kr(440,5000,'exponential');
modfreq= MouseY.kr(1,5000,'exponential');

carrier= SinOsc.ar(carrfreq,0,0.5);
modulator= SinOsc.ar(modfreq,0,0.5);

carrier*modulator;
}.scope


/*
AM synthesis
The modulator is unipolar,
meaning always positive (range 0 to 1)
SinOsc.ar(440, 0, 0.5) // (-0.5, 0.5) bipolar
SinOsc.ar(440, 0, 0.5, 0.5) // (0, 1) unipolar
*/
(
{
//2009 Collins et al.
var carrier, modulator, carrfreq, modfreq;

carrfreq= MouseX.kr(440,5000,'exponential');
modfreq= MouseY.kr(1,5000,'exponential');

carrier= SinOsc.ar(carrfreq,0,0.5);
modulator= SinOsc.ar(modfreq,0,0.25, 0.25);

carrier*modulator;
}.scope
)


// another example: the same slow sine is used for both mul and add

{SinOsc.ar(440, 0, SinOsc.ar(0.5), SinOsc.ar(0.5))}.scope

// both positive and negative (a bipolar mul with a constant add)

{SinOsc.ar(440, 0, SinOsc.ar(0.25), 0.25)}.scope

// try also a MouseX variable to control the modulation of the amplitude

{
var carrier, modulator, carrfreq, modfreq, modamp;

carrfreq= MouseX.kr(440,5000,'exponential');
modfreq= MouseY.kr(1,5000,'exponential');
modamp = MouseX.kr(0.1, 1, 'exponential');
carrier= SinOsc.ar(carrfreq,0,0.5);
modulator= SinOsc.ar(modfreq,0,SinOsc.ar(modamp), SinOsc.ar(modamp));

carrier*modulator;
}.scope

/*
FM synthesis (frequency modulation) is a technique for changing the timbre
of a waveform by modulating its frequency. John Chowning was the first to systematically study FM synthesis, in the late 1960s (Tolonen et al. 1998).
*/
(
var synth;
synth= {| carrfreq=440, modfreq=1, moddepth=0.01|
SinOsc.ar(carrfreq + (MouseX.kr(moddepth, 5000, 'exponential') * SinOsc.ar(MouseY.kr(modfreq, 10000, 'exponential'))), 0, 0.25)
}.scope;
)


// control the carrfreq as well

(
var synth;
synth= {| carrfreq=440, modfreq=1, moddepth=0.01|
SinOsc.ar(MouseX.kr(carrfreq, 10000, 'exponential') + (MouseX.kr(moddepth, 5000, 'exponential') * SinOsc.ar(MouseY.kr(modfreq, 10000, 'exponential'))), 0, 0.25)
}.scope;
)


//Create a window with sliders to control the frequency modulation (Collins et al. 2009, sc_tutorials)

(
var w, carrfreqslider, modfreqslider, moddepthslider, synth;

w=Window("frequency modulation", Rect(100, 400, 400, 300));
w.view.decorator = FlowLayout(w.view.bounds);

synth= {arg carrfreq=440, modfreq=1, moddepth=0.01;
SinOsc.ar(carrfreq + (moddepth*SinOsc.ar(modfreq)),0,0.25)
}.scope;

carrfreqslider= EZSlider(w, 300@50, "carrfreq", ControlSpec(20, 5000, 'exponential', 10, 440), {|ez|  synth.set(\carrfreq, ez.value)});
w.view.decorator.nextLine;

modfreqslider= EZSlider(w, 300@50, "modfreq", ControlSpec(1, 5000, 'exponential', 1, 1), {|ez|  synth.set(\modfreq, ez.value)});
w.view.decorator.nextLine;
moddepthslider= EZSlider(w, 300@50, "moddepth", ControlSpec(0.01, 5000, 'exponential', 0.01, 0.01), {|ez|  synth.set(\moddepth, ez.value)});

w.front;
)

//Phase Modulation
(
{
//2009 Collins et al.
var modf, ind;

modf= MouseX.kr(1,440, 'exponential');
ind=MouseY.kr(0.0,10.0);

SinOsc.ar(440, SinOsc.ar(modf,0,modf*ind),0.25)
}.scope
)

Waveshaping synthesis

Waveshaping synthesis - nonlinear distortion

Waveshaping synthesis is a simple sound synthesis method that uses a nonlinear shaping function to modify the input signal (Tolonen et al. 1998). A simple table-free sketch follows the Shaper examples below.

/*
BA 182202
Waveshaping examples
Objective: create a wavetable and shape the waveform
*/

s.boot;
//Fill this buffer with a series of Chebyshev polynomials, see more on Help->Buffer
b = Buffer.alloc(s, 512, 1, { |buf| buf.chebyMsg([1,0,1,1,0,1])});

(
{
    Shaper.ar(
        b,
        SinOsc.ar(300, 0, Line.kr(0,1,6)+Saw.ar(304, Line.kr(0, 1, 6))),
        0.5
    )
}.scope;
)

b.free;

//another example with sine wave series
//Fill this buffer with a series of sine wave partials using specified frequencies, amplitudes, and initial phases.
b = Buffer.alloc(s, 512, 1, { |buf| buf.sine1Msg([1,0,1,1,0,1])});
//see also sine2 and 3

(
{
    Shaper.ar(
        b,
        SinOsc.ar(300+305.rand, 0, LFNoise2.kr(0+0.15)+Saw.ar(304+30.7.rand, LFNoise2.kr(0.15))),
        0.5
    )
}.scope;
)

b.free;

//another example

b = Buffer.alloc(s, 512, 1, { |buf| buf.sine1Msg(1.0/[1,2,3,4,5,6,7,8,9,10])});
//check also with other tables.
//Combine tables
//b = Buffer.alloc(s, 512, 1, { |buf| buf.chebyMsg([1,0,1,1,0,1])});
//b = Buffer.alloc(s, 512, 1, { |buf| buf.sine1Msg([1,0,1,1,0,1])});

(
{
    Shaper.ar(
        b,
        SinOsc.ar(300+305.rand, 0, LFNoise2.kr(0+0.15.rand)+Saw.ar(304+307.rand, LFNoise2.kr(0.15.rand)))+PinkNoise.ar(0.1.rand),
        0.5
    )
}.scope;
)

b.free;
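
The shaping function does not have to be stored in a buffer. As a minimal table-free sketch (an addition to the Shaper examples above), tanh can serve as the nonlinear transfer curve: raising the input level with the mouse pushes the sine into the curved part of tanh and harmonics appear.

(
{ SinOsc.ar(220, 0, MouseX.kr(0.1, 10, 1)).tanh * 0.3 }.scope;
)
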
Sampling

Sampling synthesis is a method in which recordings of relatively short sounds are played back (Roads 1995). Digital sampling instruments, also called samplers, are typically used to perform pitch shifting, looping, or other modifications of the original sound signal (Borin et al. 1997b; Tolonen et al. 1998). A pitch-shifting and looping sketch follows the basic playback example below.


/*Using Buffers with Sound Files

Buffer has another class method called 'read', which reads a sound file into memory, and returns a Buffer object. Using the UGen PlayBuf, we can play the file.
*/
// read a soundfile
b = Buffer.read(s, Platform.resourceDir +/+ "sounds/a11wlk01.wav");

// now play it
(
x = SynthDef("tutorial-PlayBuf", { arg out = 0, bufnum;
	Out.ar(out,
		PlayBuf.ar(1, bufnum, BufRateScale.kr(bufnum))
	)
}).play(s, [\bufnum, b.bufnum]);
)
x.free; b.free;
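
As noted above, samplers typically pitch-shift and loop the recording. A minimal sketch of both (reloading the same file; the mouse-controlled rate range is an assumed choice): scaling PlayBuf's rate transposes the sample, 2 an octave up and 0.5 an octave down, while loop: 1 repeats the buffer.

b = Buffer.read(s, Platform.resourceDir +/+ "sounds/a11wlk01.wav");

x = { PlayBuf.ar(1, b, BufRateScale.kr(b) * MouseX.kr(0.5, 2, 1), loop: 1) * 0.5 }.play;

x.free; b.free;
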
Granular synthesis

Granular synthesis is a set of techniques that share a common paradigm of representing sound signals by "sound atoms" or grains. Granular synthesis originated from studies by Gabor in the late 1940s (Cavaliere and Piccialli 1997; Roads 1995). In granular synthesis the sound grain can have a duration ranging from one millisecond to more than a hundred milliseconds, and the waveform of the grain can be a windowed sinusoid, a sampled signal, or the output of a physics-based model of a sound production mechanism (Cavaliere and Piccialli 1997; Tolonen et al. 1998).

/*
GrainBuf
Granular synthesis with sound stored in a buffer.
GrainBuf.ar(numChannels: 1, trigger: 0, dur: 1, sndbuf, rate: 1, pos: 0, interp: 2, pan: 0, envbufnum: -1, maxGrains: 512, mul: 1, add: 0)

*/
s.boot;

(
var winenv;

b = Buffer.read(s, Platform.resourceDir +/+ "sounds/a11wlk01-44_1.aiff");
// a custom envelope
winenv = Env([0, 1, 0], [0.5, 0.5], [8, -8]);
z = Buffer.sendCollection(s, winenv.discretize, 1);

SynthDef(\buf_grain_test, { |out, gate = 1, amp = 1, sndbuf, envbuf|
    var pan, env, freqdev;
    // use mouse x to control panning
    pan = MouseX.kr(-1, 1);
    env = EnvGen.kr(
        Env([0, 1, 0], [1, 1], \sin, 1),
        gate,
        levelScale: amp,
        doneAction: Done.freeSelf);
    Out.ar(out,
        GrainBuf.ar(2, Impulse.kr(10), 0.1, sndbuf, LFNoise1.kr.range(0.5, 2),
            LFNoise2.kr(0.1).range(0, 1), 2, pan, envbuf) * env)
}).add;
)

// use built-in env
x = Synth(\buf_grain_test, [\sndbuf, b, \envbuf, -1])

// switch to the custom env
x.set(\envbuf, z)
x.set(\envbuf, -1); // and back to the built-in env

x.set(\gate, 0);

More examples of granular synthesis:
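
A sketch of grains as windowed sinusoids rather than sampled sound (the frequency range, grain rates and durations are assumed values): MouseX sets the grain density and MouseY the grain duration, within the 1-100 ms range mentioned above.

(
{
    var trig;
    trig = Impulse.kr(MouseX.kr(2, 100, 1));           // grains per second
    GrainSin.ar(2, trig,
        MouseY.kr(0.001, 0.1, 1),                      // grain duration, 1-100 ms
        440 * LFNoise1.kr(0.3).range(0.5, 2),          // grain frequency wanders around 440 Hz
        LFNoise1.kr(0.2), -1) * 0.3                    // random panning, built-in Hann window
}.play;
)
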

Physical Modeling

Digital waveguide synthesis


/*
BA102002
Physical Modeling
Digital waveguide synthesis: superposition of two travelling waves (a right-going and a left-going wave)
Modal synthesis: the resonant modes of vibration of acoustic (vibrating) systems
Delay line: using delays and filters to model the propagation of the sound wave along the geometry of the waveguide
Mass-spring models: signals generated by networks of masses and springs

See more in: Laird, J. A., 2001. The Physical Modelling of Drums Using Digital Waveguides. University of Bristol, and Tolonen et al. (1998).
*/

/*
Instruction:
Envelope (sound behaviour in the time domain), Impulse (trigger), a noise generator such as WhiteNoise (excitation/texture), CombL (delay line) - see the sketch below
*/
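
Following the instruction above, a minimal delay-line (waveguide-style) plucked string sketch, with assumed values for pitch, decay and trigger rate: an Impulse triggers a short noise burst as the excitation, and CombL acts as the delay line whose delay time (1/freq) sets the pitch.

(
{
    var excitation, freq = 220;
    excitation = WhiteNoise.ar(0.4) * Decay2.kr(Impulse.kr(1), 0.001, 0.01); // re-pluck once per second
    LPF.ar(CombL.ar(excitation, 0.05, 1/freq, 3), 4000) ! 2 // 1/freq delay, ~3 s decay, damped, stereo
}.play;
)

And a minimal modal synthesis sketch (the mode frequencies, amplitudes and ring times are assumed values): Klank provides a fixed bank of resonant modes, excited here by a repeating impulse.

(
{
    var exciter;
    exciter = Impulse.ar(1, 0, 0.3); // strike once per second
    Klank.ar(`[[220, 563, 915, 1310], [0.3, 0.2, 0.15, 0.1], [2, 1.5, 1, 0.7]], exciter) ! 2
}.play;
)
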


///WaveTables
//Create a table with size 512 and the amplitudes which shape the waveform. Use Sine or Chebyshev polynomial function.

a = Wavetable.sineFill(512, 1.0/[1, 2, 3, 4, 5, 6]);
a = Wavetable.chebyFill(513, [0, 0, 1]);
a = Wavetable.chebyFill(513, [0.3, -0.8, 1.1]);
a = Wavetable.sineFill(512, [0.5]);

// note: a Wavetable from sineFill/chebyFill is already in wavetable format;
// the conversion steps below are really meant for raw sample data, as in the sound file example further on
a = a.resamp1(256);
a = a.as(Signal);
a = a.asWavetable;
// Server side
s.boot;
b = Buffer.loadCollection(s, a);
x = b.play(loop: true); // ok sounds
x.free;

/// more on wavetables
//load a wavetable from a sound file: open it with SoundFile and read its data into a collection
w = SoundFile.openRead("~/wavetables/WT01.aif".standardizePath);


// Create an array to load the data
a = FloatArray.newClear(w.numFrames);
w.readData(a);
w.close;
a.size;

// resample the table to a power-of-two size (a larger size helps avoid aliasing)
// if the file contains many different samples, choose a larger power of two

a = a.resamp1(256);

// Convert the array to a Signal
a = a.as(Signal);
a.size;

// Convert it to a Wavetable
a = a.asWavetable;
a.size; //wavetable format is signal.size * 2

// load the collection into a buffer

b = Buffer.loadCollection(s, a);
//play the buffer and loop
x = b.play(loop: true);
x.free;
//use Osc, the interpolating wavetable oscillator; see also VOsc and VOsc3, and more in Help->UGens->Generators->Deterministic
//Osc.ar(table, freq, phase, mul, add)
x = { LPF.ar(Osc.ar(b, MouseX.kr(440, 880)), SampleRate.ir/2-1000) }.play;

s.freqscope

x.free;

//see Ball, TBall, Spring Ugens on Help
//TBall
//Example:1
// mouse x controls switch of level
// mouse y controls gravity
(
{
    var t, sf;
    sf = K2A.ar(MouseX.kr > 0.5) > 0;
    t = TBall.ar(sf, MouseY.kr(0.01, 1.0, 1), 0.01);
    Pan2.ar(Ringz.ar(t * 10, 1200, 0.1), MouseX.kr(-1,1));
}.play;
)


//spring


(
{
    var inforce, outforce, freq, k, d;
    inforce = K2A.ar(MouseButton.kr(0,1,0)) > 0;
    k = MouseY.kr(0.1, 20, 1);
    d = MouseX.kr(0.00001, 0.1, 1);
    outforce = Spring.ar(inforce, k, d);
    freq = outforce * 400 + 500; // modulate frequency with the force
    SinOsc.ar(freq, 0, 0.2)
}.play;
)

Creating textures with wavetables


/*
BA 182202
Sound Textures examples
Objective: Experiment with table oscillators to create sound textures
*/

// COsc wavetable example 1:

(
b = Buffer.alloc(s, 512, 1, {| buf | buf.sine1Msg(1.0/[1,2,3,4,5,6,7,8,9,10])});
{ LPF.ar(COsc.ar(b.bufnum, [200.rand.postln, 1000.rand], 0.7, 0.25)*Saw.ar(440+444.rand, WhiteNoise.kr(0.7)), 4000) }.play;
)