@Luomu
Created November 24, 2018 03:08
A simple monophonic synth using sdl/imgui
#include "imgui.h"
#include "imgui_impl_sdl.h"
#include "imgui_impl_opengl3.h"
#include <stdio.h>
#include <SDL.h>
#include <GL/gl3w.h>
#include <string>
#include <iostream>
#include <cmath> //sin/cos/M_PI (M_PI may need _USE_MATH_DEFINES on MSVC)
//A simple "analog" monophonic synthesizer!
//Uses imgui for the GUI
//Uses SDL for window creation and audio
//All of the code is in one CPP file for easier presentation; normally you'd split it.
//SDL's default audio backend is not ideal for the job as it has too much latency.
//A low-latency backend such as JACK (or ASIO on Windows) should be used for serious audio applications.
//I experimented with ring buffering and filling the buffer in a separate thread,
//but the latency was a *lot* worse.
//A ring buffer should still be used in a stream-like situation (music playback) since it's
//less susceptible to audio dropouts.
//Synth design goals:
// monophonic (one sound at a time)
// 2 mixable oscillators that produce square/saw/triangle/sine waves or noise
// envelope generator (ADSR) applied to the oscillator value
// lowpass and possibly other filters
//Constants
constexpr float TWOPI = 2*M_PI;
constexpr float SMALL_FLOAT = 0.000001f;
constexpr int DEFAULT_SAMPLERATE = 44100;
constexpr int DEFAULT_SAMPLES = 512;
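//Rough latency math (illustrative note, not used elsewhere): with the defaults above,
//the callback buffer alone accounts for DEFAULT_SAMPLES / DEFAULT_SAMPLERATE = 512 / 44100 ~= 11.6 ms.
constexpr float DEFAULT_BUFFER_LATENCY_SECONDS = DEFAULT_SAMPLES / (float)DEFAULT_SAMPLERATE;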
//Forward declarations
class Synth;
struct AudioCallbackUserData;
static void AudioCallbackNonThreaded(void* UserData, Uint8* DeviceBuffer, int Length);
//Utility functions
static float lerp(float a, float b, float t)
{
return a + (b - a) * t;
}
//Types
enum WaveType
{
WaveType_Triangle,
WaveType_Sine,
WaveType_Square,
WaveType_Saw
};
//Supported keyboard notes
enum Note
{
Note_C4,
Note_CS4,
Note_D4,
Note_DS4,
Note_E4,
Note_F4,
Note_FS4,
Note_G4,
Note_GS4,
Note_A4,
Note_AS4,
Note_B4,
Note_C5,
Note_Max
};
//Note values in Hz
//Notes/values/keys could be put in one table of structs
static float noteTable[Note_Max] = {
261.63, //C4
277.18, //C#4
293.66, //D4
311.13, //D#4
329.63, //E4
349.23, //F4
369.99, //F#4
392.00, //G4
415.30, //G#4
440.00, //A4
466.16, //A#4
493.88, //B4
523.25 //C5
};
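//Illustrative helper (not used by the gist): the table above follows 12-tone equal
//temperament, f(n) = 440 * 2^((n - 69) / 12), where n is the MIDI note number (C4 = 60, A4 = 69).
static float NoteFrequency(int midiNote)
{
return 440.f * powf(2.f, (midiNote - 69) / 12.f);
}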
//Keyboard keys corresponding to the above notes
static const int noteKeyTable[Note_Max] = {
SDLK_z,
SDLK_s,
SDLK_x,
SDLK_d,
SDLK_c,
SDLK_v,
SDLK_g,
SDLK_b,
SDLK_h,
SDLK_n,
SDLK_j,
SDLK_m,
SDLK_COMMA
};
//Because we don't have that many parameters for our simple synth,
//they can all be stored here. The UI modifies this and the synth
//reads the updated parameters from it.
struct SynthParams
{
float volume;
//float pan
//int octave
WaveType osc1WaveType;
WaveType osc2WaveType;
float oscMixAmount;
//float pulse width
int osc2Detune;
float attack;
float decay;
float sustain;
float release;
float lowpassCutoff;
float lowpassResonance;
float lowpassWetness;
SynthParams()
: volume(0.8f)
, osc1WaveType(WaveType_Saw)
, osc2WaveType(WaveType_Square)
, oscMixAmount(0)
, osc2Detune(0)
, attack(0)
, decay(0)
, sustain(1)
, release(0)
, lowpassCutoff(1000)
, lowpassResonance(1)
, lowpassWetness(0)
{
}
};
struct AudioConfig
{
int samplesPerSecond;
int bytesPerSample;
};
//userdata we pass to the audio callback
//cursor was only used with ring buffering
struct AudioCallbackUserData
{
//Uint8 *buffer;
//int size;
//int readCursor;
//int writeCursor;
AudioConfig *audioConfig;
Synth *synthesizer;
int sampleIndex;
int toneHz;
int toneVolume;
float envelopeValue;
};
//Produces 4 types of waves (triangle, sine, square, saw)
class Oscillator
{
public:
Oscillator()
: m_waveType(WaveType_Sine)
{
}
WaveType GetMode() { return m_waveType; }
void SetMode(WaveType t) { m_waveType = t; }
Sint16 Sample(AudioCallbackUserData* audioState, int wavePeriod)
{
switch (GetMode())
{
case WaveType_Sine:
return SampleSine(audioState, wavePeriod);
case WaveType_Square:
return SampleSquare(audioState, wavePeriod);
case WaveType_Triangle:
return SampleTriangle(audioState, wavePeriod);
case WaveType_Saw:
return SampleSaw(audioState, wavePeriod);
default:
return 0;
}
}
private:
//rises linearly from -1 to +1 over the first half of the period, then falls back to -1
Sint16 SampleTriangle(AudioCallbackUserData *audioState, int wavePeriod)
{
float t = audioState->sampleIndex % wavePeriod;
t = t / (float)wavePeriod;
float v = t < 0.5f ? (4.f * t - 1.f) : (3.f - 4.f * t);
return v * audioState->toneVolume;
}
//alternates between -1 and 1 without transitions
Sint16 SampleSquare(AudioCallbackUserData *audioState, int wavePeriod)
{
int halfPeriod = wavePeriod / 2;
if ((audioState->sampleIndex / halfPeriod) % 2 == 0)
return audioState->toneVolume;
return -audioState->toneVolume;
}
//oscillates smoothly between -1 and 1
Sint16 SampleSine(AudioCallbackUserData *audioState, int wavePeriod)
{
//use the full wave period so the sine has the same pitch as the other waveforms
return audioState->toneVolume * sin(
TWOPI * audioState->sampleIndex / (float)wavePeriod);
}
//falls linearly from 1 to -1, then jumps back to 1
Sint16 SampleSaw(AudioCallbackUserData *audioState, int wavePeriod)
{
float t = audioState->sampleIndex % wavePeriod;
t = t / (float)wavePeriod;
float v = 1.0f - t * 2;
return audioState->toneVolume * v;
}
WaveType m_waveType;
};
//Envelope has 4 stages:
//attack - time the sound takes to reach full volume
//decay - time the sound takes to decay from full to sustain level
//sustain - volume the sound stays when key is held down
//release - time the volume takes to fade from current volume to zero when key is released
class EnvelopeGenerator
{
private:
float m_attackTime;
float m_decayTime;
float m_sustainVolume;
float m_releaseTime;
bool m_triggered;
int m_currentStage;
float m_currentTime;
float m_currentVol;
float m_volumeBeforeCurrentStage;
public:
EnvelopeGenerator()
: m_currentTime(0)
, m_currentStage(4)
, m_currentVol(0)
, m_volumeBeforeCurrentStage(0)
, m_triggered(false)
{
m_attackTime = 0.f;
m_decayTime = 0.f;
m_sustainVolume = 1.f;
m_releaseTime = 0.f;
}
//returns the current envelope strength
float Evaluate()
{
return m_currentVol;
}
float GetAttack() const { return m_attackTime; }
float GetDecay() const { return m_decayTime; }
float GetSustain() const { return m_sustainVolume; }
float GetRelease() const { return m_releaseTime; }
void SetADSR(float a, float d, float s, float r)
{
m_attackTime = a;
m_decayTime = d;
m_sustainVolume = s;
m_releaseTime = r;
}
void Update(float seconds)
{
if (m_currentStage == 0)
{
m_currentVol = lerp(m_volumeBeforeCurrentStage, 1.f, m_currentTime / m_attackTime);
m_currentTime += seconds;
if (m_currentTime > m_attackTime)
{
m_currentVol = 1.f;
m_volumeBeforeCurrentStage = m_currentVol;
m_currentStage++;
m_currentTime = 0.f;
}
}
if (m_currentStage == 1)
{
m_currentVol = lerp(m_volumeBeforeCurrentStage, m_sustainVolume, m_currentTime / m_decayTime);
m_currentTime += seconds;
if (m_currentTime > m_decayTime)
{
m_currentStage++;
m_currentTime = 0.f;
}
}
if (m_currentStage == 2)
{
m_currentVol = m_sustainVolume;
}
if (m_currentStage == 3)
{
m_currentVol = lerp(m_volumeBeforeCurrentStage, 0.f, m_currentTime / m_releaseTime);
m_currentTime += seconds;
if (m_currentTime > m_releaseTime)
{
m_currentStage++;
m_currentTime = 0.f;
m_currentVol = 0.f;
//don't untrigger but stay silent
}
}
}
//When key is pressed (and previous key has been released)
void Trigger()
{
if (m_triggered) return;
m_triggered = true;
m_currentTime = 0.f;
if (m_attackTime > SMALL_FLOAT)
{
m_volumeBeforeCurrentStage = m_currentVol;
m_currentStage = 0;
}
else if (m_decayTime > SMALL_FLOAT) //jump to decay
{
m_volumeBeforeCurrentStage = 1.f;
m_currentStage = 1;
}
else //jump to sustain
m_currentStage = 2;
}
//When a key is released
void Untrigger()
{
if (m_triggered)
{
m_triggered = false;
if (m_releaseTime > SMALL_FLOAT)
{
m_volumeBeforeCurrentStage = m_currentVol;
m_currentStage = 3;
m_currentTime = 0.f;
}
else //jump to silent
{
m_currentVol = 0.f;
m_currentStage = 4;
}
}
}
};
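//Sketch of the envelope lifecycle as driven by the code further below (illustrative only):
// EnvelopeGenerator eg;
// eg.SetADSR(0.1f, 0.2f, 0.7f, 0.3f); //attack/decay/release in seconds, sustain as a 0..1 volume
// eg.Trigger();                       //key pressed: attack -> decay -> sustain
// eg.Update(deltaSeconds);            //advance once per frame
// float gain = eg.Evaluate();         //scale the oscillator output by this
// eg.Untrigger();                     //key released: fade to silence over the release time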
//Handles: key down, key up, key pressed
//Used to trigger the envelope generator
class Keyboard
{
static const int MaxKeys = 256;
int m_keyPressed[MaxKeys];
bool m_keyDown[MaxKeys];
public:
Keyboard()
{
for (int i = 0; i < MaxKeys; i++)
{
m_keyDown[i] = false;
m_keyPressed[i] = 0;
}
}
void HandleSDL(const SDL_Event &event)
{
switch (event.type)
{
case SDL_KEYDOWN:
if (event.key.keysym.sym < MaxKeys)
{
m_keyDown[event.key.keysym.sym] = true;
if (m_keyPressed[event.key.keysym.sym] == 0)
m_keyPressed[event.key.keysym.sym] = 1;
else
m_keyPressed[event.key.keysym.sym] = 2;
}
break;
case SDL_KEYUP:
if (event.key.keysym.sym < MaxKeys)
{
m_keyDown[event.key.keysym.sym] = false;
m_keyPressed[event.key.keysym.sym] = 0;
}
break;
default:
break;
}
}
bool WasKeyPressed(SDL_Keycode k)
{
if (k >= MaxKeys) return false;
if (m_keyPressed[k] == 1)
{
m_keyPressed[k] = 2;
return true;
}
return false;
}
bool IsKeyDown(SDL_Keycode k)
{
if (k >= MaxKeys) return false;
return m_keyDown[k];
}
bool IsKeyUp(SDL_Keycode k)
{
if (k >= MaxKeys) return false;
return !m_keyDown[k];
}
};
//Filter base class
//Wetness = how much filter is applied to the mix
//(dry = original sound)
class Filter
{
public:
Filter()
: m_wetness(1)
{
}
virtual void DoFiltering(Sint16* buffer, int samples)
{
FilterChannel(buffer, samples, 0);
FilterChannel(buffer, samples, 1);
}
virtual void FilterChannel(Sint16 *buffer, int samples, int channel) = 0;
float GetWetness() const { return m_wetness; }
void SetWetness(float w) { m_wetness = w; }
private:
float m_wetness;
};
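//Concrete filters below apply wetness per sample as a linear crossfade (illustrative formula):
// out = dry + (filtered - dry) * wetness //0 = unfiltered signal, 1 = fully filtered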
//Lowpass filter supporting cutoff and resonance values
//Based on github.com/jarikomppa/soloud/ which is in turn based on:
//"Using the Biquad Resonant Filter",
//Phil Burk, Game Programming Gems 3, p. 606
//Resonance doesn't quite work!
class LowpassFilter : public Filter
{
public:
LowpassFilter(int sampleRate)
: Filter()
, m_sampleRate(sampleRate)
, m_cutoff(1000)
, m_resonance(2)
{
for (int i = 0; i < 2; i++)
{
m_state[i].mX1 = 0;
m_state[i].mY1 = 0;
m_state[i].mX2 = 0;
m_state[i].mY2 = 0;
}
RecalculateParams();
}
float GetCutoff() const { return m_cutoff; }
void SetCutoff(float c) { m_cutoff = c; }
float GetResonance() const { return m_resonance; }
void SetResonance(float r) { m_resonance = r; }
float GetSampleRate() const { return m_sampleRate; }
void FilterChannel(Sint16 *buffer, int samples, int channel) override
{
//todo: should not recalculate these if nothing changed
RecalculateParams();
float x;
BQRStateData &s = m_state[channel];
Sint16 *aBuffer = buffer;
//the buffer is interleaved stereo, so consecutive samples of one channel are 2 apart;
//process two samples of this channel per iteration
int c = channel;
for (int i = 0; i + 3 < samples; i += 4)
{
// Generate outputs by filtering inputs.
float bufferValue = (float)aBuffer[c] / (float)INT16_MAX;
x = bufferValue;
s.mY2 = (mA0 * x) + (mA1 * s.mX1) + (mA2 * s.mX2) - (mB1 * s.mY1) - (mB2 * s.mY2);
bufferValue += (s.mY2 - bufferValue) * GetWetness();
aBuffer[c] = bufferValue * INT16_MAX;
//skip 2 ahead to the next sample of the same channel
c += 2;
// Permute filter operations to reduce data movement.
// Just substitute variables instead of doing mX1=x, etc.
bufferValue = (float)aBuffer[c] / (float)INT16_MAX;
s.mX2 = bufferValue;
s.mY1 = (mA0 * s.mX2) + (mA1 * x) + (mA2 * s.mX1) - (mB1 * s.mY2) - (mB2 * s.mY1);
bufferValue += (s.mY1 - bufferValue) * GetWetness();
aBuffer[c] = bufferValue * INT16_MAX;
c += 2;
// Only move a little data.
s.mX1 = s.mX2;
s.mX2 = x;
}
}
private:
//stores some state per channel
struct BQRStateData
{
float mY1, mY2, mX1, mX2;
};
void RecalculateParams()
{
float omega = (float)((2.0f * M_PI * GetCutoff()) / GetSampleRate());
float sin_omega = (float)sin(omega);
float cos_omega = (float)cos(omega);
float alpha = sin_omega / (2.0f * GetResonance());
float scalar = 1.0f / (1.0f + alpha);
mA0 = 0.5f * (1.0f - cos_omega) * scalar;
mA1 = (1.0f - cos_omega) * scalar;
mA2 = mA0;
mB1 = -2.0f * cos_omega * scalar;
mB2 = (1.0f - alpha) * scalar;
}
BQRStateData m_state[2];
float mA0, mA1, mA2, mB1, mB2;
const float m_sampleRate;
float m_cutoff;
float m_resonance;
};
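//For reference, FilterChannel above implements the usual biquad difference equation,
//with coefficients from RecalculateParams and mX1/mX2, mY1/mY2 holding the previous
//inputs and outputs per channel:
// y[n] = a0*x[n] + a1*x[n-1] + a2*x[n-2] - b1*y[n-1] - b2*y[n-2]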
//The synthesizer.
//Owns oscillators, envelope generator, filters and synth params.
class Synth
{
public:
Synth()
: m_sustain(false)
, m_lowpassFilter(DEFAULT_SAMPLERATE)
{
}
SynthParams &GetParams() { return m_params; }
EnvelopeGenerator &GetEG() { return m_eg; }
bool GetSustain() { return m_sustain; }
void SetSustain(bool on) { m_sustain = on; }
void Update(float seconds, AudioCallbackUserData *audioState)
{
m_osc1.SetMode(m_params.osc1WaveType);
m_osc2.SetMode(m_params.osc2WaveType);
m_eg.SetADSR(m_params.attack, m_params.decay, m_params.sustain, m_params.release);
m_eg.Update(seconds);
audioState->toneVolume = INT16_MAX * m_params.volume;
audioState->envelopeValue = m_eg.Evaluate();
m_lowpassFilter.SetCutoff(m_params.lowpassCutoff);
m_lowpassFilter.SetResonance(m_params.lowpassResonance);
m_lowpassFilter.SetWetness(m_params.lowpassWetness);
}
//Sample the oscillator(s)
//Called from the audio callback or audio thread
//Filtering is done later since it's not per sample
Sint16 Sample(AudioCallbackUserData *audioState)
{
//TODO: could change oscillators to return floats if we want
//to do mixing in floats anyway
//TODO: store the wave period per osc (different for osc2 if it's detuned)
AudioConfig *audioConfig = audioState->audioConfig;
float env = audioState->envelopeValue;
int wp1 = audioConfig->samplesPerSecond / audioState->toneHz;
int wp2 = audioConfig->samplesPerSecond / (audioState->toneHz + m_params.osc2Detune);
float osc1S = m_osc1.Sample(audioState, wp1) / (float)INT16_MAX;
float osc2S = m_osc2.Sample(audioState, wp2) / (float)INT16_MAX;
return (lerp(osc1S, osc2S, m_params.oscMixAmount) * env) * INT16_MAX;
}
void DoFiltering(Sint16 *buffer, int numSamples)
{
if (m_params.lowpassWetness > SMALL_FLOAT)
m_lowpassFilter.DoFiltering(buffer, numSamples);
}
private:
EnvelopeGenerator m_eg;
Oscillator m_osc1;
Oscillator m_osc2;
LowpassFilter m_lowpassFilter;
SynthParams m_params;
bool m_sustain;
};
//Event handling, mostly
//owns synth
class Application
{
public:
Application(SDL_Window *window)
: m_done(false)
, m_window(window)
{
}
~Application()
{
}
bool IsDone() { return m_done; }
Synth &GetSynth() { return m_synth; }
void Update(AudioCallbackUserData *audioState)
{
SDL_Event event;
while (SDL_PollEvent(&event))
{
ImGui_ImplSDL2_ProcessEvent(&event);
if (event.type == SDL_QUIT)
m_done = true;
if (event.type == SDL_WINDOWEVENT &&
event.window.event == SDL_WINDOWEVENT_CLOSE &&
event.window.windowID == SDL_GetWindowID(m_window))
{
m_done = true;
}
m_keyboard.HandleSDL(event);
}
if (m_keyboard.IsKeyDown(SDLK_ESCAPE))
m_done = true;
//Piano keys trigger the envelope generator directly
//TODO: maybe call synth.Trigger and pass the tone hz
//so we don't modify the audio state here
EnvelopeGenerator &eg = GetSynth().GetEG();
bool anyKeyDown = false;
for (int i = 0; i < IM_ARRAYSIZE(noteKeyTable); i++)
{
if (m_keyboard.IsKeyDown(noteKeyTable[i]))
{
anyKeyDown = true;
//we lose a bit of precision here due to float->int
audioState->toneHz = noteTable[i];
eg.Trigger();
}
}
if (m_keyboard.WasKeyPressed(SDLK_SPACE))
m_synth.SetSustain(!m_synth.GetSustain());
if (!m_synth.GetSustain() && !anyKeyDown)
eg.Untrigger();
float time = ImGui::GetIO().DeltaTime;
m_synth.Update(time, audioState);
}
void DoGUI(AudioCallbackUserData *audioState)
{
ImGui::Begin("Hello, synth!");
SynthParams &sp = m_synth.GetParams();
ImGui::SliderFloat("Volume", &sp.volume, 0, 1);
ImGui::Text("Osc1");
const char* items[] = { "Triangle", "Sine", "Square", "Saw" };
int currentWave = sp.osc1WaveType;
ImGui::Combo("Osc 1 type", &currentWave, items, IM_ARRAYSIZE(items));
sp.osc1WaveType = static_cast<WaveType>(currentWave);
currentWave = sp.osc2WaveType;
ImGui::Combo("Osc 2 type", &currentWave, items, IM_ARRAYSIZE(items));
sp.osc2WaveType = static_cast<WaveType>(currentWave);
ImGui::SliderInt("Osc 2 detune", &sp.osc2Detune, -10, 10, "%d");
ImGui::SliderFloat("Osc mix", &sp.oscMixAmount, 0.0f, 1.0f, "%.2f");
ImGui::Text("Envelope");
{
ImGui::SliderFloat("Attack", &sp.attack, 0.0f, 1.0f, "%.3f");
ImGui::SliderFloat("Decay", &sp.decay, 0.0f, 1.0f, "%.3f");
ImGui::SliderFloat("Sustain", &sp.sustain, 0.0f, 1.0f, "%.3f");
ImGui::SliderFloat("Release", &sp.release, 0.0f, 1.0f, "%.3f");
}
ImGui::Text("Lowpass filter");
ImGui::SliderFloat("Cutoff", &sp.lowpassCutoff, 10, 1000, "%.0f");
ImGui::SliderFloat("Resonance", &sp.lowpassResonance, 1, 4, "%.3f");
ImGui::SliderFloat("Wetness", &sp.lowpassWetness, 0, 1, "%.3f");
//the imgui demo window is a very handy reference, so it's included here behind a toggle
static bool showDemo = false;
if (m_keyboard.WasKeyPressed(SDLK_9))
showDemo = !showDemo;
ImGui::Text(m_synth.GetSustain() ? "Sustain ON" : "Sustain OFF");
ImGui::Text("Tone %d Hz", audioState->toneHz);
//Plot envelope
{
static float values[90] = { 0 };
static int values_offset = 0;
values[values_offset] = m_synth.GetEG().Evaluate();
values_offset = (values_offset+1) % IM_ARRAYSIZE(values);
ImGui::PlotLines("Envelope", values, IM_ARRAYSIZE(values),
values_offset, nullptr, 0.0f, 1.0f, ImVec2(0,80));
}
ImGui::Text("Use keyboard to play! Z = C4, space = toggle sustain");
ImGui::End();
if (showDemo)
ImGui::ShowDemoWindow();
}
private:
bool m_done;
Keyboard m_keyboard;
Synth m_synth;
SDL_Window *m_window;
};
//SDL opengl initialization and cleanup
class GraphicDevice
{
public:
GraphicDevice()
{
// GL 3.0 + GLSL 130
SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, 0);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
// Create window with graphics context
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);
SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 8);
SDL_DisplayMode current;
SDL_GetCurrentDisplayMode(0, &current);
m_window = SDL_CreateWindow("Hello Synth",
SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 800, 600,
SDL_WINDOW_OPENGL|SDL_WINDOW_RESIZABLE);
m_context = SDL_GL_CreateContext(m_window);
SDL_GL_SetSwapInterval(1); // Enable vsync
// Initialize OpenGL loader
bool err = gl3wInit() != 0;
if (err)
{
throw std::runtime_error("Failed to initialize OpenGL loader");
}
}
~GraphicDevice()
{
SDL_GL_DeleteContext(m_context);
SDL_DestroyWindow(m_window);
}
SDL_Window *GetWindow() const { return m_window; }
SDL_GLContext &GetContext() { return m_context; }
const char *GetGLSLVersion() const { return "#version 130"; }
void MakeContextCurrent()
{
SDL_GL_MakeCurrent(m_window, m_context);
}
void Swap()
{
SDL_GL_SwapWindow(m_window);
}
private:
SDL_Window *m_window;
SDL_GLContext m_context;
};
//SDL audio initialization and cleanup
class AudioDevice
{
public:
AudioDevice(AudioCallbackUserData *callbackUserData)
{
SDL_AudioSpec AudioSettings = {};
AudioSettings.freq = callbackUserData->audioConfig->samplesPerSecond;
AudioSettings.format = AUDIO_S16;
AudioSettings.channels = 2;
AudioSettings.samples = DEFAULT_SAMPLES;
AudioSettings.callback = &AudioCallbackNonThreaded;
AudioSettings.userdata = callbackUserData;
SDL_AudioSpec ObtainedSettings = {};
m_deviceID = SDL_OpenAudioDevice(
NULL, 0, &AudioSettings, &ObtainedSettings,
SDL_AUDIO_ALLOW_ANY_CHANGE
);
if (AudioSettings.format != ObtainedSettings.format)
{
SDL_Log("Unable to obtain expected audio settings: %s", SDL_GetError());
throw std::runtime_error("Failed to initialize audio");
}
}
~AudioDevice()
{
SDL_CloseAudioDevice(m_deviceID);
}
void Pause() { SDL_PauseAudioDevice(m_deviceID, 1); }
void Unpause() { SDL_PauseAudioDevice(m_deviceID, 0); }
private:
SDL_AudioDeviceID m_deviceID;
};
int main(int, char**)
{
// Setup SDL
if (SDL_Init(SDL_INIT_VIDEO|SDL_INIT_TIMER|SDL_INIT_AUDIO) != 0)
{
printf("Error: %s\n", SDL_GetError());
return -1;
}
auto graphicDevice = new GraphicDevice();
// Setup Dear ImGui context
IMGUI_CHECKVERSION();
ImGui::CreateContext();
ImGuiIO& io = ImGui::GetIO();
// Setup Platform/Renderer bindings
ImGui_ImplSDL2_InitForOpenGL(graphicDevice->GetWindow(), graphicDevice->GetContext());
ImGui_ImplOpenGL3_Init(graphicDevice->GetGLSLVersion());
Application app(graphicDevice->GetWindow());
// Setup global UI style
ImGui::StyleColorsDark();
ImVec4 clear_color = ImVec4(0.45f, 0.55f, 0.60f, 1.00f);
AudioConfig audioConfig = {};
audioConfig.samplesPerSecond = DEFAULT_SAMPLERATE;
//2 times the sample size because of stereo
audioConfig.bytesPerSample = 2 * sizeof(Sint16);
//This is also the userdata for the audio callback, so we have
//to put some extra references here.
//The commented-out fields were only used for circular buffering.
AudioCallbackUserData callbackData = {};
//callbackData.size = audioConfig.samplesPerSecond * audioConfig.bytesPerSample;
//callbackData.buffer = new Uint8[AudioBuffer.size];
//memset(callbackData.buffer, 0, AudioBuffer.size);
//callbackData.readCursor = 0;
//callbackData.writeCursor = audioConfig.bytesPerSample;
callbackData.audioConfig = &audioConfig;
callbackData.synthesizer = &app.GetSynth();
callbackData.toneHz = noteTable[Note_C4];
callbackData.toneVolume = INT16_MAX * 0.9f;
auto audioDevice = new AudioDevice(&callbackData);
audioDevice->Unpause();
// Main loop
while (!app.IsDone())
{
app.Update(&callbackData);
// Start the Dear ImGui frame (done outside Application::DoGUI
// in case we want to render secondary windows)
ImGui_ImplOpenGL3_NewFrame();
ImGui_ImplSDL2_NewFrame(graphicDevice->GetWindow());
ImGui::NewFrame();
app.DoGUI(&callbackData);
// Rendering
ImGui::Render();
graphicDevice->MakeContextCurrent();
glViewport(0, 0, (int)io.DisplaySize.x, (int)io.DisplaySize.y);
glClearColor(clear_color.x, clear_color.y, clear_color.z, clear_color.w);
glClear(GL_COLOR_BUFFER_BIT);
ImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData());
graphicDevice->Swap();
}
// Cleanup
ImGui_ImplOpenGL3_Shutdown();
ImGui_ImplSDL2_Shutdown();
ImGui::DestroyContext();
audioDevice->Pause();
delete audioDevice;
//delete[] AudioBuffer.buffer;
delete graphicDevice;
SDL_Quit();
return 0;
}
//Non-threaded audio callback that operates without an extra buffer.
//Sample the synth once and write the same sample to both channels,
//since we don't have any panning.
static void AudioCallbackNonThreaded(void* UserData, Uint8* DeviceBuffer, int bytes)
{
auto audioState = reinterpret_cast<AudioCallbackUserData*>(UserData);
Synth *synth = audioState->synthesizer;
Sint16 *buffer = reinterpret_cast<Sint16*>(DeviceBuffer);
int length = bytes / 2; //number of Sint16 values in the buffer (interleaved stereo)
for (int i = 0; i < length-1; i+=2)
{
Sint16 sample = synth->Sample(audioState);
buffer[i] = sample;
buffer[i+1] = sample;
audioState->sampleIndex++;
}
//filter operates on the whole buffer and not individual samples
synth->DoFiltering((Sint16*)DeviceBuffer, length);
}
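//Worked buffer math for the callback above (assuming the default settings): with AUDIO_S16
//stereo and 512 sample frames per callback, bytes = 512 * 2 channels * 2 bytes = 2048,
//so length = 1024 Sint16 values and the loop writes 512 left/right pairs.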