@hayleigh-dot-dev
Created May 27, 2019 15:21
/* global AudioContext */
import Program from 'program-instrument'
import * as Node from 'audio-node'
import * as Prop from 'audio-property'
import * as Note from 'music-note'
import * as Time from 'music-time'
import * as Html from 'html-element'
import * as Attr from 'html-attribute'
import * as Event from 'html-event'
const audioContext = new AudioContext()
// The WebAudio context usually starts in a suspended state. It requires some
// user interaction event before it can be resumed, so here we just register
// a simple click event handler on the window to resume it as soon as possible.
window.onclick = () => audioContext.state === 'suspended'
? audioContext.resume()
: void 0
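// Note that resume() returns a Promise. There is nothing useful to do once
// it settles, so it is safe to ignore here.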
// Model ======================================================================
// This is a simple helper function to create a step sequencer of some fixed
// length and midi note number. Easier than typing it out by hand! Steps can
// either be true or false to indicate whether or not they are active.
const sequence = (note, length) => (
{ note, steps: Array.from({ length }, () => false) }
)
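// For example, sequence(60, 4) produces:
//   { note: 60, steps: [ false, false, false, false ] }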
const model = {
sequences: [
sequence(67, 16),
sequence(65, 16),
sequence(63, 16),
sequence(60, 16),
sequence(58, 16),
sequence(55, 16),
sequence(53, 16),
sequence(51, 16),
sequence(48, 16)
],
step: 0,
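// Presumably Time.tosec converts a note division at a given tempo into
// seconds: at 120 bpm a sixteenth note lasts 60 / 120 / 4 = 0.125s. Note
// that the literal 120 here mirrors the tempo field below.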
stepTime: Time.tosec(120, Time.Sixteenth),
tempo: 120, // beats per minute
currentTime: 0, // AudioContext time, in seconds
running: false
}
// Update =====================================================================
//
const Actions = {
PLAY_SEQ: 'PLAY_SEQ',
NEXT_STEP: 'NEXT_STEP',
STOP_SEQ: 'STOP_SEQ',
RESET: 'RESET',
TOGGLE_STEP: 'TOGGLE_STEP'
}
//
const update = ([action, ...data], model) => {
switch (action) {
case Actions.PLAY_SEQ: {
const [ currentTime ] = data
// Ignore repeated PLAY actions if the sequencer is already running.
return !model.running
? [{ ...model, running: true, currentTime }, scheduleNextStep]
: [{ ...model }]
}
case Actions.NEXT_STEP: {
const [ currentTime ] = data
// Check if the sequencer is currently running. This is necessary because
// of the async nature of the scheduleNextStep effect. Otherwise it is
// very possible to stop the sequencer and then have scheduleNextStep dispatch
// one last NEXT_STEP action, causing the sequencer to "jump" one step forward
// after every stop.
return model.running
? [{ ...model, step: (model.step + 1) % 16, currentTime }, scheduleNextStep]
: [{ ...model }]
}
case Actions.STOP_SEQ: {
return [{ ...model, running: false }]
}
case Actions.RESET: {
return [{ ...model,
running: false,
// Map over each sequence, and each step in that sequence, turning every step off.
sequences: model.sequences.map(({ note, steps }) => ({ note, steps: steps.map(_ => false) })),
step: 0
}]
}
case Actions.TOGGLE_STEP: {
const [ seq, step ] = data
const sequences = model.sequences.map(({ note, steps }, i) => {
// It is necessary to parse seq and step into ints because HTML data
// attributes are always stored as strings.
return i === parseInt(seq, 10)
? { note, steps: steps.map((active, j) => j === parseInt(step, 10) ? !active : active) }
: ({ note, steps })
})
return [{ ...model, sequences }]
}
default: {
// Unknown actions leave the model untouched.
return [{ ...model }]
}
}
}
// "schedule" is probably a disingenuous term for this, as no
// real scheduling happens. Constantly poll the audio context
// until its currentTime is equal to the time needed to trigger
// the next step (allowing for some lookahead).
const scheduleNextStep = (dispatch, model) => {
const targetTime = model.currentTime + model.stepTime
// A certain amount of lookahead is necessary to compensate
// for the relatively inaccurate nature of setInterval.
const lookahead = 0.1 // 100ms, this should probably be smaller
const interval = setInterval(() => {
const contextTime = audioContext.currentTime
if (contextTime >= targetTime - lookahead) {
// By allowing for some lookahead, we may reach
// this block "early". Here, we calculate the time
// difference between now and the actual target time
// so we can compensate for it when we report the
// current time back to the runtime.
const diff = targetTime - contextTime
clearInterval(interval)
dispatch([ Actions.NEXT_STEP, contextTime + diff ])
}
}, 25)
}
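// A worked example with the default model: a step lasts 0.125s, so with a
// 100ms lookahead the condition is met on the first 25ms poll. NEXT_STEP is
// dispatched roughly 100ms early, and diff compensates for that when the
// current time is reported back to the runtime.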
// Audio ======================================================================
//
const audio = model => {
const attack = model.stepTime / 10
const decay = model.stepTime / 10 * 9
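// With the default stepTime of 0.125s this gives a 12.5ms attack and a
// 112.5ms decay, so each note's envelope fits exactly inside one step.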
return model.sequences.map(sequence => {
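// Note.mtof presumably implements the standard midi-to-frequency
// conversion, f = 440 * 2^((m - 69) / 12), e.g. note 60 -> ~261.63Hz.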
const note = Note.mtof(sequence.note)
const gain = sequence.steps[model.step] ? 0.5 : 0
const envelope = Prop.linearRampToValuesAtTime(Prop.gain, [
[ gain, model.currentTime + attack ], // attack
[ 0, model.currentTime + attack + decay ] // decay
])
return Node.oscillator([ Prop.frequency(note) ], [
Node.gain(envelope, [
Node.dac()
])
])
})
}
// View =======================================================================
//
const sequenceView = (sequence, i, currentStep) => {
// The background colour can be one of three shades of grey. If the step has
// been activated, it is the darkest of the three shades. If not, but this
// step is the sequencer's current step, it is a middle shade of grey.
// Finally, if neither condition holds, it is a light grey.
const bg = (active, step) => active
? 'bg-gray-500'
: step === currentStep ? 'bg-gray-300' : 'bg-gray-200'
return Html.div([ Attr.className('flex mb-2 mt-2 -mx-1') ], [
...sequence.steps.map((active, j) =>
Html.div([ Attr.className('step flex-1 px-1 h-12') ], [
Html.div([ Attr.className(`${bg(active, j)} hover:bg-gray-600 h-12`), Attr.dataCustom('sequence', i), Attr.dataCustom('step', j) ])
])
)
])
}
//
const view = model => {
return Html.div([ Attr.className('container mx-auto') ], [
// We're using the Tailwind utility css library to style this app, so
// forgive the long class list.
Html.button([ Attr.id('play'), Attr.className('bg-white hover:bg-gray-100 text-gray-800 font-semibold mr-2 py-2 px-4 border border-gray-400 rounded shadow') ],
[ 'play' ]),
Html.button([ Attr.id('stop'), Attr.className('bg-white hover:bg-gray-100 text-gray-800 font-semibold mr-2 py-2 px-4 border border-gray-400 rounded shadow') ],
[ 'stop' ]),
Html.button([ Attr.id('reset'), Attr.className('bg-white hover:bg-gray-100 text-gray-800 font-semibold py-2 px-4 border border-gray-400 rounded shadow') ],
[ 'reset' ]),
...model.sequences.map((sequence, i) => sequenceView(sequence, i, model.step))
])
}
// Listen =====================================================================
//
const listen = model => {
return [
// Listeners are a good place to grab variables to bring
// into the pure Flor core. Here we grab the audio context
// current time to schedule the first note in the sequence
// to play right now.
Event.click('#play', _ => [ Actions.PLAY_SEQ, audioContext.currentTime ]),
Event.click('#stop', _ => [ Actions.STOP_SEQ ]),
Event.click('#reset', _ => [ Actions.RESET ]),
// Making good use of css selectors and custom data attributes,
// we are able to listen to every step in each sequence and know
// exactly which one has been clicked on.
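// Note that dataset values always come through as strings, which is why
// TOGGLE_STEP parses them back into ints in the update function.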
Event.click('.step', e => [
Actions.TOGGLE_STEP,
e.target.dataset.sequence,
e.target.dataset.step
])
]
}
// Program ====================================================================
//
const App = Program({ model, audio, view, update, listen })
//
window.onload = () => {
App.start({
context: audioContext,
root: document.querySelector('#app')
})
}