Using the HTML5 Canvas element to render a visualization from an mp3 file. The script loads an mp3 file into an AudioContext object; two analyser nodes are created to capture left/right audio channel data; as the audio file plays, each channel's level is drawn as a gradient-filled meter on the canvas element. Only works in browsers that support the Web Audio API.
<div class="row"> | |
<div class="col-md-3 col-sm-3 col-xs-4"> | |
<canvas id="canvas" width="60" height="130" style="display: block;"></canvas> | |
<p><button class="btn btn-primary btn-sm" id="play-audio"><span>Play Audio <span class="glyphicon glyphicon-volume-up"></span></span></button></p> | |
</div><!-- .col --> | |
<div class="col-md-9 col-sm-9 col-xs-8"> | |
<div class="panel panel-default"> | |
<img src="/content/07-project-diane/04-interfaces/02-audio-visualization/01-audio-mp3-canvas-visualization/one-small-step.jpg" class="img-responsive img-rounded"/> | |
</div> | |
</div><!-- .col --> | |
</div><!-- .row --> | |
<script>
// create the audio context (chrome only for now)
if (! window.AudioContext) {
  if (! window.webkitAudioContext) {
    alert('no audiocontext found');
  }
  window.AudioContext = window.webkitAudioContext;
}
var context = new AudioContext();
var audioBuffer;
var sourceNode;
var splitter;
var analyser, analyser2;
var javascriptNode;

// get the context from the canvas to draw on
var ctx = $("#canvas").get()[0].getContext("2d");

// create a gradient for the fill. Note the strange
// offset, since the gradient is calculated based on
// the canvas, not the specific element we draw
var gradient = ctx.createLinearGradient(0, 0, 0, 130);
gradient.addColorStop(1, '#000000');
gradient.addColorStop(0.75, '#ff0000');
gradient.addColorStop(0.25, '#ffff00');
gradient.addColorStop(0, '#ffffff');

// load the sound
setupAudioNodes();
loadSound("/content/07-project-diane/04-interfaces/02-audio-visualization/01-audio-mp3-canvas-visualization/one-small-step.mp3");

function setupAudioNodes() {
  // setup a javascript node (2048 is the buffer size in sample-frames)
  javascriptNode = context.createScriptProcessor(2048, 1, 1);
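  // note: createScriptProcessor has since been deprecated in favor of
  // AudioWorklet; see the note after the script for an alternative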
  // connect to destination, else it isn't called
  javascriptNode.connect(context.destination);

  // set up an analyser for the left channel
  analyser = context.createAnalyser();
  analyser.smoothingTimeConstant = 0.3;
  analyser.fftSize = 1024;

  // set up an analyser for the right channel
  analyser2 = context.createAnalyser();
  analyser2.smoothingTimeConstant = 0.0;
  analyser2.fftSize = 1024;

  // create a buffer source node and a channel splitter
  sourceNode = context.createBufferSource();
  splitter = context.createChannelSplitter();
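  // createChannelSplitter() defaults to six outputs; a stereo source
  // only feeds outputs 0 (left) and 1 (right)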
  // connect the source to the splitter
  sourceNode.connect(splitter);

  // connect one output of the splitter to each analyser
  splitter.connect(analyser, 0, 0);
  splitter.connect(analyser2, 1, 0);

  // connect the first analyser to the javascript node;
  // we use the javascript node to draw at a
  // specific interval
  analyser.connect(javascriptNode);
  // splitter.connect(context.destination,0,0);
  // splitter.connect(context.destination,0,1);

  // and connect the source to the destination so the audio is audible
  sourceNode.connect(context.destination);
}

// load the specified sound
function loadSound(url) {
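  // note: loading via XMLHttpRequest means the mp3 must be served
  // same-origin (or with CORS headers)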
  var request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';
  // when loaded, decode the data
  request.onload = function() {
    context.decodeAudioData(request.response, function(buffer) {
      // hand the decoded buffer to the source node
      playSound(buffer);
    }, onError);
  };
  request.send();
}

function playSound(buffer) {
  sourceNode.buffer = buffer;
  // sourceNode.start(0);
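  // note: an AudioBufferSourceNode is one-shot (start() may only be
  // called once), so playback is deferred to the button handler below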
}

// log if an error occurs
function onError(e) {
  console.log(e);
}

// when the javascript node fires, we use information from
// the analyser nodes to draw the volume meters
javascriptNode.onaudioprocess = function() {
  // get the average for the first channel
  var array = new Uint8Array(analyser.frequencyBinCount);
  analyser.getByteFrequencyData(array);
  var average = getAverageVolume(array);

  // get the average for the second channel
  var array2 = new Uint8Array(analyser2.frequencyBinCount);
  analyser2.getByteFrequencyData(array2);
  var average2 = getAverageVolume(array2);

  // clear the current state
  ctx.clearRect(0, 0, 60, 130);

  // set the fill style and draw the meters; anything drawn past the
  // bottom edge of the canvas is clipped, so the fixed height of 130
  // is harmless
  ctx.fillStyle = gradient;
  ctx.fillRect(0, 130 - average, 25, 130);
  ctx.fillRect(30, 130 - average2, 25, 130);
};

function getAverageVolume(array) {
  var values = 0;
  var average;
  var length = array.length;
  // sum all the frequency amplitudes
  for (var i = 0; i < length; i++) {
    values += array[i];
  }
  average = values / length;
  return average;
}
$(document).ready(function() {
  $('#play-audio').click(function(event) {
    event.preventDefault();
    // newer browsers suspend the context until a user gesture
    if (context.resume) { context.resume(); }
    sourceNode.start(0);
    // the source can only be started once; disable the button
    $(this).prop('disabled', true);
  });
});
</script>
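ScriptProcessorNode has since been deprecated in favor of AudioWorklet. For a pure visualization like this one, no processing node is needed at all: the analysers can be polled from requestAnimationFrame instead. A minimal, untested sketch of that approach, reusing the analyser, analyser2, ctx, gradient, and getAverageVolume names from the script above (with the javascriptNode wiring dropped entirely):

// redraw the meters on every display frame instead of onaudioprocess
function drawMeters() {
  requestAnimationFrame(drawMeters);
  var left = new Uint8Array(analyser.frequencyBinCount);
  var right = new Uint8Array(analyser2.frequencyBinCount);
  analyser.getByteFrequencyData(left);
  analyser2.getByteFrequencyData(right);
  ctx.clearRect(0, 0, 60, 130);
  ctx.fillStyle = gradient;
  ctx.fillRect(0, 130 - getAverageVolume(left), 25, 130);
  ctx.fillRect(30, 130 - getAverageVolume(right), 25, 130);
}
drawMeters();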