<canvas id="canvas" width="512" height="256"></canvas>
<p id="controls">
  <input type="button" id="start_button" value="Start">
  <input type="button" id="stop_button" value="Stop">
</p>
<!-- ----------------------------------------------------- -->
<style>
  #canvas {
    margin-left: auto;
    margin-right: auto;
    display: block;
    background-color: black;
  }
  #controls {
    text-align: center;
  }
  #start_button, #stop_button {
    font-size: 16pt;
  }
</style>
<!-- ----------------------------------------------------- -->
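<!-- Assumption: jQuery is not part of this gist, but the $() calls in the script below
     require it; any recent build works, e.g. the copy hosted on code.jquery.com: -->
<script src="https://code.jquery.com/jquery-3.7.1.min.js"></script>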
<script type="text/javascript">
// Shims to deal with vendor-prefixed function names in older browsers
window.requestAnimFrame = (function(){
  return window.requestAnimationFrame ||
         window.webkitRequestAnimationFrame ||
         window.mozRequestAnimationFrame ||
         function(callback, element){
           window.setTimeout(callback, 1000 / 60);
         };
})();
window.AudioContext = (function(){
  // Prefer the standard constructor, falling back to the vendor-prefixed ones
  return window.AudioContext || window.webkitAudioContext || window.mozAudioContext;
})();
// Global variables for audio
var audioContext;
var audioBuffer;
var sourceNode;
var analyserNode;
var javascriptNode;
var audioData = null;
var audioPlaying = false;
var sampleSize = 1024;  // number of samples to collect before analyzing the data
var fftSize = 1024;     // must be a power of two  // **
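// Note: an AnalyserNode exposes fftSize / 2 frequency bins (frequencyBinCount),
// so fftSize = 1024 gives 512 bins - one per pixel column of the 512px-wide canvas.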
var frequencyArray;     // array to hold the frequency-domain data
// The audio file must be served from the same origin as this page (or with CORS headers),
// otherwise the XMLHttpRequest below is blocked by the browser's same-origin policy.
var audioUrl = "/assets/Doctor_Who_theme_excerpt.ogg";
// Global variables for the graphics
var canvasWidth = 512;
var canvasHeight = 256;
var ctx;
var gradient;  // **
$(document).ready(function() {
  ctx = $("#canvas").get(0).getContext("2d");
  gradient = ctx.createLinearGradient(0, 0, 512, 0);  // **
  gradient.addColorStop(0.0,  '#ff0000');             // **
  gradient.addColorStop(0.25, '#ffff00');             // **
  gradient.addColorStop(0.5,  '#ffffff');             // **
  gradient.addColorStop(1.0,  '#ffffff');             // **
  // The AudioContext is the primary 'container' for all of the audio node objects
  try {
    audioContext = new AudioContext();
  } catch(e) {
    alert('Web Audio API is not supported in this browser');
  }
  // When the Start button is clicked, finish setting up the audio nodes, play the sound,
  // gather samples for the analysis, and update the canvas
  $("#start_button").on('click', function(e) {
    e.preventDefault();
    // Set up the analyser, the source buffer and the javascriptNode
    setupAudioNodes();
    // Set up the event handler that fires every time enough samples have been collected,
    // then trigger the audio analysis and draw the results
    javascriptNode.onaudioprocess = function () {
      // Get the frequency-domain data for this sample block
      analyserNode.getByteFrequencyData(frequencyArray);  // **
      // Draw the display if the audio is playing
      if (audioPlaying) {
        requestAnimFrame(drawFrequencyDomain);  // **
      }
    };
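    // Note: onaudioprocess fires once per ScriptProcessor buffer, i.e. every
    // sampleSize (1024) frames, which is roughly every 23 ms at a 44.1 kHz sample rate.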
    // Load the audio the first time through; afterwards play it from the cached buffer
    if (audioData == null) {
      loadSound(audioUrl);
    } else {
      playSound(audioData);
    }
  });
  // Stop the audio playing
  $("#stop_button").on('click', function(e) {
    e.preventDefault();
    if (sourceNode) {  // ignore clicks before playback has started
      sourceNode.stop(0);
    }
    audioPlaying = false;
  });
});
function setupAudioNodes() {
  sourceNode   = audioContext.createBufferSource();
  analyserNode = audioContext.createAnalyser();
  analyserNode.smoothingTimeConstant = 0.0;  // **
  analyserNode.fftSize = fftSize;            // **
  javascriptNode = audioContext.createScriptProcessor(sampleSize, 1, 1);
  // Create the array for the frequency data values
  frequencyArray = new Uint8Array(analyserNode.frequencyBinCount);  // **
  // Now connect the nodes together
  sourceNode.connect(audioContext.destination);
  sourceNode.connect(analyserNode);
  analyserNode.connect(javascriptNode);
  javascriptNode.connect(audioContext.destination);
}
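// The graph built above has two branches: source -> destination carries the audible sound,
// while source -> analyser -> scriptProcessor -> destination only drives the analysis.
// (ScriptProcessorNode is deprecated in the current Web Audio spec in favour of
// AudioWorkletNode, but it is still supported by current browsers.)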
// Load the audio from the URL via XMLHttpRequest and store it in the global variable audioData.
// Note that the audio load is asynchronous.
function loadSound(url) {
  var request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';  // raw bytes, as expected by decodeAudioData
  // When loaded, decode the data and play the sound
  request.onload = function () {
    audioContext.decodeAudioData(request.response, function (buffer) {
      audioData = buffer;
      playSound(audioData);
    }, onError);
  };
  request.send();
}
// Play the audio and loop it until stopped
function playSound(buffer) {
  sourceNode.buffer = buffer;
  sourceNode.loop = true;   // set looping before starting playback
  sourceNode.start(0);      // play the sound now
  audioPlaying = true;
}
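// An AudioBufferSourceNode can only be start()ed once, which is why setupAudioNodes()
// creates a fresh sourceNode each time the Start button is clicked.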
function onError(e) {
  console.log(e);
}
function drawFrequencyDomain() {
  clearCanvas();
  ctx.fillStyle = '#000000';  // **
  for (var i = 0; i < frequencyArray.length; i++) {
    var y = canvasHeight - Math.round(frequencyArray[i]);  // **
    ctx.fillRect(i, 0, 1, y);                              // **
  }
}
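// Drawing note: each byte value is in the range 0-255, matching the 256px-tall canvas.
// Painting a black column from the top down to (canvasHeight - value) leaves a
// gradient-coloured bar at the bottom whose height equals that frequency bin's magnitude.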
function clearCanvas() {
  ctx.fillStyle = gradient;  // **
  ctx.fillRect(0, 0, canvasWidth, canvasHeight);
}
</script>