Container for createAudioContext: returns an audioContext, a list of audioSources (the page's media elements), and an analyser node that all sources are connected to.
import createAudioContext from 'ios-safe-audio-context';

export async function createAudioContextContainer() {
  // createAudioContext (this could be some other implementation/polyfill,
  // so long as it returns an audioContext):
  const audioContext = createAudioContext();
  const audioSources = [];
  const analyserNode = audioContext.createAnalyser();
  analyserNode.fftSize = Math.pow(2, 11);
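  // (per the Web Audio spec, an fftSize of 2^11 = 2048 means the analyser
  // exposes fftSize / 2 = 1024 frequency bins via frequencyBinCount)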
  // find all `<audio>` elements on the page:
  for (const source of document.querySelectorAll('audio')) {
    // create a source of type audioNode for each found `<audio>` element[^1][^2]:
    const audioNode = audioContext.createMediaElementSource(source);
    // connect each audioNode to the context's output:
    audioNode.connect(audioContext.destination);
    // and also send each audioNode's data to our analyser:
    audioNode.connect(analyserNode);
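    // (the analyser is a passive tap here: it inspects the signal without
    // needing its own connection to audioContext.destination)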
    audioSources.push({
      mediaElement: source,
      audioNode,
    });
  }
  // give the context, a list of interactive <audio> elements,
  // and the shared analyser node to the caller:
  return { audioContext, audioSources, analyserNode };
}
// [^1]: MDN's WebAudio API Documentation: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API
// [^2]: AudioNode Specification: https://webaudio.github.io/web-audio-api/#audionode
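
For context, a minimal usage sketch. The import path, the button selector, and the requestAnimationFrame polling loop are illustrative assumptions, not part of the gist; the AnalyserNode calls themselves (frequencyBinCount, getByteFrequencyData) are standard Web Audio API:

import { createAudioContextContainer } from './create-audio-context-container';

async function start() {
  const { audioContext, analyserNode } = await createAudioContextContainer();

  // browsers may create the context in a suspended state until a user
  // gesture; resume defensively before reading data:
  if (audioContext.state === 'suspended') {
    await audioContext.resume();
  }

  // fftSize of 2048 gives frequencyBinCount = 1024 bins:
  const frequencyData = new Uint8Array(analyserNode.frequencyBinCount);

  function poll() {
    // copy the current frequency-domain snapshot (one 0-255 value per bin):
    analyserNode.getByteFrequencyData(frequencyData);
    // ...render or process frequencyData here...
    requestAnimationFrame(poll);
  }
  poll();
}

// call start() from a user-gesture handler (e.g. a click) so autoplay
// policies allow the context to run:
document.querySelector('button')?.addEventListener('click', start);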