Atlas - SDL_emscriptenaudio.c
/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2026 Sam Lantinga <[email protected]>

  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
#include "SDL_internal.h"

#ifdef SDL_AUDIO_DRIVER_EMSCRIPTEN

#include "../SDL_sysaudio.h"
#include "SDL_emscriptenaudio.h"

#include <emscripten/emscripten.h>

// just turn off clang-format for this whole file, this INDENT_OFF stuff on
// each EM_ASM section is ugly.
/* *INDENT-OFF* */ // clang-format off

// Hand the audio core our single preallocated mix buffer (allocated in
// OpenDevice for playback devices). buffer_size is left untouched, so the
// caller's default (device->buffer_size) stays in effect.
static Uint8 *EMSCRIPTENAUDIO_GetDeviceBuf(SDL_AudioDevice *device, int *buffer_size)
{
    return device->hidden->mixbuf;
}

// Push one buffer of interleaved float32 samples from the WASM heap into the
// current Web Audio output buffer, deinterleaving into per-channel arrays.
// $0 = C pointer to the sample data, $1 = number of sample frames.
static bool EMSCRIPTENAUDIO_PlayDevice(SDL_AudioDevice *device, const Uint8 *buffer, int buffer_size)
{
    const int framelen = SDL_AUDIO_FRAMESIZE(device->spec);
    MAIN_THREAD_EM_ASM({
        /* Convert incoming buf pointer to a HEAPF32 offset. */
        // NOTE(review): CPtrToHeap32Index is presumably defined in SDL's JS
        // glue elsewhere (byte pointer -> HEAPF32 element index) — not
        // visible in this file, confirm against the emscripten video/system
        // glue code.
        var SDL3 = Module['SDL3'];
        var buf = SDL3.CPtrToHeap32Index($0);
        var numChannels = SDL3.audio_playback.currentPlaybackBuffer['numberOfChannels'];
        for (var c = 0; c < numChannels; ++c) {
            var channelData = SDL3.audio_playback.currentPlaybackBuffer['getChannelData'](c);
            if (channelData.length != $1) {
                throw 'Web Audio playback buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
            }

            // Deinterleave: source is frame-major (frame j, channel c).
            for (var j = 0; j < $1; ++j) {
                channelData[j] = HEAPF32[buf + (j * numChannels + c)];
            }
        }
    }, buffer, buffer_size / framelen);
    return true;
}


static void EMSCRIPTENAUDIO_FlushRecording(SDL_AudioDevice *device)
{
    // Do nothing, the new data will just be dropped.
}

// Copy the current Web Audio recording buffer (set by the onaudioprocess
// handler or the silence fallback in OpenDevice) into the SDL buffer,
// interleaving the per-channel float arrays. $0 = destination C pointer,
// $1 = number of sample frames. Always reports the full buflen as recorded.
static int EMSCRIPTENAUDIO_RecordDevice(SDL_AudioDevice *device, void *buffer, int buflen)
{
    MAIN_THREAD_EM_ASM({
        var SDL3 = Module['SDL3'];
        var numChannels = SDL3.audio_recording.currentRecordingBuffer.numberOfChannels;
        for (var c = 0; c < numChannels; ++c) {
            var channelData = SDL3.audio_recording.currentRecordingBuffer.getChannelData(c);
            if (channelData.length != $1) {
                throw 'Web Audio recording buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
            }

            if (numChannels == 1) {  // fastpath this a little for the common (mono) case.
                for (var j = 0; j < $1; ++j) {
                    setValue($0 + (j * 4), channelData[j], 'float');  // 4 == sizeof(float) on the WASM heap
                }
            } else {
                for (var j = 0; j < $1; ++j) {
                    setValue($0 + (((j * numChannels) + c) * 4), channelData[j], 'float');
                }
            }
        }
    }, buffer, (buflen / sizeof(float)) / device->spec.channels);

    return buflen;
}

// Tear down the JS-side audio graph for this device: stop silence timers,
// detach media stream tracks and ScriptProcessorNodes, and close the shared
// AudioContext once neither playback nor recording still needs it. Then free
// the C-side hidden data. $0 = nonzero for a recording device.
static void EMSCRIPTENAUDIO_CloseDevice(SDL_AudioDevice *device)
{
    if (!device->hidden) {
        return;  // OpenDevice never got far enough to allocate anything.
    }

    MAIN_THREAD_EM_ASM({
        var SDL3 = Module['SDL3'];
        if ($0) {
            if (SDL3.audio_recording.silenceTimer !== undefined) {
                clearInterval(SDL3.audio_recording.silenceTimer);
            }
            if (SDL3.audio_recording.stream !== undefined) {
                var tracks = SDL3.audio_recording.stream.getAudioTracks();
                for (var i = 0; i < tracks.length; i++) {
                    SDL3.audio_recording.stream.removeTrack(tracks[i]);
                }
            }
            if (SDL3.audio_recording.scriptProcessorNode !== undefined) {
                // Replace the handler with a no-op before disconnecting, so a
                // late-firing event can't touch torn-down state.
                SDL3.audio_recording.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {};
                SDL3.audio_recording.scriptProcessorNode.disconnect();
            }
            if (SDL3.audio_recording.mediaStreamNode !== undefined) {
                SDL3.audio_recording.mediaStreamNode.disconnect();
            }
            SDL3.audio_recording = undefined;
        } else {
            // NOTE(review): loose `!=` here where the rest of the file uses
            // strict `!==` — harmless for undefined checks, but inconsistent.
            if (SDL3.audio_playback.scriptProcessorNode != undefined) {
                SDL3.audio_playback.scriptProcessorNode.disconnect();
            }
            if (SDL3.audio_playback.silenceTimer !== undefined) {
                clearInterval(SDL3.audio_playback.silenceTimer);
            }
            SDL3.audio_playback = undefined;
        }
        // The AudioContext is shared by playback and recording; only close it
        // when both sides have been torn down.
        if ((SDL3.audioContext !== undefined) && (SDL3.audio_playback === undefined) && (SDL3.audio_recording === undefined)) {
            SDL3.audioContext.close();
            SDL3.audioContext = undefined;
        }
    }, device->recording);

    SDL_free(device->hidden->mixbuf);
    SDL_free(device->hidden);
    device->hidden = NULL;

    SDL_AudioThreadFinalize(device);
}

// These JS library symbols are referenced from EM_ASM blocks below; declare
// the dependency so the Emscripten linker keeps them.
EM_JS_DEPS(sdlaudio, "$autoResumeAudioContext,$dynCall");

// Open a playback or recording device backed by a Web Audio AudioContext.
// Forces float32 samples and the context's native sample rate. Both paths
// drive SDL's audio iteration via dynCall from a ScriptProcessorNode
// callback, with a setInterval "silence" fallback while real audio isn't
// flowing yet (autoplay blocked, or microphone permission pending).
static bool EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device)
{
    // based on parts of library_sdl.js

    // create context
    const bool result = MAIN_THREAD_EM_ASM_INT({
        var SDL3 = Module['SDL3'];
        if (typeof(SDL3.audio_playback) === 'undefined') {
            SDL3.audio_playback = {};
        }
        if (typeof(SDL3.audio_recording) === 'undefined') {
            SDL3.audio_recording = {};
        }

        if (!SDL3.audioContext) {
            if (typeof(AudioContext) !== 'undefined') {
                SDL3.audioContext = new AudioContext();
            } else if (typeof(webkitAudioContext) !== 'undefined') {
                SDL3.audioContext = new webkitAudioContext();  // older Safari prefix
            }
            if (SDL3.audioContext) {
                // If the browser doesn't expose navigator.userActivation we
                // can't poll for a user gesture ourselves (see the silence
                // callback below), so let Emscripten's helper auto-resume
                // the context on the next input event instead.
                if ((typeof navigator.userActivation) === 'undefined') {
                    autoResumeAudioContext(SDL3.audioContext);
                }
            }
        }
        return (SDL3.audioContext !== undefined);
    });

    if (!result) {
        return SDL_SetError("Web Audio API is not available!");
    }

    device->spec.format = SDL_AUDIO_F32; // web audio only supports floats

    // Initialize all variables that we clean on shutdown
    device->hidden = (struct SDL_PrivateAudioData *)SDL_calloc(1, sizeof(*device->hidden));
    if (!device->hidden) {
        return false;
    }

    // limit to native freq
    device->spec.freq = MAIN_THREAD_EM_ASM_INT({ return Module['SDL3'].audioContext.sampleRate; });
    device->sample_frames = SDL_GetDefaultSampleFramesFromFreq(device->spec.freq) * 2;  // double the buffer size, some browsers need more, and we'll just have to live with the latency.

    SDL_UpdatedAudioDeviceFormat(device);

    if (!device->recording) {
        // Playback needs a C-side staging buffer; GetDeviceBuf returns it.
        device->hidden->mixbuf = (Uint8 *)SDL_malloc(device->buffer_size);
        if (!device->hidden->mixbuf) {
            return false;
        }
        SDL_memset(device->hidden->mixbuf, device->silence_value, device->buffer_size);
    }

    if (device->recording) {
        /* The idea is to take the recording media stream, hook it up to an
           audio graph where we can pass it through a ScriptProcessorNode
           to access the raw PCM samples and push them to the SDL app's
           callback. From there, we "process" the audio data into silence
           and forget about it.

           This should, strictly speaking, use MediaRecorder for recording, but
           this API is cleaner to use and better supported, and fires a
           callback whenever there's enough data to fire down into the app.
           The downside is that we are spending CPU time silencing a buffer
           that the audiocontext uselessly mixes into any playback. On the
           upside, both of those things are not only run in native code in
           the browser, they're probably SIMD code, too. MediaRecorder
           feels like it's a pretty inefficient tapdance in similar ways,
           to be honest. */

        // $0 = channels, $1 = sample frames, $2 = C callback pointer
        // (SDL_RecordingAudioThreadIterate), $3 = SDL_AudioDevice pointer.
        MAIN_THREAD_EM_ASM({
            var SDL3 = Module['SDL3'];
            var have_microphone = function(stream) {
                //console.log('SDL audio recording: we have a microphone! Replacing silence callback.');
                if (SDL3.audio_recording.silenceTimer !== undefined) {
                    clearInterval(SDL3.audio_recording.silenceTimer);
                    SDL3.audio_recording.silenceTimer = undefined;
                    SDL3.audio_recording.silenceBuffer = undefined
                }
                SDL3.audio_recording.mediaStreamNode = SDL3.audioContext.createMediaStreamSource(stream);
                SDL3.audio_recording.scriptProcessorNode = SDL3.audioContext.createScriptProcessor($1, $0, 1);
                SDL3.audio_recording.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {
                    if ((SDL3 === undefined) || (SDL3.audio_recording === undefined)) { return; }
                    // Silence the output side so this node doesn't leak the
                    // microphone into any playback mix.
                    audioProcessingEvent.outputBuffer.getChannelData(0).fill(0.0);
                    SDL3.audio_recording.currentRecordingBuffer = audioProcessingEvent.inputBuffer;
                    dynCall('ip', $2, [$3]);
                };
                SDL3.audio_recording.mediaStreamNode.connect(SDL3.audio_recording.scriptProcessorNode);
                SDL3.audio_recording.scriptProcessorNode.connect(SDL3.audioContext.destination);
                SDL3.audio_recording.stream = stream;
            };

            var no_microphone = function(error) {
                //console.log('SDL audio recording: we DO NOT have a microphone! (' + error.name + ')...leaving silence callback running.');
            };

            // we write silence to the audio callback until the microphone is available (user approves use, etc).
            SDL3.audio_recording.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate);
            SDL3.audio_recording.silenceBuffer.getChannelData(0).fill(0.0);
            var silence_callback = function() {
                SDL3.audio_recording.currentRecordingBuffer = SDL3.audio_recording.silenceBuffer;
                dynCall('ip', $2, [$3]);
            };

            // Fire at roughly the real buffer cadence: frames/rate seconds.
            SDL3.audio_recording.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000);

            if ((navigator.mediaDevices !== undefined) && (navigator.mediaDevices.getUserMedia !== undefined)) {
                navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(have_microphone).catch(no_microphone);
            } else if (navigator.webkitGetUserMedia !== undefined) {
                navigator.webkitGetUserMedia({ audio: true, video: false }, have_microphone, no_microphone);
            }
        }, device->spec.channels, device->sample_frames, SDL_RecordingAudioThreadIterate, device);
    } else {
        // setup a ScriptProcessorNode
        // $0 = channels, $1 = sample frames, $2 = C callback pointer
        // (SDL_PlaybackAudioThreadIterate), $3 = SDL_AudioDevice pointer.
        MAIN_THREAD_EM_ASM({
            var SDL3 = Module['SDL3'];
            SDL3.audio_playback.scriptProcessorNode = SDL3.audioContext['createScriptProcessor']($1, 0, $0);
            SDL3.audio_playback.scriptProcessorNode['onaudioprocess'] = function (e) {
                if ((SDL3 === undefined) || (SDL3.audio_playback === undefined)) { return; }
                // if we're actually running the node, we don't need the fake callback anymore, so kill it.
                if (SDL3.audio_playback.silenceTimer !== undefined) {
                    clearInterval(SDL3.audio_playback.silenceTimer);
                    SDL3.audio_playback.silenceTimer = undefined;
                    SDL3.audio_playback.silenceBuffer = undefined;
                }
                SDL3.audio_playback.currentPlaybackBuffer = e['outputBuffer'];
                dynCall('ip', $2, [$3]);
            };

            SDL3.audio_playback.scriptProcessorNode['connect'](SDL3.audioContext['destination']);

            if (SDL3.audioContext.state === 'suspended') {  // uhoh, autoplay is blocked.
                SDL3.audio_playback.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate);
                SDL3.audio_playback.silenceBuffer.getChannelData(0).fill(0.0);
                var silence_callback = function() {
                    // Poll for a user gesture; once the user has interacted
                    // with the page we're allowed to resume the context.
                    if ((typeof navigator.userActivation) !== 'undefined') {
                        if (navigator.userActivation.hasBeenActive) {
                            SDL3.audioContext.resume();
                        }
                    }

                    // the buffer that gets filled here just gets ignored, so the app can make progress
                    // and/or avoid flooding audio queues until we can actually play audio.
                    SDL3.audio_playback.currentPlaybackBuffer = SDL3.audio_playback.silenceBuffer;
                    dynCall('ip', $2, [$3]);
                    SDL3.audio_playback.currentPlaybackBuffer = undefined;
                };

                SDL3.audio_playback.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000);
            }
        }, device->spec.channels, device->sample_frames, SDL_PlaybackAudioThreadIterate, device);
    }

    return true;
}

// Driver bootstrap: fill in the impl vtable and probe the browser for Web
// Audio (playback) and getUserMedia (recording) support. Returns false if
// no AudioContext flavor exists at all.
static bool EMSCRIPTENAUDIO_Init(SDL_AudioDriverImpl *impl)
{
    bool available, recording_available;

    impl->OpenDevice = EMSCRIPTENAUDIO_OpenDevice;
    impl->CloseDevice = EMSCRIPTENAUDIO_CloseDevice;
    impl->GetDeviceBuf = EMSCRIPTENAUDIO_GetDeviceBuf;
    impl->PlayDevice = EMSCRIPTENAUDIO_PlayDevice;
    impl->FlushRecording = EMSCRIPTENAUDIO_FlushRecording;
    impl->RecordDevice = EMSCRIPTENAUDIO_RecordDevice;

    impl->OnlyHasDefaultPlaybackDevice = true;

    // technically, this is just runs in idle time in the main thread, but it's close enough to a "thread" for our purposes.
    impl->ProvidesOwnCallbackThread = true;

    // check availability
    available = MAIN_THREAD_EM_ASM_INT({
        if (typeof(AudioContext) !== 'undefined') {
            return true;
        } else if (typeof(webkitAudioContext) !== 'undefined') {
            return true;
        }
        return false;
    });

    if (!available) {
        SDL_SetError("No audio context available");
    }

    recording_available = available && MAIN_THREAD_EM_ASM_INT({
        if ((typeof(navigator.mediaDevices) !== 'undefined') && (typeof(navigator.mediaDevices.getUserMedia) !== 'undefined')) {
            return true;
        } else if (typeof(navigator.webkitGetUserMedia) !== 'undefined') {
            return true;
        }
        return false;
    });

    impl->HasRecordingSupport = recording_available;
    impl->OnlyHasDefaultRecordingDevice = recording_available;

    return available;
}

AudioBootStrap EMSCRIPTENAUDIO_bootstrap = {
    "emscripten", "SDL emscripten audio driver", EMSCRIPTENAUDIO_Init, false, false
};

/* *INDENT-ON* */ // clang-format on

#endif // SDL_AUDIO_DRIVER_EMSCRIPTEN