Atlas - SDL_emscriptenaudio.c

Home / ext / SDL / src / audio / emscripten Lines: 1 | Size: 15414 bytes [Download] [Show on GitHub] [Search similar files] [Raw] [Raw (proxy)]
[FILE BEGIN]
1/* 2 Simple DirectMedia Layer 3 Copyright (C) 1997-2025 Sam Lantinga <[email protected]> 4 5 This software is provided 'as-is', without any express or implied 6 warranty. In no event will the authors be held liable for any damages 7 arising from the use of this software. 8 9 Permission is granted to anyone to use this software for any purpose, 10 including commercial applications, and to alter it and redistribute it 11 freely, subject to the following restrictions: 12 13 1. The origin of this software must not be misrepresented; you must not 14 claim that you wrote the original software. If you use this software 15 in a product, an acknowledgment in the product documentation would be 16 appreciated but is not required. 17 2. Altered source versions must be plainly marked as such, and must not be 18 misrepresented as being the original software. 19 3. This notice may not be removed or altered from any source distribution. 20*/ 21#include "SDL_internal.h" 22 23#ifdef SDL_AUDIO_DRIVER_EMSCRIPTEN 24 25#include "../SDL_sysaudio.h" 26#include "SDL_emscriptenaudio.h" 27 28#include <emscripten/emscripten.h> 29 30// just turn off clang-format for this whole file, this INDENT_OFF stuff on 31// each EM_ASM section is ugly. 32/* *INDENT-OFF* */ // clang-format off 33 34static Uint8 *EMSCRIPTENAUDIO_GetDeviceBuf(SDL_AudioDevice *device, int *buffer_size) 35{ 36 return device->hidden->mixbuf; 37} 38 39static bool EMSCRIPTENAUDIO_PlayDevice(SDL_AudioDevice *device, const Uint8 *buffer, int buffer_size) 40{ 41 const int framelen = SDL_AUDIO_FRAMESIZE(device->spec); 42 MAIN_THREAD_EM_ASM({ 43 /* Convert incoming buf pointer to a HEAPF32 offset. 
*/ 44 #ifdef __wasm64__ 45 var buf = $0 / 4; 46 #else 47 var buf = $0 >>> 2; 48 #endif 49 50 var SDL3 = Module['SDL3']; 51 var numChannels = SDL3.audio_playback.currentPlaybackBuffer['numberOfChannels']; 52 for (var c = 0; c < numChannels; ++c) { 53 var channelData = SDL3.audio_playback.currentPlaybackBuffer['getChannelData'](c); 54 if (channelData.length != $1) { 55 throw 'Web Audio playback buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!'; 56 } 57 58 for (var j = 0; j < $1; ++j) { 59 channelData[j] = HEAPF32[buf + (j * numChannels + c)]; 60 } 61 } 62 }, buffer, buffer_size / framelen); 63 return true; 64} 65 66 67static void EMSCRIPTENAUDIO_FlushRecording(SDL_AudioDevice *device) 68{ 69 // Do nothing, the new data will just be dropped. 70} 71 72static int EMSCRIPTENAUDIO_RecordDevice(SDL_AudioDevice *device, void *buffer, int buflen) 73{ 74 MAIN_THREAD_EM_ASM({ 75 var SDL3 = Module['SDL3']; 76 var numChannels = SDL3.audio_recording.currentRecordingBuffer.numberOfChannels; 77 for (var c = 0; c < numChannels; ++c) { 78 var channelData = SDL3.audio_recording.currentRecordingBuffer.getChannelData(c); 79 if (channelData.length != $1) { 80 throw 'Web Audio recording buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!'; 81 } 82 83 if (numChannels == 1) { // fastpath this a little for the common (mono) case. 
84 for (var j = 0; j < $1; ++j) { 85 setValue($0 + (j * 4), channelData[j], 'float'); 86 } 87 } else { 88 for (var j = 0; j < $1; ++j) { 89 setValue($0 + (((j * numChannels) + c) * 4), channelData[j], 'float'); 90 } 91 } 92 } 93 }, buffer, (buflen / sizeof(float)) / device->spec.channels); 94 95 return buflen; 96} 97 98static void EMSCRIPTENAUDIO_CloseDevice(SDL_AudioDevice *device) 99{ 100 if (!device->hidden) { 101 return; 102 } 103 104 MAIN_THREAD_EM_ASM({ 105 var SDL3 = Module['SDL3']; 106 if ($0) { 107 if (SDL3.audio_recording.silenceTimer !== undefined) { 108 clearInterval(SDL3.audio_recording.silenceTimer); 109 } 110 if (SDL3.audio_recording.stream !== undefined) { 111 var tracks = SDL3.audio_recording.stream.getAudioTracks(); 112 for (var i = 0; i < tracks.length; i++) { 113 SDL3.audio_recording.stream.removeTrack(tracks[i]); 114 } 115 } 116 if (SDL3.audio_recording.scriptProcessorNode !== undefined) { 117 SDL3.audio_recording.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {}; 118 SDL3.audio_recording.scriptProcessorNode.disconnect(); 119 } 120 if (SDL3.audio_recording.mediaStreamNode !== undefined) { 121 SDL3.audio_recording.mediaStreamNode.disconnect(); 122 } 123 SDL3.audio_recording = undefined; 124 } else { 125 if (SDL3.audio_playback.scriptProcessorNode != undefined) { 126 SDL3.audio_playback.scriptProcessorNode.disconnect(); 127 } 128 if (SDL3.audio_playback.silenceTimer !== undefined) { 129 clearInterval(SDL3.audio_playback.silenceTimer); 130 } 131 SDL3.audio_playback = undefined; 132 } 133 if ((SDL3.audioContext !== undefined) && (SDL3.audio_playback === undefined) && (SDL3.audio_recording === undefined)) { 134 SDL3.audioContext.close(); 135 SDL3.audioContext = undefined; 136 } 137 }, device->recording); 138 139 SDL_free(device->hidden->mixbuf); 140 SDL_free(device->hidden); 141 device->hidden = NULL; 142 143 SDL_AudioThreadFinalize(device); 144} 145 146EM_JS_DEPS(sdlaudio, "$autoResumeAudioContext,$dynCall"); 147 148static bool 
EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device) 149{ 150 // based on parts of library_sdl.js 151 152 // create context 153 const bool result = MAIN_THREAD_EM_ASM_INT({ 154 if (typeof(Module['SDL3']) === 'undefined') { 155 Module['SDL3'] = {}; 156 } 157 var SDL3 = Module['SDL3']; 158 SDL3.audio_playback = {}; 159 SDL3.audio_recording = {}; 160 161 if (!SDL3.audioContext) { 162 if (typeof(AudioContext) !== 'undefined') { 163 SDL3.audioContext = new AudioContext(); 164 } else if (typeof(webkitAudioContext) !== 'undefined') { 165 SDL3.audioContext = new webkitAudioContext(); 166 } 167 if (SDL3.audioContext) { 168 if ((typeof navigator.userActivation) === 'undefined') { 169 autoResumeAudioContext(SDL3.audioContext); 170 } 171 } 172 } 173 return (SDL3.audioContext !== undefined); 174 }, device->recording); 175 176 if (!result) { 177 return SDL_SetError("Web Audio API is not available!"); 178 } 179 180 device->spec.format = SDL_AUDIO_F32; // web audio only supports floats 181 182 // Initialize all variables that we clean on shutdown 183 device->hidden = (struct SDL_PrivateAudioData *)SDL_calloc(1, sizeof(*device->hidden)); 184 if (!device->hidden) { 185 return false; 186 } 187 188 // limit to native freq 189 device->spec.freq = MAIN_THREAD_EM_ASM_INT({ return Module['SDL3'].audioContext.sampleRate; }); 190 device->sample_frames = SDL_GetDefaultSampleFramesFromFreq(device->spec.freq) * 2; // double the buffer size, some browsers need more, and we'll just have to live with the latency. 
191 192 SDL_UpdatedAudioDeviceFormat(device); 193 194 if (!device->recording) { 195 device->hidden->mixbuf = (Uint8 *)SDL_malloc(device->buffer_size); 196 if (!device->hidden->mixbuf) { 197 return false; 198 } 199 SDL_memset(device->hidden->mixbuf, device->silence_value, device->buffer_size); 200 } 201 202 if (device->recording) { 203 /* The idea is to take the recording media stream, hook it up to an 204 audio graph where we can pass it through a ScriptProcessorNode 205 to access the raw PCM samples and push them to the SDL app's 206 callback. From there, we "process" the audio data into silence 207 and forget about it. 208 209 This should, strictly speaking, use MediaRecorder for recording, but 210 this API is cleaner to use and better supported, and fires a 211 callback whenever there's enough data to fire down into the app. 212 The downside is that we are spending CPU time silencing a buffer 213 that the audiocontext uselessly mixes into any playback. On the 214 upside, both of those things are not only run in native code in 215 the browser, they're probably SIMD code, too. MediaRecorder 216 feels like it's a pretty inefficient tapdance in similar ways, 217 to be honest. */ 218 219 MAIN_THREAD_EM_ASM({ 220 var SDL3 = Module['SDL3']; 221 var have_microphone = function(stream) { 222 //console.log('SDL audio recording: we have a microphone! 
Replacing silence callback.'); 223 if (SDL3.audio_recording.silenceTimer !== undefined) { 224 clearInterval(SDL3.audio_recording.silenceTimer); 225 SDL3.audio_recording.silenceTimer = undefined; 226 SDL3.audio_recording.silenceBuffer = undefined 227 } 228 SDL3.audio_recording.mediaStreamNode = SDL3.audioContext.createMediaStreamSource(stream); 229 SDL3.audio_recording.scriptProcessorNode = SDL3.audioContext.createScriptProcessor($1, $0, 1); 230 SDL3.audio_recording.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) { 231 if ((SDL3 === undefined) || (SDL3.audio_recording === undefined)) { return; } 232 audioProcessingEvent.outputBuffer.getChannelData(0).fill(0.0); 233 SDL3.audio_recording.currentRecordingBuffer = audioProcessingEvent.inputBuffer; 234 dynCall('ip', $2, [$3]); 235 }; 236 SDL3.audio_recording.mediaStreamNode.connect(SDL3.audio_recording.scriptProcessorNode); 237 SDL3.audio_recording.scriptProcessorNode.connect(SDL3.audioContext.destination); 238 SDL3.audio_recording.stream = stream; 239 }; 240 241 var no_microphone = function(error) { 242 //console.log('SDL audio recording: we DO NOT have a microphone! (' + error.name + ')...leaving silence callback running.'); 243 }; 244 245 // we write silence to the audio callback until the microphone is available (user approves use, etc). 
246 SDL3.audio_recording.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate); 247 SDL3.audio_recording.silenceBuffer.getChannelData(0).fill(0.0); 248 var silence_callback = function() { 249 SDL3.audio_recording.currentRecordingBuffer = SDL3.audio_recording.silenceBuffer; 250 dynCall('ip', $2, [$3]); 251 }; 252 253 SDL3.audio_recording.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000); 254 255 if ((navigator.mediaDevices !== undefined) && (navigator.mediaDevices.getUserMedia !== undefined)) { 256 navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(have_microphone).catch(no_microphone); 257 } else if (navigator.webkitGetUserMedia !== undefined) { 258 navigator.webkitGetUserMedia({ audio: true, video: false }, have_microphone, no_microphone); 259 } 260 }, device->spec.channels, device->sample_frames, SDL_RecordingAudioThreadIterate, device); 261 } else { 262 // setup a ScriptProcessorNode 263 MAIN_THREAD_EM_ASM({ 264 var SDL3 = Module['SDL3']; 265 SDL3.audio_playback.scriptProcessorNode = SDL3.audioContext['createScriptProcessor']($1, 0, $0); 266 SDL3.audio_playback.scriptProcessorNode['onaudioprocess'] = function (e) { 267 if ((SDL3 === undefined) || (SDL3.audio_playback === undefined)) { return; } 268 // if we're actually running the node, we don't need the fake callback anymore, so kill it. 269 if (SDL3.audio_playback.silenceTimer !== undefined) { 270 clearInterval(SDL3.audio_playback.silenceTimer); 271 SDL3.audio_playback.silenceTimer = undefined; 272 SDL3.audio_playback.silenceBuffer = undefined; 273 } 274 SDL3.audio_playback.currentPlaybackBuffer = e['outputBuffer']; 275 dynCall('ip', $2, [$3]); 276 }; 277 278 SDL3.audio_playback.scriptProcessorNode['connect'](SDL3.audioContext['destination']); 279 280 if (SDL3.audioContext.state === 'suspended') { // uhoh, autoplay is blocked. 
281 SDL3.audio_playback.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate); 282 SDL3.audio_playback.silenceBuffer.getChannelData(0).fill(0.0); 283 var silence_callback = function() { 284 if ((typeof navigator.userActivation) !== 'undefined') { 285 if (navigator.userActivation.hasBeenActive) { 286 SDL3.audioContext.resume(); 287 } 288 } 289 290 // the buffer that gets filled here just gets ignored, so the app can make progress 291 // and/or avoid flooding audio queues until we can actually play audio. 292 SDL3.audio_playback.currentPlaybackBuffer = SDL3.audio_playback.silenceBuffer; 293 dynCall('ip', $2, [$3]); 294 SDL3.audio_playback.currentPlaybackBuffer = undefined; 295 }; 296 297 SDL3.audio_playback.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000); 298 } 299 }, device->spec.channels, device->sample_frames, SDL_PlaybackAudioThreadIterate, device); 300 } 301 302 return true; 303} 304 305static bool EMSCRIPTENAUDIO_Init(SDL_AudioDriverImpl *impl) 306{ 307 bool available, recording_available; 308 309 impl->OpenDevice = EMSCRIPTENAUDIO_OpenDevice; 310 impl->CloseDevice = EMSCRIPTENAUDIO_CloseDevice; 311 impl->GetDeviceBuf = EMSCRIPTENAUDIO_GetDeviceBuf; 312 impl->PlayDevice = EMSCRIPTENAUDIO_PlayDevice; 313 impl->FlushRecording = EMSCRIPTENAUDIO_FlushRecording; 314 impl->RecordDevice = EMSCRIPTENAUDIO_RecordDevice; 315 316 impl->OnlyHasDefaultPlaybackDevice = true; 317 318 // technically, this is just runs in idle time in the main thread, but it's close enough to a "thread" for our purposes. 
319 impl->ProvidesOwnCallbackThread = true; 320 321 // check availability 322 available = MAIN_THREAD_EM_ASM_INT({ 323 if (typeof(AudioContext) !== 'undefined') { 324 return true; 325 } else if (typeof(webkitAudioContext) !== 'undefined') { 326 return true; 327 } 328 return false; 329 }); 330 331 if (!available) { 332 SDL_SetError("No audio context available"); 333 } 334 335 recording_available = available && MAIN_THREAD_EM_ASM_INT({ 336 if ((typeof(navigator.mediaDevices) !== 'undefined') && (typeof(navigator.mediaDevices.getUserMedia) !== 'undefined')) { 337 return true; 338 } else if (typeof(navigator.webkitGetUserMedia) !== 'undefined') { 339 return true; 340 } 341 return false; 342 }); 343 344 impl->HasRecordingSupport = recording_available; 345 impl->OnlyHasDefaultRecordingDevice = recording_available; 346 347 return available; 348} 349 350AudioBootStrap EMSCRIPTENAUDIO_bootstrap = { 351 "emscripten", "SDL emscripten audio driver", EMSCRIPTENAUDIO_Init, false, false 352}; 353 354/* *INDENT-ON* */ // clang-format on 355 356#endif // SDL_AUDIO_DRIVER_EMSCRIPTEN 357
[FILE END]
(C) 2025 0x4248. (C) 2025 4248 Media and 4248 Systems, all part of 0x4248. See the LICENCE files for more information. Not all files are by 0x4248; always check licensing.