/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2025 Sam Lantinga <[email protected]>

  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/

// The Linux backend uses io_uring for asynchronous i/o, and falls back to
// the "generic" threadpool implementation if liburing isn't available or
// fails for some other reason.

#include "SDL_internal.h"

#ifdef HAVE_LIBURING_H

#include "../SDL_sysasyncio.h"

#include <liburing.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>   // for strerror()
#include <sys/stat.h> // for fstat()

static SDL_InitState liburing_init;

// We could add a whole bootstrap thing like the audio/video/etc subsystems use, but let's keep this simple for now.
static bool (*CreateAsyncIOQueue)(SDL_AsyncIOQueue *queue);
static void (*QuitAsyncIO)(void);
static bool (*AsyncIOFromFile)(const char *file, const char *mode, SDL_AsyncIO *asyncio);

// we never link directly to liburing.
// (this says "-ffi" which sounds like a scripting language binding thing, but the non-ffi version
// is static-inline code we can't look up with dlsym. This is by design.)
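// For illustration only (the real loading path is LoadLibUring/LoadLibUringSyms
// below): resolving one exported symbol from the -ffi library named by
// SDL_DRIVER_LIBURING_DYNAMIC looks roughly like this sketch. The `handle` and
// `submit` locals are hypothetical names for this example.
//
//   void *handle = SDL_LoadObject("liburing-ffi.so.2");
//   int (*submit)(struct io_uring *) =
//       (int (*)(struct io_uring *)) SDL_LoadFunction(handle, "io_uring_submit");
//   // ...call submit(&ring) as needed, then SDL_UnloadObject(handle) at shutdown.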
#define SDL_DRIVER_LIBURING_DYNAMIC "liburing-ffi.so.2"
static const char *liburing_library = SDL_DRIVER_LIBURING_DYNAMIC;
static void *liburing_handle = NULL;

SDL_ELF_NOTE_DLOPEN(
    "io-io_uring",
    "Support for async IO through liburing",
    SDL_ELF_NOTE_DLOPEN_PRIORITY_SUGGESTED,
    SDL_DRIVER_LIBURING_DYNAMIC
)

#define SDL_LIBURING_FUNCS \
    SDL_LIBURING_FUNC(int, io_uring_queue_init, (unsigned entries, struct io_uring *ring, unsigned flags)) \
    SDL_LIBURING_FUNC(struct io_uring_probe *, io_uring_get_probe, (void)) \
    SDL_LIBURING_FUNC(void, io_uring_free_probe, (struct io_uring_probe *probe)) \
    SDL_LIBURING_FUNC(int, io_uring_opcode_supported, (const struct io_uring_probe *p, int op)) \
    SDL_LIBURING_FUNC(struct io_uring_sqe *, io_uring_get_sqe, (struct io_uring *ring)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_read, (struct io_uring_sqe *sqe, int fd, void *buf, unsigned nbytes, __u64 offset)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_write, (struct io_uring_sqe *sqe, int fd, const void *buf, unsigned nbytes, __u64 offset)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_close, (struct io_uring_sqe *sqe, int fd)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_fsync, (struct io_uring_sqe *sqe, int fd, unsigned fsync_flags)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_cancel, (struct io_uring_sqe *sqe, void *user_data, int flags)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_timeout, (struct io_uring_sqe *sqe, struct __kernel_timespec *ts, unsigned count, unsigned flags)) \
    SDL_LIBURING_FUNC(void, io_uring_prep_nop, (struct io_uring_sqe *sqe)) \
    SDL_LIBURING_FUNC(void, io_uring_sqe_set_data, (struct io_uring_sqe *sqe, void *data)) \
    SDL_LIBURING_FUNC(void, io_uring_sqe_set_flags, (struct io_uring_sqe *sqe, unsigned flags)) \
    SDL_LIBURING_FUNC(int, io_uring_submit, (struct io_uring *ring)) \
    SDL_LIBURING_FUNC(int, io_uring_peek_cqe, (struct io_uring *ring, struct io_uring_cqe **cqe_ptr)) \
    SDL_LIBURING_FUNC(int, io_uring_wait_cqe, (struct io_uring *ring, struct io_uring_cqe **cqe_ptr)) \
    SDL_LIBURING_FUNC(int, io_uring_wait_cqe_timeout, (struct io_uring *ring, struct io_uring_cqe **cqe_ptr, struct __kernel_timespec *ts)) \
    SDL_LIBURING_FUNC(void, io_uring_cqe_seen, (struct io_uring *ring, struct io_uring_cqe *cqe)) \
    SDL_LIBURING_FUNC(void, io_uring_queue_exit, (struct io_uring *ring))

#define SDL_LIBURING_FUNC(ret, fn, args) typedef ret (*SDL_fntype_##fn) args;
SDL_LIBURING_FUNCS
#undef SDL_LIBURING_FUNC

typedef struct SDL_LibUringFunctions
{
    #define SDL_LIBURING_FUNC(ret, fn, args) SDL_fntype_##fn fn;
    SDL_LIBURING_FUNCS
    #undef SDL_LIBURING_FUNC
} SDL_LibUringFunctions;

static SDL_LibUringFunctions liburing;


typedef struct LibUringAsyncIOQueueData
{
    SDL_Mutex *sqe_lock;
    SDL_Mutex *cqe_lock;
    struct io_uring ring;
    SDL_AtomicInt num_waiting;
} LibUringAsyncIOQueueData;


static void UnloadLibUringLibrary(void)
{
    if (liburing_handle) {
        SDL_UnloadObject(liburing_handle);
        liburing_handle = NULL;
    }
    SDL_zero(liburing);
}

static bool LoadLibUringSyms(void)
{
    #define SDL_LIBURING_FUNC(ret, fn, args) { \
        liburing.fn = (SDL_fntype_##fn) SDL_LoadFunction(liburing_handle, #fn); \
        if (!liburing.fn) { \
            return false; \
        } \
    }
    SDL_LIBURING_FUNCS
    #undef SDL_LIBURING_FUNC
    return true;
}
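// To make the X-macro above concrete: for the io_uring_submit entry, the three
// SDL_LIBURING_FUNC expansions above generate (roughly) the following, which
// follows deterministically from the macro definitions:
//
//   typedef int (*SDL_fntype_io_uring_submit)(struct io_uring *ring);  // pointer type
//   struct SDL_LibUringFunctions { /* ... */ SDL_fntype_io_uring_submit io_uring_submit; /* ... */ };
//   liburing.io_uring_submit = (SDL_fntype_io_uring_submit) SDL_LoadFunction(liburing_handle, "io_uring_submit");
//
// Adding a new liburing entry point therefore only requires one new line in
// SDL_LIBURING_FUNCS.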
// We rely on the presence of liburing to handle io_uring for us. The alternative
// is making direct syscalls into the kernel, which is undesirable. liburing
// shields us from that, and also smooths over some kernel version differences, etc.
static bool LoadLibUring(void)
{
    bool result = true;

    if (!liburing_handle) {
        liburing_handle = SDL_LoadObject(liburing_library);
        if (!liburing_handle) {
            result = false;
            // Don't call SDL_SetError(): SDL_LoadObject already did.
        } else {
            result = LoadLibUringSyms();
            if (result) {
                static const int needed_ops[] = {
                    IORING_OP_NOP,
                    IORING_OP_FSYNC,
                    IORING_OP_TIMEOUT,
                    IORING_OP_CLOSE,
                    IORING_OP_READ,
                    IORING_OP_WRITE,
                    IORING_OP_ASYNC_CANCEL
                };

                struct io_uring_probe *probe = liburing.io_uring_get_probe();
                if (!probe) {
                    result = false;
                } else {
                    for (int i = 0; i < SDL_arraysize(needed_ops); i++) {
                        // must go through the loaded function table; we never link liburing directly.
                        if (!liburing.io_uring_opcode_supported(probe, needed_ops[i])) {
                            result = false;
                            break;
                        }
                    }
                    liburing.io_uring_free_probe(probe);
                }
            }

            if (!result) {
                UnloadLibUringLibrary();
            }
        }
    }
    return result;
}

static bool liburing_SetError(const char *what, int err)
{
    SDL_assert(err <= 0);
    return SDL_SetError("%s failed: %s", what, strerror(-err));
}

static Sint64 liburing_asyncio_size(void *userdata)
{
    const int fd = (int) (intptr_t) userdata;
    struct stat statbuf;
    if (fstat(fd, &statbuf) < 0) {
        SDL_SetError("fstat failed: %s", strerror(errno));
        return -1;
    }
    return ((Sint64) statbuf.st_size);
}

// you must hold sqe_lock when calling this!
static bool liburing_asyncioqueue_queue_task(void *userdata, SDL_AsyncIOTask *task)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;
    const int rc = liburing.io_uring_submit(&queuedata->ring);
    return (rc < 0) ? liburing_SetError("io_uring_submit", rc) : true;
}

static void liburing_asyncioqueue_cancel_task(void *userdata, SDL_AsyncIOTask *task)
{
    SDL_AsyncIOTask *cancel_task = (SDL_AsyncIOTask *) SDL_calloc(1, sizeof (*cancel_task));
    if (!cancel_task) {
        return; // oh well, the task can just finish on its own.
    }

    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;

    // have to hold a lock because otherwise two threads could get_sqe and submit while one request isn't fully set up.
    SDL_LockMutex(queuedata->sqe_lock);
    struct io_uring_sqe *sqe = liburing.io_uring_get_sqe(&queuedata->ring);
    if (!sqe) {
        SDL_UnlockMutex(queuedata->sqe_lock);
        SDL_free(cancel_task); // oh well, the task can just finish on its own.
        return;
    }

    cancel_task->app_userdata = task;
    liburing.io_uring_prep_cancel(sqe, task, 0);
    liburing.io_uring_sqe_set_data(sqe, cancel_task);
    liburing_asyncioqueue_queue_task(userdata, task);
    SDL_UnlockMutex(queuedata->sqe_lock);
}
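// A note on what ProcessCQE (below) will observe after the cancel above: in
// the successful case, io_uring posts *two* completions, roughly:
//
//   CQE 1: user_data == cancel_task, res == 0          -> the cancel op succeeded
//   CQE 2: user_data == task,        res == -ECANCELED -> the canceled i/o itself
//
// If the cancel arrives too late, CQE 1 instead carries a negative res (such as
// -ENOENT or -EALREADY, per the io_uring_prep_cancel documentation), the original
// task completes normally, and the failed cancel is simply ignored.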
static SDL_AsyncIOTask *ProcessCQE(LibUringAsyncIOQueueData *queuedata, struct io_uring_cqe *cqe)
{
    if (!cqe) {
        return NULL;
    }

    // (io_uring_cqe_get_data is static-inline in liburing.h, so it's safe to call directly.)
    SDL_AsyncIOTask *task = (SDL_AsyncIOTask *) io_uring_cqe_get_data(cqe);
    if (task) { // can be NULL if this was just a wakeup message, a NOP, etc.
        if (!task->queue) { // We leave `queue` blank to signify this was a task cancellation.
            SDL_AsyncIOTask *cancel_task = task;
            task = (SDL_AsyncIOTask *) cancel_task->app_userdata;
            SDL_free(cancel_task);
            if (cqe->res >= 0) { // cancel was successful?
                task->result = SDL_ASYNCIO_CANCELED;
            } else {
                task = NULL; // it already finished or was too far along to cancel, so we'll pick up the actual results later.
            }
        } else if (cqe->res < 0) {
            task->result = SDL_ASYNCIO_FAILURE;
            // !!! FIXME: fill in task->error.
        } else {
            if ((task->type == SDL_ASYNCIO_TASK_WRITE) && (((Uint64) cqe->res) < task->requested_size)) {
                task->result = SDL_ASYNCIO_FAILURE; // it's always a failure on short writes.
            }

            // don't explicitly mark it as COMPLETE; that's the default value and a linked task might have failed in an earlier operation and this would overwrite it.

            if ((task->type == SDL_ASYNCIO_TASK_READ) || (task->type == SDL_ASYNCIO_TASK_WRITE)) {
                task->result_size = (Uint64) cqe->res;
            }
        }

        if (task && (task->type == SDL_ASYNCIO_TASK_CLOSE) && task->flush) {
            task->flush = false;
            task = NULL; // don't return this one; it's a linked task, so it'll arrive in a later CQE.
        }
    }

    return task;
}

static SDL_AsyncIOTask *liburing_asyncioqueue_get_results(void *userdata)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;

    // have to hold a lock because otherwise two threads will get the same cqe until we mark it "seen". Copy and mark it right away, then process further.
    SDL_LockMutex(queuedata->cqe_lock);
    struct io_uring_cqe *cqe = NULL;
    const int rc = liburing.io_uring_peek_cqe(&queuedata->ring, &cqe);
    if (rc != 0) {
        SDL_assert(rc == -EAGAIN); // should only fail because nothing is available at the moment.
        SDL_UnlockMutex(queuedata->cqe_lock);
        return NULL;
    }

    struct io_uring_cqe cqe_copy;
    SDL_copyp(&cqe_copy, cqe); // this is only a few bytes.
    liburing.io_uring_cqe_seen(&queuedata->ring, cqe); // let io_uring use this slot again.
    SDL_UnlockMutex(queuedata->cqe_lock);

    return ProcessCQE(queuedata, &cqe_copy);
}

static SDL_AsyncIOTask *liburing_asyncioqueue_wait_results(void *userdata, Sint32 timeoutMS)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;
    struct io_uring_cqe *cqe = NULL;

    SDL_AddAtomicInt(&queuedata->num_waiting, 1);
    if (timeoutMS < 0) {
        liburing.io_uring_wait_cqe(&queuedata->ring, &cqe);
    } else {
        struct __kernel_timespec ts = { (Sint64) timeoutMS / SDL_MS_PER_SECOND, (Sint64) SDL_MS_TO_NS(timeoutMS % SDL_MS_PER_SECOND) };
        liburing.io_uring_wait_cqe_timeout(&queuedata->ring, &cqe, &ts);
    }
    SDL_AddAtomicInt(&queuedata->num_waiting, -1);

    // (we don't care if the wait failed for any reason, as the upcoming peek_cqe will report valid information. We just wanted the wait operation to block.)

    // each thing that peeks or waits for a completion _gets the same cqe_ until we mark it as seen. So when we wake up from the wait, lock the mutex and
    // then use peek to make sure we have a unique cqe, and other competing threads either get their own or nothing.
    return liburing_asyncioqueue_get_results(userdata); // this just happens to do all those things.
}
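// A worked example of the millisecond split above, since it's easy to misread:
// with timeoutMS = 1234, SDL_MS_PER_SECOND (1000) and SDL_MS_TO_NS (multiply
// by 1000000) give
//
//   ts.tv_sec  = 1234 / 1000              = 1
//   ts.tv_nsec = (1234 % 1000) * 1000000  = 234000000
//
// i.e. 1.234 seconds expressed as a struct __kernel_timespec.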
static void liburing_asyncioqueue_signal(void *userdata)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;
    const int num_waiting = SDL_GetAtomicInt(&queuedata->num_waiting);

    SDL_LockMutex(queuedata->sqe_lock);
    for (int i = 0; i < num_waiting; i++) { // !!! FIXME: is there a better way to do this than pushing a zero-timeout request for everything waiting?
        struct io_uring_sqe *sqe = liburing.io_uring_get_sqe(&queuedata->ring);
        if (sqe) {
            static struct __kernel_timespec ts; // no wait, just wake a thread as fast as this can land in the completion queue.
            liburing.io_uring_prep_timeout(sqe, &ts, 0, 0);
            liburing.io_uring_sqe_set_data(sqe, NULL);
        }
    }
    liburing.io_uring_submit(&queuedata->ring);

    SDL_UnlockMutex(queuedata->sqe_lock);
}

static void liburing_asyncioqueue_destroy(void *userdata)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) userdata;
    liburing.io_uring_queue_exit(&queuedata->ring);
    SDL_DestroyMutex(queuedata->sqe_lock);
    SDL_DestroyMutex(queuedata->cqe_lock);
    SDL_free(queuedata);
}

static bool SDL_SYS_CreateAsyncIOQueue_liburing(SDL_AsyncIOQueue *queue)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) SDL_calloc(1, sizeof (*queuedata));
    if (!queuedata) {
        return false;
    }

    SDL_SetAtomicInt(&queuedata->num_waiting, 0);

    queuedata->sqe_lock = SDL_CreateMutex();
    if (!queuedata->sqe_lock) {
        SDL_free(queuedata);
        return false;
    }

    queuedata->cqe_lock = SDL_CreateMutex();
    if (!queuedata->cqe_lock) {
        SDL_DestroyMutex(queuedata->sqe_lock);
        SDL_free(queuedata);
        return false;
    }

    // !!! FIXME: no idea how large the queue should be. Is 128 overkill or too small?
    const int rc = liburing.io_uring_queue_init(128, &queuedata->ring, 0);
    if (rc != 0) {
        SDL_DestroyMutex(queuedata->sqe_lock);
        SDL_DestroyMutex(queuedata->cqe_lock);
        SDL_free(queuedata);
        return liburing_SetError("io_uring_queue_init", rc);
    }

    static const SDL_AsyncIOQueueInterface SDL_AsyncIOQueue_liburing = {
        liburing_asyncioqueue_queue_task,
        liburing_asyncioqueue_cancel_task,
        liburing_asyncioqueue_get_results,
        liburing_asyncioqueue_wait_results,
        liburing_asyncioqueue_signal,
        liburing_asyncioqueue_destroy
    };

    SDL_copyp(&queue->iface, &SDL_AsyncIOQueue_liburing);
    queue->userdata = queuedata;
    return true;
}
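// For context, the vtable installed above backs the public SDL_AsyncIO API;
// an application exercising this backend looks roughly like the sketch below
// (error handling elided; see SDL_asyncio.h for the exact signatures):
//
//   SDL_AsyncIOQueue *q = SDL_CreateAsyncIOQueue();   // lands in the function above
//   SDL_AsyncIO *io = SDL_AsyncIOFromFile("data.bin", "rb");
//   SDL_ReadAsyncIO(io, buffer, 0, 4096, q, NULL);    // becomes an IORING_OP_READ sqe
//   SDL_AsyncIOOutcome outcome;
//   if (SDL_WaitAsyncIOResult(q, &outcome, -1)) { /* inspect outcome.bytes_transferred, etc. */ }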
static bool liburing_asyncio_read(void *userdata, SDL_AsyncIOTask *task)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) task->queue->userdata;
    const int fd = (int) (intptr_t) userdata;

    // !!! FIXME: `unsigned` is likely smaller than requested_size's Uint64. If we overflow it, we could try submitting multiple SQEs
    // !!! FIXME:  and make a note in the task that there are several in sequence.
    if (task->requested_size > ((Uint64) ~((unsigned) 0))) {
        return SDL_SetError("io_uring: i/o task is too large");
    }

    // have to hold a lock because otherwise two threads could get_sqe and submit while one request isn't fully set up.
    SDL_LockMutex(queuedata->sqe_lock);
    bool retval;
    struct io_uring_sqe *sqe = liburing.io_uring_get_sqe(&queuedata->ring);
    if (!sqe) {
        retval = SDL_SetError("io_uring: submission queue is full");
    } else {
        liburing.io_uring_prep_read(sqe, fd, task->buffer, (unsigned) task->requested_size, task->offset);
        liburing.io_uring_sqe_set_data(sqe, task);
        retval = task->queue->iface.queue_task(task->queue->userdata, task);
    }
    SDL_UnlockMutex(queuedata->sqe_lock);
    return retval;
}

static bool liburing_asyncio_write(void *userdata, SDL_AsyncIOTask *task)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) task->queue->userdata;
    const int fd = (int) (intptr_t) userdata;

    // !!! FIXME: `unsigned` is likely smaller than requested_size's Uint64. If we overflow it, we could try submitting multiple SQEs
    // !!! FIXME:  and make a note in the task that there are several in sequence.
    if (task->requested_size > ((Uint64) ~((unsigned) 0))) {
        return SDL_SetError("io_uring: i/o task is too large");
    }

    // have to hold a lock because otherwise two threads could get_sqe and submit while one request isn't fully set up.
    SDL_LockMutex(queuedata->sqe_lock);
    bool retval;
    struct io_uring_sqe *sqe = liburing.io_uring_get_sqe(&queuedata->ring);
    if (!sqe) {
        retval = SDL_SetError("io_uring: submission queue is full");
    } else {
        liburing.io_uring_prep_write(sqe, fd, task->buffer, (unsigned) task->requested_size, task->offset);
        liburing.io_uring_sqe_set_data(sqe, task);
        retval = task->queue->iface.queue_task(task->queue->userdata, task);
    }
    SDL_UnlockMutex(queuedata->sqe_lock);
    return retval;
}

static bool liburing_asyncio_close(void *userdata, SDL_AsyncIOTask *task)
{
    LibUringAsyncIOQueueData *queuedata = (LibUringAsyncIOQueueData *) task->queue->userdata;
    const int fd = (int) (intptr_t) userdata;

    // have to hold a lock because otherwise two threads could get_sqe and submit while one request isn't fully set up.
    SDL_LockMutex(queuedata->sqe_lock);
    bool retval;
    struct io_uring_sqe *sqe = liburing.io_uring_get_sqe(&queuedata->ring);
    if (!sqe) {
        retval = SDL_SetError("io_uring: submission queue is full");
    } else {
        if (task->flush) {
            struct io_uring_sqe *flush_sqe = sqe;
            sqe = liburing.io_uring_get_sqe(&queuedata->ring); // this will be our actual close task.
            if (!sqe) {
                liburing.io_uring_prep_nop(flush_sqe); // we already have the first sqe, just make it a NOP.
                liburing.io_uring_sqe_set_data(flush_sqe, NULL);
                task->queue->iface.queue_task(task->queue->userdata, task);
                SDL_UnlockMutex(queuedata->sqe_lock);
                return SDL_SetError("io_uring: submission queue is full");
            }
            liburing.io_uring_prep_fsync(flush_sqe, fd, IORING_FSYNC_DATASYNC);
            liburing.io_uring_sqe_set_data(flush_sqe, task);
            liburing.io_uring_sqe_set_flags(flush_sqe, IOSQE_IO_HARDLINK); // must complete before next sqe starts, and next sqe should run even if this fails.
        }

        liburing.io_uring_prep_close(sqe, fd);
        liburing.io_uring_sqe_set_data(sqe, task);

        retval = task->queue->iface.queue_task(task->queue->userdata, task);
    }
    SDL_UnlockMutex(queuedata->sqe_lock);
    return retval;
}
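// The IOSQE_IO_HARDLINK pairing in liburing_asyncio_close deserves a picture.
// With the hard link set on the fsync sqe, the kernel serializes the pair,
// conceptually equivalent to this synchronous sequence (a hard link, unlike a
// plain IOSQE_IO_LINK, keeps the chain going even if the first op fails):
//
//   fdatasync(fd);  // the fsync sqe completes first; its CQE carries `task`
//   close(fd);      // only starts once the fsync completion is posted
//
// ProcessCQE then uses task->flush to swallow the first CQE and report the
// task exactly once, when the close CQE arrives.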
static void liburing_asyncio_destroy(void *userdata)
{
    // this is only a Unix file descriptor; it should have been closed elsewhere.
}

static int PosixOpenModeFromString(const char *mode)
{
    // this is exactly the set of strings that SDL_AsyncIOFromFile promises will work.
    static const struct { const char *str; int flags; } mappings[] = {
        { "rb", O_RDONLY },
        { "wb", O_WRONLY | O_CREAT | O_TRUNC },
        { "r+b", O_RDWR },
        { "w+b", O_RDWR | O_CREAT | O_TRUNC }
    };

    for (int i = 0; i < SDL_arraysize(mappings); i++) {
        if (SDL_strcmp(mappings[i].str, mode) == 0) {
            return mappings[i].flags;
        }
    }

    SDL_assert(!"Shouldn't have reached this code");
    return 0;
}

static bool SDL_SYS_AsyncIOFromFile_liburing(const char *file, const char *mode, SDL_AsyncIO *asyncio)
{
    const int fd = open(file, PosixOpenModeFromString(mode), 0644);
    if (fd == -1) {
        return SDL_SetError("open failed: %s", strerror(errno));
    }

    static const SDL_AsyncIOInterface SDL_AsyncIOFile_liburing = {
        liburing_asyncio_size,
        liburing_asyncio_read,
        liburing_asyncio_write,
        liburing_asyncio_close,
        liburing_asyncio_destroy
    };

    SDL_copyp(&asyncio->iface, &SDL_AsyncIOFile_liburing);
    asyncio->userdata = (void *) (intptr_t) fd;
    return true;
}

static void SDL_SYS_QuitAsyncIO_liburing(void)
{
    UnloadLibUringLibrary();
}

static void MaybeInitializeLibUring(void)
{
    if (SDL_ShouldInit(&liburing_init)) {
        if (LoadLibUring()) {
            SDL_DebugLogBackend("asyncio", "liburing");
            CreateAsyncIOQueue = SDL_SYS_CreateAsyncIOQueue_liburing;
            QuitAsyncIO = SDL_SYS_QuitAsyncIO_liburing;
            AsyncIOFromFile = SDL_SYS_AsyncIOFromFile_liburing;
        } else { // can't use liburing? Use the "generic" threadpool implementation instead.
            SDL_DebugLogBackend("asyncio", "generic");
            CreateAsyncIOQueue = SDL_SYS_CreateAsyncIOQueue_Generic;
            QuitAsyncIO = SDL_SYS_QuitAsyncIO_Generic;
            AsyncIOFromFile = SDL_SYS_AsyncIOFromFile_Generic;
        }
        SDL_SetInitialized(&liburing_init, true);
    }
}

bool SDL_SYS_CreateAsyncIOQueue(SDL_AsyncIOQueue *queue)
{
    MaybeInitializeLibUring();
    return CreateAsyncIOQueue(queue);
}

bool SDL_SYS_AsyncIOFromFile(const char *file, const char *mode, SDL_AsyncIO *asyncio)
{
    MaybeInitializeLibUring();
    return AsyncIOFromFile(file, mode, asyncio);
}

void SDL_SYS_QuitAsyncIO(void)
{
    if (SDL_ShouldQuit(&liburing_init)) {
        QuitAsyncIO();
        CreateAsyncIOQueue = NULL;
        QuitAsyncIO = NULL;
        AsyncIOFromFile = NULL;
        SDL_SetInitialized(&liburing_init, false);
    }
}

#endif // defined HAVE_LIBURING_H