@@ -395,83 +395,105 @@ static void uv__rwlock_srwlock_wrunlock(uv_rwlock_t* rwlock) {
395
395
396
396
397
397
/* Initialize the fallback read-write lock.
 *
 * A binary semaphore serves as the write lock and a critical section
 * guards the shared reader count. Returns 0 on success or a libuv error
 * code translated from the Win32 error when semaphore creation fails.
 */
static int uv__rwlock_fallback_init(uv_rwlock_t* rwlock) {
  HANDLE sem;

  /* Create the binary semaphore that acts as the write lock. */
  sem = CreateSemaphoreW(NULL, 1, 1, NULL);
  if (sem == NULL)
    return uv_translate_sys_error(GetLastError());
  rwlock->fallback_.write_lock_.sem = sem;

  /* Set up the critical section that protects the reader count. */
  InitializeCriticalSection(&rwlock->fallback_.read_lock_.cs);

  /* No readers hold the lock initially. */
  rwlock->fallback_.num_readers_ = 0;

  return 0;
}
414
412
415
413
416
414
/* Tear down the fallback read-write lock, releasing the critical section
 * and the write-lock semaphore created by uv__rwlock_fallback_init().
 */
static void uv__rwlock_fallback_destroy(uv_rwlock_t* rwlock) {
  DeleteCriticalSection(&rwlock->fallback_.read_lock_.cs);
  CloseHandle(rwlock->fallback_.write_lock_.sem);
}
420
418
421
419
422
420
/* Acquire the lock for reading, blocking until it is available.
 *
 * The first reader takes the write semaphore on behalf of all readers;
 * subsequent readers merely bump the count. A failed infinite wait is
 * unrecoverable, so it aborts via uv_fatal_error().
 */
static void uv__rwlock_fallback_rdlock(uv_rwlock_t* rwlock) {
  /* Serialize access to the reader count. */
  EnterCriticalSection(&rwlock->fallback_.read_lock_.cs);

  /* The first reader locks out writers by taking the write semaphore. */
  if (++rwlock->fallback_.num_readers_ == 1) {
    DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, INFINITE);
    if (r != WAIT_OBJECT_0)
      uv_fatal_error(GetLastError(), "WaitForSingleObject");
  }

  /* Done touching the reader count. */
  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
}
430
436
431
437
432
438
/* Try to acquire the lock for reading without blocking.
 *
 * Returns 0 on success, UV_EAGAIN when the lock is contended, a translated
 * system error when the semaphore wait fails, or UV_EIO on an unexpected
 * wait result.
 */
static int uv__rwlock_fallback_tryrdlock(uv_rwlock_t* rwlock) {
  int err;

  /* If the reader-count lock itself is contended, report EAGAIN. */
  if (!TryEnterCriticalSection(&rwlock->fallback_.read_lock_.cs))
    return UV_EAGAIN;

  err = 0;

  if (rwlock->fallback_.num_readers_ == 0) {
    /* No readers yet: try to take the write semaphore with a zero timeout
     * so writers are excluded while readers hold the lock.
     */
    DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, 0);
    if (r == WAIT_OBJECT_0)
      rwlock->fallback_.num_readers_++;
    else if (r == WAIT_TIMEOUT)
      err = UV_EAGAIN;
    else if (r == WAIT_FAILED)
      err = uv_translate_sys_error(GetLastError());
    else
      err = UV_EIO;
  } else {
    /* The write semaphore is already held on behalf of the existing
     * readers; just join them. Without this increment, a successful
     * tryrdlock would not be balanced by rdunlock's decrement, letting
     * rdunlock release the write semaphore while readers remain.
     */
    rwlock->fallback_.num_readers_++;
  }

  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);

  return err;
}
451
460
452
461
453
462
/* Release a read lock previously obtained with rdlock/tryrdlock.
 *
 * The last reader returns the write semaphore so writers can proceed.
 * A failed ReleaseSemaphore is unrecoverable and aborts the process.
 */
static void uv__rwlock_fallback_rdunlock(uv_rwlock_t* rwlock) {
  EnterCriticalSection(&rwlock->fallback_.read_lock_.cs);

  /* When the last reader leaves, hand the write semaphore back. */
  if (--rwlock->fallback_.num_readers_ == 0) {
    if (!ReleaseSemaphore(rwlock->fallback_.write_lock_.sem, 1, NULL))
      uv_fatal_error(GetLastError(), "ReleaseSemaphore");
  }

  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
}
461
472
462
473
463
474
/* Acquire the lock for writing, blocking until the write semaphore is
 * available. A failed infinite wait is unrecoverable and aborts.
 */
static void uv__rwlock_fallback_wrlock(uv_rwlock_t* rwlock) {
  DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, INFINITE);
  if (r != WAIT_OBJECT_0)
    uv_fatal_error(GetLastError(), "WaitForSingleObject");
}
466
479
467
480
468
481
/* Try to acquire the lock for writing without blocking.
 *
 * Returns 0 on success, UV_EAGAIN when the lock is held, a translated
 * system error when the wait fails, or UV_EIO on an unexpected result.
 */
static int uv__rwlock_fallback_trywrlock(uv_rwlock_t* rwlock) {
  /* A zero timeout makes the wait a non-blocking attempt. */
  DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, 0);

  if (r == WAIT_OBJECT_0)
    return 0;
  if (r == WAIT_TIMEOUT)
    return UV_EAGAIN;
  if (r == WAIT_FAILED)
    return uv_translate_sys_error(GetLastError());
  return UV_EIO;
}
471
492
472
493
473
494
/* Release the write lock by returning the write semaphore. A failed
 * ReleaseSemaphore is unrecoverable and aborts the process.
 */
static void uv__rwlock_fallback_wrunlock(uv_rwlock_t* rwlock) {
  if (!ReleaseSemaphore(rwlock->fallback_.write_lock_.sem, 1, NULL))
    uv_fatal_error(GetLastError(), "ReleaseSemaphore");
}
476
498
477
499
0 commit comments