use core::sync::atomic::{AtomicBool, Ordering};
/// A minimal spinlock: `false` = unlocked, `true` = locked.
pub struct Mutex(AtomicBool);

// SAFETY: the only state is an `AtomicBool`, and all accesses to it are
// atomic, so the lock may be shared and sent across threads. (These impls
// are technically redundant — `AtomicBool` is already `Send + Sync` — but
// are kept to document intent.)
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}

impl Mutex {
    /// Creates a new, unlocked mutex.
    #[inline]
    pub const fn new() -> Mutex {
        Mutex(AtomicBool::new(false))
    }

    /// Locks the mutex, spinning until it is acquired.
    ///
    /// # Safety
    ///
    /// UB occurs if the mutex is already locked by the current thread and the
    /// `unsafe_lock` feature is enabled.
    #[inline]
    pub unsafe fn lock(&self) {
        loop {
            // `try_lock_weak` can fail spuriously even if the spinlock is
            // not locked, but may be more efficient than `try_lock` when
            // called in a loop.
            if self.try_lock_weak() {
                break;
            }
            // Spin on a plain (Relaxed) load until the lock looks free;
            // this avoids hammering the line with failed compare-exchanges.
            while self.is_locked() {
                core::hint::spin_loop();
            }
        }
    }

    /// If the mutex is unlocked, it is locked, and this function returns
    /// `true`. Otherwise, `false` is returned.
    ///
    /// # Safety
    ///
    /// UB occurs if the mutex is already locked by the current thread and the
    /// `unsafe_lock` feature is enabled.
    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        // The reason for using a strong compare_exchange is explained here:
        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    /// Unlocks the mutex
    ///
    /// # Safety
    ///
    /// UB occurs if the mutex is already unlocked or if it has been locked on
    /// a different thread.
    #[inline]
    pub unsafe fn unlock(&self) {
        // Release ordering publishes the critical section's writes to the
        // next thread that acquires the lock.
        self.0.store(false, Ordering::Release);
    }

    /// Returns `true` if the mutex is currently locked.
    ///
    /// The Relaxed load is advisory (used for spin-waiting in `lock`);
    /// actual acquisition is decided by the compare-exchange.
    #[inline]
    pub unsafe fn is_locked(&self) -> bool {
        self.0.load(Ordering::Relaxed)
    }

    /// Weak try-lock: may fail spuriously even when the mutex is unlocked,
    /// but can be cheaper on some architectures; intended for the `lock` loop.
    #[inline]
    fn try_lock_weak(&self) -> bool {
        self.0
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }
}