summaryrefslogtreecommitdiff
path: root/src/spin.rs
diff options
context:
space:
mode:
Diffstat (limited to 'src/spin.rs')
-rwxr-xr-xsrc/spin.rs68
1 files changed, 68 insertions, 0 deletions
diff --git a/src/spin.rs b/src/spin.rs
new file mode 100755
index 0000000..c81a1e7
--- /dev/null
+++ b/src/spin.rs
@@ -0,0 +1,68 @@
+use core::sync::atomic::{AtomicBool, Ordering};
+
/// A minimal spinlock built on a single `AtomicBool`.
///
/// `false` means unlocked, `true` means locked.
pub struct Mutex(AtomicBool);

// `AtomicBool` is already `Send + Sync`, so these impls are redundant, but
// they state the intent explicitly.
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}

impl Mutex {
    /// Creates a new, unlocked mutex.
    #[inline]
    pub const fn new() -> Mutex {
        Mutex(AtomicBool::new(false))
    }

    /// Locks the mutex, spinning until it becomes available.
    ///
    /// # Safety
    ///
    /// UB occurs if the mutex is already locked by the current thread and the
    /// `unsafe_lock` feature is enabled.
    #[inline]
    pub unsafe fn lock(&self) {
        loop {
            // `try_lock_weak` can fail spuriously even when the lock is free,
            // but may be more efficient than `try_lock` when called in a loop.
            if self.try_lock_weak() {
                break;
            }

            // Spin on a plain load until the lock looks free instead of
            // hammering the cache line with compare-exchange attempts.
            while self.is_locked() {
                core::hint::spin_loop();
            }
        }
    }

    /// If the mutex is unlocked, it is locked, and this function returns
    /// `true`. Otherwise, `false` is returned.
    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        // The reason for using a strong compare_exchange is explained here:
        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    /// Unlocks the mutex
    ///
    /// # Safety
    ///
    /// UB occurs if the mutex is already unlocked or if it has been locked on
    /// a different thread.
    #[inline]
    pub unsafe fn unlock(&self) {
        // Release pairs with the Acquire in `try_lock`/`try_lock_weak` so
        // writes made inside the critical section are visible to the next
        // thread that acquires the lock.
        self.0.store(false, Ordering::Release);
    }

    /// Returns `true` if the mutex is currently locked.
    #[inline]
    pub unsafe fn is_locked(&self) -> bool {
        self.0.load(Ordering::Relaxed)
    }

    /// Weak variant of `try_lock`: may fail spuriously even when the mutex is
    /// unlocked, but can compile to more efficient code on some targets.
    #[inline]
    fn try_lock_weak(&self) -> bool {
        self.0
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }
}