summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorMica White <botahamec@gmail.com>2024-12-23 15:31:07 -0500
committerMica White <botahamec@gmail.com>2024-12-23 15:33:02 -0500
commit30d0f08b6073e9c2e545a3567838a9e1e885fea2 (patch)
tree5763e820ed5877b39ef9745e2a9380e665ced099 /src
parentfa4fe582f212bec3e4d9fe288aaac8c471b8e987 (diff)
Remove scopeguard
The scopeguard crate was being used for its `defer_on_unwind` macro. The problem was that it runs even if the runtime was already panicking. There aren't any changes to the macro which could have fixed this. I instead wrote my own function that catches unwinds from one specific closure and runs a recovery closure before resuming the panic.
Diffstat (limited to 'src')
-rw-r--r--src/collection/boxed.rs25
-rw-r--r--src/collection/owned.rs23
-rw-r--r--src/collection/ref.rs23
-rw-r--r--src/collection/retry.rs193
-rw-r--r--src/collection/utils.rs127
-rw-r--r--src/handle_unwind.rs9
-rw-r--r--src/lib.rs1
-rw-r--r--src/mutex/mutex.rs50
-rw-r--r--src/poisonable/error.rs12
-rw-r--r--src/poisonable/poisonable.rs2
-rw-r--r--src/rwlock/rwlock.rs101
11 files changed, 234 insertions, 332 deletions
diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs
index 3cfc336..0014cc3 100644
--- a/src/collection/boxed.rs
+++ b/src/collection/boxed.rs
@@ -1,5 +1,5 @@
use std::alloc::Layout;
-use std::cell::{RefCell, UnsafeCell};
+use std::cell::UnsafeCell;
use std::fmt::Debug;
use std::marker::PhantomData;
@@ -28,16 +28,7 @@ unsafe impl<L: Lockable> RawLock for BoxedLockCollection<L> {
}
unsafe fn raw_lock(&self) {
- let locks = self.locks();
- let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- utils::attempt_to_recover_locks_from_panic(&locked)
- };
-
- for lock in self.locks() {
- lock.raw_lock();
- locked.borrow_mut().push(*lock);
- }
+ utils::ordered_lock(self.locks())
}
unsafe fn raw_try_lock(&self) -> bool {
@@ -51,16 +42,7 @@ unsafe impl<L: Lockable> RawLock for BoxedLockCollection<L> {
}
unsafe fn raw_read(&self) {
- let locks = self.locks();
- let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- utils::attempt_to_recover_reads_from_panic(&locked)
- };
-
- for lock in self.locks() {
- lock.raw_read();
- locked.borrow_mut().push(*lock);
- }
+ utils::ordered_read(self.locks());
}
unsafe fn raw_try_read(&self) -> bool {
@@ -141,6 +123,7 @@ unsafe impl<L: Send> Send for BoxedLockCollection<L> {}
unsafe impl<L: Sync> Sync for BoxedLockCollection<L> {}
impl<L> Drop for BoxedLockCollection<L> {
+ #[mutants::skip]
fn drop(&mut self) {
self.locks.clear();
diff --git a/src/collection/owned.rs b/src/collection/owned.rs
index 3ea93b6..69680f4 100644
--- a/src/collection/owned.rs
+++ b/src/collection/owned.rs
@@ -1,4 +1,3 @@
-use std::cell::RefCell;
use std::marker::PhantomData;
use crate::lockable::{Lockable, LockableIntoInner, OwnedLockable, RawLock, Sharable};
@@ -21,16 +20,7 @@ unsafe impl<L: Lockable> RawLock for OwnedLockCollection<L> {
}
unsafe fn raw_lock(&self) {
- let locks = get_locks(&self.data);
- let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- utils::attempt_to_recover_locks_from_panic(&locked)
- };
-
- for lock in locks {
- lock.raw_lock();
- locked.borrow_mut().push(lock);
- }
+ utils::ordered_lock(&get_locks(&self.data))
}
unsafe fn raw_try_lock(&self) -> bool {
@@ -46,16 +36,7 @@ unsafe impl<L: Lockable> RawLock for OwnedLockCollection<L> {
}
unsafe fn raw_read(&self) {
- let locks = get_locks(&self.data);
- let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- utils::attempt_to_recover_reads_from_panic(&locked)
- };
-
- for lock in locks {
- lock.raw_read();
- locked.borrow_mut().push(lock);
- }
+ utils::ordered_read(&get_locks(&self.data))
}
unsafe fn raw_try_read(&self) -> bool {
diff --git a/src/collection/ref.rs b/src/collection/ref.rs
index 31ef173..b0b142e 100644
--- a/src/collection/ref.rs
+++ b/src/collection/ref.rs
@@ -1,4 +1,3 @@
-use std::cell::RefCell;
use std::fmt::Debug;
use std::marker::PhantomData;
@@ -53,16 +52,7 @@ unsafe impl<L: Lockable> RawLock for RefLockCollection<'_, L> {
}
unsafe fn raw_lock(&self) {
- let locks = &self.locks;
- let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- utils::attempt_to_recover_locks_from_panic(&locked)
- };
-
- for lock in &self.locks {
- lock.raw_lock();
- locked.borrow_mut().push(*lock);
- }
+ utils::ordered_lock(&self.locks)
}
unsafe fn raw_try_lock(&self) -> bool {
@@ -76,16 +66,7 @@ unsafe impl<L: Lockable> RawLock for RefLockCollection<'_, L> {
}
unsafe fn raw_read(&self) {
- let locks = &self.locks;
- let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- utils::attempt_to_recover_reads_from_panic(&locked)
- };
-
- for lock in &self.locks {
- lock.raw_read();
- locked.borrow_mut().push(*lock);
- }
+ utils::ordered_read(&self.locks)
}
unsafe fn raw_try_read(&self) -> bool {
diff --git a/src/collection/retry.rs b/src/collection/retry.rs
index fb2401e..28602f2 100644
--- a/src/collection/retry.rs
+++ b/src/collection/retry.rs
@@ -1,4 +1,5 @@
use crate::collection::utils;
+use crate::handle_unwind::handle_unwind;
use crate::lockable::{
Lockable, LockableAsMut, LockableIntoInner, OwnedLockable, RawLock, Sharable,
};
@@ -50,47 +51,46 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
}
let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- utils::attempt_to_recover_locks_from_panic(&locked)
- };
-
- unsafe {
- 'outer: loop {
- // safety: we have the thread key
- locks[first_index].raw_lock();
- for (i, lock) in locks.iter().enumerate() {
- if i == first_index {
- continue;
- }
-
- // If the lock has been killed, then this returns false
- // instead of panicking. This sounds like a problem, but if
- // it does return false, then the lock function is called
- // immediately after, causing a panic
+ handle_unwind(
+ || unsafe {
+ 'outer: loop {
// safety: we have the thread key
- if lock.raw_try_lock() {
- locked.borrow_mut().push(*lock)
- } else {
- for lock in locks.iter().take(i) {
- // safety: we already locked all of these
- lock.raw_unlock();
+ locks[first_index].raw_lock();
+ for (i, lock) in locks.iter().enumerate() {
+ if i == first_index {
+ continue;
}
- if first_index >= i {
- // safety: this is already locked and can't be unlocked
- // by the previous loop
- locks[first_index].raw_unlock();
+ // If the lock has been killed, then this returns false
+ // instead of panicking. This sounds like a problem, but if
+ // it does return false, then the lock function is called
+ // immediately after, causing a panic
+ // safety: we have the thread key
+ if lock.raw_try_lock() {
+ locked.borrow_mut().push(*lock)
+ } else {
+ for lock in locks.iter().take(i) {
+ // safety: we already locked all of these
+ lock.raw_unlock();
+ }
+
+ if first_index >= i {
+ // safety: this is already locked and can't be unlocked
+ // by the previous loop
+ locks[first_index].raw_unlock();
+ }
+
+ first_index = i;
+ continue 'outer;
}
-
- first_index = i;
- continue 'outer;
}
- }
- // safety: we locked all the data
- break;
- }
- };
+ // safety: we locked all the data
+ break;
+ }
+ },
+ || utils::attempt_to_recover_locks_from_panic(&locked),
+ )
}
unsafe fn raw_try_lock(&self) -> bool {
@@ -101,26 +101,25 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
}
let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- utils::attempt_to_recover_locks_from_panic(&locked)
- };
-
- unsafe {
- for (i, lock) in locks.iter().enumerate() {
- // safety: we have the thread key
- if lock.raw_try_lock() {
- locked.borrow_mut().push(*lock);
- } else {
- for lock in locks.iter().take(i) {
- // safety: we already locked all of these
- lock.raw_unlock();
+ handle_unwind(
+ || unsafe {
+ for (i, lock) in locks.iter().enumerate() {
+ // safety: we have the thread key
+ if lock.raw_try_lock() {
+ locked.borrow_mut().push(*lock);
+ } else {
+ for lock in locks.iter().take(i) {
+ // safety: we already locked all of these
+ lock.raw_unlock();
+ }
+ return false;
}
- return false;
}
- }
- }
- true
+ true
+ },
+ || utils::attempt_to_recover_locks_from_panic(&locked),
+ )
}
unsafe fn raw_unlock(&self) {
@@ -140,41 +139,40 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
}
let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- utils::attempt_to_recover_reads_from_panic(&locked)
- };
-
- 'outer: loop {
- // safety: we have the thread key
- locks[first_index].raw_read();
- for (i, lock) in locks.iter().enumerate() {
- if i == first_index {
- continue;
- }
-
+ handle_unwind(
+ || 'outer: loop {
// safety: we have the thread key
- if lock.raw_try_read() {
- locked.borrow_mut().push(*lock);
- } else {
- for lock in locks.iter().take(i) {
- // safety: we already locked all of these
- lock.raw_unlock_read();
+ locks[first_index].raw_read();
+ for (i, lock) in locks.iter().enumerate() {
+ if i == first_index {
+ continue;
}
- if first_index >= i {
- // safety: this is already locked and can't be unlocked
- // by the previous loop
- locks[first_index].raw_unlock_read();
- }
+ // safety: we have the thread key
+ if lock.raw_try_read() {
+ locked.borrow_mut().push(*lock);
+ } else {
+ for lock in locks.iter().take(i) {
+ // safety: we already locked all of these
+ lock.raw_unlock_read();
+ }
- first_index = i;
- continue 'outer;
+ if first_index >= i {
+ // safety: this is already locked and can't be unlocked
+ // by the previous loop
+ locks[first_index].raw_unlock_read();
+ }
+
+ first_index = i;
+ continue 'outer;
+ }
}
- }
- // safety: we locked all the data
- break;
- }
+ // safety: we locked all the data
+ break;
+ },
+ || utils::attempt_to_recover_reads_from_panic(&locked),
+ )
}
unsafe fn raw_try_read(&self) -> bool {
@@ -185,26 +183,25 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
}
let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- utils::attempt_to_recover_reads_from_panic(&locked)
- };
-
- unsafe {
- for (i, lock) in locks.iter().enumerate() {
- // safety: we have the thread key
- if lock.raw_try_read() {
- locked.borrow_mut().push(*lock);
- } else {
- for lock in locks.iter().take(i) {
- // safety: we already locked all of these
- lock.raw_unlock_read();
+ handle_unwind(
+ || unsafe {
+ for (i, lock) in locks.iter().enumerate() {
+ // safety: we have the thread key
+ if lock.raw_try_read() {
+ locked.borrow_mut().push(*lock);
+ } else {
+ for lock in locks.iter().take(i) {
+ // safety: we already locked all of these
+ lock.raw_unlock_read();
+ }
+ return false;
}
- return false;
}
- }
- }
- true
+ true
+ },
+ || utils::attempt_to_recover_reads_from_panic(&locked),
+ )
}
unsafe fn raw_unlock_read(&self) {
diff --git a/src/collection/utils.rs b/src/collection/utils.rs
index d845450..f418386 100644
--- a/src/collection/utils.rs
+++ b/src/collection/utils.rs
@@ -1,74 +1,113 @@
use std::cell::RefCell;
+use crate::handle_unwind::handle_unwind;
use crate::lockable::RawLock;
+pub unsafe fn ordered_lock(locks: &[&dyn RawLock]) {
+ let locked = RefCell::new(Vec::with_capacity(locks.len()));
+
+ handle_unwind(
+ || {
+ for lock in locks {
+ lock.raw_lock();
+ locked.borrow_mut().push(*lock);
+ }
+ },
+ || attempt_to_recover_locks_from_panic(&locked),
+ )
+}
+
+pub unsafe fn ordered_read(locks: &[&dyn RawLock]) {
+ let locked = RefCell::new(Vec::with_capacity(locks.len()));
+
+ handle_unwind(
+ || {
+ for lock in locks {
+ lock.raw_read();
+ locked.borrow_mut().push(*lock);
+ }
+ },
+ || attempt_to_recover_reads_from_panic(&locked),
+ )
+}
+
/// Locks the locks in the order they are given. This causes deadlock if the
/// locks contain duplicates, or if this is called by multiple threads with the
/// locks in different orders.
pub unsafe fn ordered_try_lock(locks: &[&dyn RawLock]) -> bool {
let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- // safety: everything in locked is locked
- attempt_to_recover_locks_from_panic(&locked)
- };
- unsafe {
- for (i, lock) in locks.iter().enumerate() {
- // safety: we have the thread key
- if lock.raw_try_lock() {
- locked.borrow_mut().push(*lock);
- } else {
- for lock in &locks[0..i] {
- // safety: this lock was already acquired
- lock.raw_unlock();
+ handle_unwind(
+ || unsafe {
+ for (i, lock) in locks.iter().enumerate() {
+ // safety: we have the thread key
+ if lock.raw_try_lock() {
+ locked.borrow_mut().push(*lock);
+ } else {
+ for lock in &locks[0..i] {
+ // safety: this lock was already acquired
+ lock.raw_unlock();
+ }
+ return false;
}
- return false;
}
- }
- true
- }
+ true
+ },
+ ||
+ // safety: everything in locked is locked
+ attempt_to_recover_locks_from_panic(&locked),
+ )
}
/// Locks the locks in the order they are given. This causes deadlock if this
/// is called by multiple threads with the locks in different orders.
pub unsafe fn ordered_try_read(locks: &[&dyn RawLock]) -> bool {
let locked = RefCell::new(Vec::with_capacity(locks.len()));
- scopeguard::defer_on_unwind! {
- // safety: everything in locked is locked
- attempt_to_recover_reads_from_panic(&locked)
- };
- unsafe {
- for (i, lock) in locks.iter().enumerate() {
- // safety: we have the thread key
- if lock.raw_try_read() {
- locked.borrow_mut().push(*lock);
- } else {
- for lock in &locks[0..i] {
- // safety: this lock was already acquired
- lock.raw_unlock_read();
+ handle_unwind(
+ || unsafe {
+ for (i, lock) in locks.iter().enumerate() {
+ // safety: we have the thread key
+ if lock.raw_try_read() {
+ locked.borrow_mut().push(*lock);
+ } else {
+ for lock in &locks[0..i] {
+ // safety: this lock was already acquired
+ lock.raw_unlock_read();
+ }
+ return false;
}
- return false;
}
- }
- true
- }
+ true
+ },
+ ||
+ // safety: everything in locked is locked
+ attempt_to_recover_reads_from_panic(&locked),
+ )
}
pub unsafe fn attempt_to_recover_locks_from_panic(locked: &RefCell<Vec<&dyn RawLock>>) {
- scopeguard::defer_on_unwind! { locked.borrow().iter().for_each(|l| l.kill()); };
- let mut locked = locked.borrow_mut();
- while let Some(locked_lock) = locked.pop() {
- locked_lock.raw_unlock();
- }
+ handle_unwind(
+ || {
+ let mut locked = locked.borrow_mut();
+ while let Some(locked_lock) = locked.pop() {
+ locked_lock.raw_unlock();
+ }
+ },
+ || locked.borrow().iter().for_each(|l| l.kill()),
+ )
}
pub unsafe fn attempt_to_recover_reads_from_panic(locked: &RefCell<Vec<&dyn RawLock>>) {
- scopeguard::defer_on_unwind! { locked.borrow().iter().for_each(|l| l.kill()); };
- let mut locked = locked.borrow_mut();
- while let Some(locked_lock) = locked.pop() {
- locked_lock.raw_unlock_read();
- }
+ handle_unwind(
+ || {
+ let mut locked = locked.borrow_mut();
+ while let Some(locked_lock) = locked.pop() {
+ locked_lock.raw_unlock_read();
+ }
+ },
+ || locked.borrow().iter().for_each(|l| l.kill()),
+ )
}
diff --git a/src/handle_unwind.rs b/src/handle_unwind.rs
new file mode 100644
index 0000000..d515449
--- /dev/null
+++ b/src/handle_unwind.rs
@@ -0,0 +1,9 @@
+use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
+
+pub fn handle_unwind<R, F: FnOnce() -> R, G: FnOnce()>(try_fn: F, catch: G) -> R {
+ let try_fn = AssertUnwindSafe(try_fn);
+ catch_unwind(try_fn).unwrap_or_else(|e| {
+ catch();
+ resume_unwind(e)
+ })
+}
diff --git a/src/lib.rs b/src/lib.rs
index ec43121..2139d6b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -164,6 +164,7 @@
//! [`OwnedLockCollection`]: `collection::OwnedLockCollection`
//! [`RetryingLockCollection`]: `collection::RetryingLockCollection`
+mod handle_unwind;
mod key;
pub mod collection;
diff --git a/src/mutex/mutex.rs b/src/mutex/mutex.rs
index e3f5303..080e043 100644
--- a/src/mutex/mutex.rs
+++ b/src/mutex/mutex.rs
@@ -1,9 +1,11 @@
use std::cell::UnsafeCell;
use std::fmt::Debug;
use std::marker::PhantomData;
+use std::panic::AssertUnwindSafe;
use lock_api::RawMutex;
+use crate::handle_unwind::handle_unwind;
use crate::key::Keyable;
use crate::lockable::{Lockable, LockableAsMut, LockableIntoInner, OwnedLockable, RawLock};
use crate::poisonable::PoisonFlag;
@@ -18,19 +20,9 @@ unsafe impl<T: ?Sized, R: RawMutex> RawLock for Mutex<T, R> {
unsafe fn raw_lock(&self) {
assert!(!self.poison.is_poisoned(), "The mutex has been killed");
- scopeguard::defer_on_unwind! {
- scopeguard::defer_on_unwind! { self.kill() };
- if self.raw_try_lock() {
- self.raw_unlock();
- } else {
- // We don't know whether this lock is locked by the current
- // thread, or another thread. There's not much we can do other
- // than kill it.
- self.kill();
- }
- }
-
- self.raw.lock()
+ // if the closure unwinds, then the mutex will be killed
+ let this = AssertUnwindSafe(self);
+ handle_unwind(|| this.raw.lock(), || self.kill())
}
unsafe fn raw_try_lock(&self) -> bool {
@@ -38,35 +30,15 @@ unsafe impl<T: ?Sized, R: RawMutex> RawLock for Mutex<T, R> {
return false;
}
- scopeguard::defer_on_unwind! {
- scopeguard::defer_on_unwind! { self.kill() };
- if self.raw_try_lock() {
- self.raw_unlock();
- } else {
- // We don't know whether this lock is locked by the current
- // thread, or another thread. There's not much we can do other
- // than kill it.
- self.kill();
- }
- }
-
- self.raw.try_lock()
+ // if the closure unwinds, then the mutex will be killed
+ let this = AssertUnwindSafe(self);
+ handle_unwind(|| this.raw.try_lock(), || self.kill())
}
unsafe fn raw_unlock(&self) {
- scopeguard::defer_on_unwind! {
- scopeguard::defer_on_unwind! { self.kill() };
- if self.raw_try_lock() {
- self.raw_unlock();
- } else {
- // We don't know whether this lock is locked by the current
- // thread, or another thread. There's not much we can do other
- // than kill it.
- self.kill();
- }
- }
-
- self.raw.unlock()
+ // if the closure unwinds, then the mutex will be killed
+ let this = AssertUnwindSafe(self);
+ handle_unwind(|| this.raw.unlock(), || self.kill())
}
// this is the closest thing to a read we can get, but Sharable isn't
diff --git a/src/poisonable/error.rs b/src/poisonable/error.rs
index 9e84693..f27c6ab 100644
--- a/src/poisonable/error.rs
+++ b/src/poisonable/error.rs
@@ -1,6 +1,5 @@
use core::fmt;
use std::error::Error;
-use std::ops::{Deref, DerefMut};
use super::{PoisonError, PoisonGuard, TryLockPoisonableError};
@@ -68,9 +67,7 @@ impl<Guard> PoisonError<Guard> {
pub fn into_inner(self) -> Guard {
self.guard
}
-}
-impl<T, Guard: Deref<Target = T>> PoisonError<Guard> {
/// Reaches into this error indicating that a lock is poisoned, returning a
/// reference to the underlying guard to allow access regardless.
///
@@ -82,6 +79,7 @@ impl<T, Guard: Deref<Target = T>> PoisonError<Guard> {
/// use std::thread;
///
/// use happylock::{Mutex, Poisonable, ThreadKey};
+ /// use happylock::poisonable::PoisonGuard;
///
/// let mutex = Arc::new(Poisonable::new(Mutex::new(HashSet::new())));
///
@@ -96,16 +94,14 @@ impl<T, Guard: Deref<Target = T>> PoisonError<Guard> {
///
/// let key = ThreadKey::get().unwrap();
/// let p_err = mutex.lock(key).unwrap_err();
- /// let data = p_err.get_ref();
+ /// let data: &PoisonGuard<_, _> = p_err.get_ref();
/// println!("recovered {} items", data.len());
/// ```
#[must_use]
- pub fn get_ref(&self) -> &T {
+ pub const fn get_ref(&self) -> &Guard {
&self.guard
}
-}
-impl<T, Guard: DerefMut<Target = T>> PoisonError<Guard> {
/// Reaches into this error indicating that a lock is poisoned, returning a
/// mutable reference to the underlying guard to allow access regardless.
///
@@ -136,7 +132,7 @@ impl<T, Guard: DerefMut<Target = T>> PoisonError<Guard> {
/// println!("recovered {} items", data.len());
/// ```
#[must_use]
- pub fn get_mut(&mut self) -> &mut T {
+ pub fn get_mut(&mut self) -> &mut Guard {
&mut self.guard
}
}
diff --git a/src/poisonable/poisonable.rs b/src/poisonable/poisonable.rs
index 581fb2f..0fc66a8 100644
--- a/src/poisonable/poisonable.rs
+++ b/src/poisonable/poisonable.rs
@@ -162,7 +162,7 @@ impl<L> Poisonable<L> {
///
/// let key = ThreadKey::get().unwrap();
/// let x = mutex.lock(key).unwrap_or_else(|mut e| {
- /// *e.get_mut() = 1;
+ /// **e.get_mut() = 1;
/// mutex.clear_poison();
/// e.into_inner()
/// });
diff --git a/src/rwlock/rwlock.rs b/src/rwlock/rwlock.rs
index 94c6062..03b2cfd 100644
--- a/src/rwlock/rwlock.rs
+++ b/src/rwlock/rwlock.rs
@@ -1,8 +1,11 @@
+use std::cell::UnsafeCell;
use std::fmt::Debug;
-use std::{cell::UnsafeCell, marker::PhantomData};
+use std::marker::PhantomData;
+use std::panic::AssertUnwindSafe;
use lock_api::RawRwLock;
+use crate::handle_unwind::handle_unwind;
use crate::key::Keyable;
use crate::lockable::{
Lockable, LockableAsMut, LockableIntoInner, OwnedLockable, RawLock, Sharable,
@@ -21,19 +24,9 @@ unsafe impl<T: ?Sized, R: RawRwLock> RawLock for RwLock<T, R> {
"The read-write lock has been killed"
);
- scopeguard::defer_on_unwind! {
- scopeguard::defer_on_unwind! { self.kill() };
- if self.raw_try_lock() {
- self.raw_unlock();
- } else {
- // We don't know whether this lock is locked by the current
- // thread, or another thread. There's not much we can do other
- // than kill it.
- self.kill();
- }
- }
-
- self.raw.lock_exclusive()
+ // if the closure unwinds, then the read-write lock will be killed
+ let this = AssertUnwindSafe(self);
+ handle_unwind(|| this.raw.lock_exclusive(), || self.kill())
}
unsafe fn raw_try_lock(&self) -> bool {
@@ -41,35 +34,15 @@ unsafe impl<T: ?Sized, R: RawRwLock> RawLock for RwLock<T, R> {
return false;
}
- scopeguard::defer_on_unwind! {
- scopeguard::defer_on_unwind! { self.kill() };
- if self.raw_try_lock() {
- self.raw_unlock();
- } else {
- // We don't know whether this lock is locked by the current
- // thread, or another thread. There's not much we can do other
- // than kill it.
- self.kill();
- }
- }
-
- self.raw.try_lock_exclusive()
+ // if the closure unwinds, then the read-write lock will be killed
+ let this = AssertUnwindSafe(self);
+ handle_unwind(|| this.raw.try_lock_exclusive(), || self.kill())
}
unsafe fn raw_unlock(&self) {
- scopeguard::defer_on_unwind! {
- scopeguard::defer_on_unwind! { self.kill() };
- if self.raw_try_lock() {
- self.raw_unlock();
- } else {
- // We don't know whether this lock is locked by the current
- // thread, or another thread. There's not much we can do other
- // than kill it.
- self.kill();
- }
- }
-
- self.raw.unlock_exclusive()
+ // if the closure unwinds, then the read-write lock will be killed
+ let this = AssertUnwindSafe(self);
+ handle_unwind(|| this.raw.unlock_exclusive(), || self.kill())
}
unsafe fn raw_read(&self) {
@@ -78,19 +51,9 @@ unsafe impl<T: ?Sized, R: RawRwLock> RawLock for RwLock<T, R> {
"The read-write lock has been killed"
);
- scopeguard::defer_on_unwind! {
- scopeguard::defer_on_unwind! { self.kill() };
- if self.raw_try_read() {
- self.raw_unlock_read();
- } else {
- // We don't know whether this lock is locked by the current
- // thread, or another thread. There's not much we can do other
- // than kill it.
- self.kill();
- }
- }
-
- self.raw.lock_shared()
+ // if the closure unwinds, then the read-write lock will be killed
+ let this = AssertUnwindSafe(self);
+ handle_unwind(|| this.raw.lock_shared(), || self.kill())
}
unsafe fn raw_try_read(&self) -> bool {
@@ -98,35 +61,15 @@ unsafe impl<T: ?Sized, R: RawRwLock> RawLock for RwLock<T, R> {
return false;
}
- scopeguard::defer_on_unwind! {
- scopeguard::defer_on_unwind! { self.kill() };
- if self.raw_try_read() {
- self.raw_unlock_read();
- } else {
- // We don't know whether this lock is locked by the current
- // thread, or another thread. There's not much we can do other
- // than kill it.
- self.kill();
- }
- }
-
- self.raw.try_lock_shared()
+ // if the closure unwinds, then the read-write lock will be killed
+ let this = AssertUnwindSafe(self);
+ handle_unwind(|| this.raw.try_lock_shared(), || self.kill())
}
unsafe fn raw_unlock_read(&self) {
- scopeguard::defer_on_unwind! {
- scopeguard::defer_on_unwind! { self.kill() };
- if self.raw_try_read() {
- self.raw_unlock_read();
- } else {
- // We don't know whether this lock is locked by the current
- // thread, or another thread. There's not much we can do other
- // than kill it.
- self.kill();
- }
- }
-
- self.raw.unlock_shared()
+ // if the closure unwinds, then the read-write lock will be killed
+ let this = AssertUnwindSafe(self);
+ handle_unwind(|| this.raw.unlock_shared(), || self.kill())
}
}