summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/collection/boxed.rs34
-rw-r--r--src/collection/guard.rs16
-rw-r--r--src/collection/owned.rs17
-rw-r--r--src/collection/ref.rs34
-rw-r--r--src/collection/retry.rs127
-rw-r--r--src/key.rs25
-rw-r--r--src/lockable.rs96
-rw-r--r--src/mutex.rs156
-rw-r--r--src/mutex/mutex.rs5
-rw-r--r--src/poisonable.rs18
-rw-r--r--src/poisonable/error.rs2
-rw-r--r--src/poisonable/poisonable.rs4
-rw-r--r--src/rwlock.rs132
-rw-r--r--src/rwlock/rwlock.rs18
14 files changed, 658 insertions, 26 deletions
diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs
index b0d1e3b..c359098 100644
--- a/src/collection/boxed.rs
+++ b/src/collection/boxed.rs
@@ -532,3 +532,37 @@ where
self.into_iter()
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{Mutex, ThreadKey};
+
+ #[test]
+ fn non_duplicates_allowed() {
+ let mutex1 = Mutex::new(0);
+ let mutex2 = Mutex::new(1);
+ assert!(BoxedLockCollection::try_new([&mutex1, &mutex2]).is_some())
+ }
+
+ #[test]
+ fn duplicates_not_allowed() {
+ let mutex1 = Mutex::new(0);
+ assert!(BoxedLockCollection::try_new([&mutex1, &mutex1]).is_none())
+ }
+
+ #[test]
+ fn works_in_collection() {
+ let key = ThreadKey::get().unwrap();
+ let mutex1 = Mutex::new(0);
+ let mutex2 = Mutex::new(1);
+ let collection =
+ BoxedLockCollection::try_new(BoxedLockCollection::try_new([&mutex1, &mutex2]).unwrap())
+ .unwrap();
+
+ let guard = collection.lock(key);
+ assert!(mutex1.is_locked());
+ assert!(mutex2.is_locked());
+ drop(guard);
+ }
+}
diff --git a/src/collection/guard.rs b/src/collection/guard.rs
index 8857c5f..0b8a583 100644
--- a/src/collection/guard.rs
+++ b/src/collection/guard.rs
@@ -42,3 +42,19 @@ impl<'key, Guard, Key: Keyable> AsMut<Guard> for LockGuard<'key, Guard, Key> {
&mut self.guard
}
}
+
+#[cfg(test)]
+mod tests {
+ use crate::collection::OwnedLockCollection;
+ use crate::{RwLock, ThreadKey};
+
+ use super::*;
+
+ #[test]
+ fn guard_display_works() {
+ let key = ThreadKey::get().unwrap();
+ let lock = OwnedLockCollection::new(RwLock::new("Hello, world!"));
+ let guard = lock.read(key);
+ assert_eq!(guard.to_string(), "Hello, world!".to_string());
+ }
+}
diff --git a/src/collection/owned.rs b/src/collection/owned.rs
index f2b6cc9..9aa7460 100644
--- a/src/collection/owned.rs
+++ b/src/collection/owned.rs
@@ -393,3 +393,20 @@ impl<L: Sharable> OwnedLockCollection<L> {
guard.key
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::Mutex;
+
+ #[test]
+ fn can_be_extended() {
+ let mutex1 = Mutex::new(0);
+ let mutex2 = Mutex::new(1);
+ let mut collection = OwnedLockCollection::new(vec![mutex1, mutex2]);
+
+ collection.extend([Mutex::new(2)]);
+
+ assert_eq!(collection.data.len(), 3);
+ }
+}
diff --git a/src/collection/ref.rs b/src/collection/ref.rs
index 8b9e0f8..60abdfa 100644
--- a/src/collection/ref.rs
+++ b/src/collection/ref.rs
@@ -436,3 +436,37 @@ where
self.into_iter()
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{Mutex, ThreadKey};
+
+ #[test]
+ fn non_duplicates_allowed() {
+ let mutex1 = Mutex::new(0);
+ let mutex2 = Mutex::new(1);
+ assert!(RefLockCollection::try_new(&[&mutex1, &mutex2]).is_some())
+ }
+
+ #[test]
+ fn duplicates_not_allowed() {
+ let mutex1 = Mutex::new(0);
+ assert!(RefLockCollection::try_new(&[&mutex1, &mutex1]).is_none())
+ }
+
+ #[test]
+ fn works_in_collection() {
+ let key = ThreadKey::get().unwrap();
+ let mutex1 = Mutex::new(0);
+ let mutex2 = Mutex::new(1);
+ let collection0 = [&mutex1, &mutex2];
+ let collection1 = RefLockCollection::try_new(&collection0).unwrap();
+ let collection = RefLockCollection::try_new(&collection1).unwrap();
+
+ let guard = collection.lock(key);
+ assert!(mutex1.is_locked());
+ assert!(mutex2.is_locked());
+ drop(guard);
+ }
+}
diff --git a/src/collection/retry.rs b/src/collection/retry.rs
index 7aa4ef4..8a10fc3 100644
--- a/src/collection/retry.rs
+++ b/src/collection/retry.rs
@@ -30,32 +30,41 @@ unsafe impl<L: Lockable + Send + Sync> RawLock for RetryingLockCollection<L> {
let mut locks = Vec::new();
self.data.get_ptrs(&mut locks);
- 'outer: loop {
- // safety: we have the thread key
- locks[first_index].lock();
- for (i, lock) in locks.iter().enumerate() {
- if i == first_index {
- continue;
- }
+ if locks.is_empty() {
+ return;
+ }
+ unsafe {
+ 'outer: loop {
// safety: we have the thread key
- if !lock.try_lock() {
- for lock in locks.iter().take(i) {
- // safety: we already locked all of these
- lock.unlock();
+ locks[first_index].lock();
+ for (i, lock) in locks.iter().enumerate() {
+ if i == first_index {
+ continue;
}
- if first_index >= i {
- // safety: this is already locked and can't be unlocked
- // by the previous loop
- locks[first_index].unlock();
- }
+ // safety: we have the thread key
+ if !lock.try_lock() {
+ for lock in locks.iter().take(i) {
+ // safety: we already locked all of these
+ lock.unlock();
+ }
- first_index = i;
- continue 'outer;
+ if first_index >= i {
+ // safety: this is already locked and can't be unlocked
+ // by the previous loop
+ locks[first_index].unlock();
+ }
+
+ first_index = i;
+ continue 'outer;
+ }
}
+
+ // safety: we locked all the data
+ break;
}
- }
+ };
}
unsafe fn try_lock(&self) -> bool {
@@ -771,3 +780,83 @@ where
self.into_iter()
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::collection::BoxedLockCollection;
+ use crate::{Mutex, RwLock, ThreadKey};
+ use lock_api::{RawMutex, RawRwLock};
+
+ #[test]
+ fn nonduplicate_lock_references_are_allowed() {
+ let mutex1 = Mutex::new(0);
+ let mutex2 = Mutex::new(0);
+ assert!(RetryingLockCollection::try_new([&mutex1, &mutex2]).is_some());
+ }
+
+ #[test]
+ fn duplicate_lock_references_are_disallowed() {
+ let mutex = Mutex::new(0);
+ assert!(RetryingLockCollection::try_new([&mutex, &mutex]).is_none());
+ }
+
+ #[test]
+ fn locks_all_inner_mutexes() {
+ let key = ThreadKey::get().unwrap();
+ let mutex1 = Mutex::new(0);
+ let mutex2 = Mutex::new(0);
+ let collection = RetryingLockCollection::try_new([&mutex1, &mutex2]).unwrap();
+
+ let guard = collection.lock(key);
+
+ assert!(mutex1.is_locked());
+ assert!(mutex2.is_locked());
+
+ drop(guard);
+ }
+
+ #[test]
+ fn locks_all_inner_rwlocks() {
+ let key = ThreadKey::get().unwrap();
+ let rwlock1 = RwLock::new(0);
+ let rwlock2 = RwLock::new(0);
+ let collection = RetryingLockCollection::try_new([&rwlock1, &rwlock2]).unwrap();
+ // TODO Poisonable::read
+
+ let guard = collection.read(key);
+
+ assert!(rwlock1.is_locked());
+ assert!(rwlock2.is_locked());
+
+ drop(guard);
+ }
+
+ #[test]
+ fn works_with_other_collections() {
+ let key = ThreadKey::get().unwrap();
+ let mutex1 = Mutex::new(0);
+ let mutex2 = Mutex::new(0);
+ let collection = BoxedLockCollection::try_new(
+ RetryingLockCollection::try_new([&mutex1, &mutex2]).unwrap(),
+ )
+ .unwrap();
+
+ let guard = collection.lock(key);
+
+ assert!(mutex1.is_locked());
+ assert!(mutex2.is_locked());
+ drop(guard);
+ }
+
+ #[test]
+ fn extend_collection() {
+ let mutex1 = Mutex::new(0);
+ let mutex2 = Mutex::new(0);
+ let mut collection = RetryingLockCollection::new(vec![mutex1]);
+
+ collection.extend([mutex2]);
+
+ assert_eq!(collection.into_inner().len(), 2);
+ }
+}
diff --git a/src/key.rs b/src/key.rs
index 8d46817..654979d 100644
--- a/src/key.rs
+++ b/src/key.rs
@@ -83,7 +83,7 @@ impl ThreadKey {
}
/// A dumb lock that's just a wrapper for an [`AtomicBool`].
-#[derive(Debug, Default)]
+#[derive(Default)]
struct KeyCell {
is_locked: Cell<bool>,
}
@@ -101,3 +101,26 @@ impl KeyCell {
self.is_locked.set(false);
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn thread_key_returns_some_on_first_call() {
+ assert!(ThreadKey::get().is_some());
+ }
+
+ #[test]
+ fn thread_key_returns_none_on_second_call() {
+ let key = ThreadKey::get();
+ assert!(ThreadKey::get().is_none());
+ drop(key);
+ }
+
+ #[test]
+ fn dropping_thread_key_allows_reobtaining() {
+ drop(ThreadKey::get());
+ assert!(ThreadKey::get().is_some())
+ }
+}
diff --git a/src/lockable.rs b/src/lockable.rs
index 6d8e7b4..8742b96 100644
--- a/src/lockable.rs
+++ b/src/lockable.rs
@@ -622,3 +622,99 @@ unsafe impl<T: Sharable> Sharable for Vec<T> {}
unsafe impl<T: OwnedLockable, const N: usize> OwnedLockable for [T; N] {}
unsafe impl<T: OwnedLockable> OwnedLockable for Box<[T]> {}
unsafe impl<T: OwnedLockable> OwnedLockable for Vec<T> {}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::Mutex;
+
+ #[test]
+ fn array_get_ptrs_empty() {
+ let locks: [Mutex<()>; 0] = [];
+ let mut lock_ptrs = Vec::new();
+ locks.get_ptrs(&mut lock_ptrs);
+
+ assert!(lock_ptrs.is_empty());
+ }
+
+ #[test]
+ fn array_get_ptrs_length_one() {
+ let locks: [Mutex<i32>; 1] = [Mutex::new(1)];
+ let mut lock_ptrs = Vec::new();
+ locks.get_ptrs(&mut lock_ptrs);
+
+ assert_eq!(lock_ptrs.len(), 1);
+ unsafe { assert!(std::ptr::addr_eq(lock_ptrs[0], locks[0].raw())) }
+ }
+
+ #[test]
+ fn array_get_ptrs_length_two() {
+ let locks: [Mutex<i32>; 2] = [Mutex::new(1), Mutex::new(2)];
+ let mut lock_ptrs = Vec::new();
+ locks.get_ptrs(&mut lock_ptrs);
+
+ assert_eq!(lock_ptrs.len(), 2);
+ unsafe { assert!(std::ptr::addr_eq(lock_ptrs[0], locks[0].raw())) }
+ unsafe { assert!(std::ptr::addr_eq(lock_ptrs[1], locks[1].raw())) }
+ }
+
+ #[test]
+ fn vec_get_ptrs_empty() {
+ let locks: Vec<Mutex<()>> = Vec::new();
+ let mut lock_ptrs = Vec::new();
+ locks.get_ptrs(&mut lock_ptrs);
+
+ assert!(lock_ptrs.is_empty());
+ }
+
+ #[test]
+ fn vec_get_ptrs_length_one() {
+ let locks: Vec<Mutex<i32>> = vec![Mutex::new(1)];
+ let mut lock_ptrs = Vec::new();
+ locks.get_ptrs(&mut lock_ptrs);
+
+ assert_eq!(lock_ptrs.len(), 1);
+ unsafe { assert!(std::ptr::addr_eq(lock_ptrs[0], locks[0].raw())) }
+ }
+
+ #[test]
+ fn vec_get_ptrs_length_two() {
+ let locks: Vec<Mutex<i32>> = vec![Mutex::new(1), Mutex::new(2)];
+ let mut lock_ptrs = Vec::new();
+ locks.get_ptrs(&mut lock_ptrs);
+
+ assert_eq!(lock_ptrs.len(), 2);
+ unsafe { assert!(std::ptr::addr_eq(lock_ptrs[0], locks[0].raw())) }
+ unsafe { assert!(std::ptr::addr_eq(lock_ptrs[1], locks[1].raw())) }
+ }
+
+ #[test]
+ fn box_get_ptrs_empty() {
+ let locks: Box<[Mutex<()>]> = Box::from([]);
+ let mut lock_ptrs = Vec::new();
+ locks.get_ptrs(&mut lock_ptrs);
+
+ assert!(lock_ptrs.is_empty());
+ }
+
+ #[test]
+ fn box_get_ptrs_length_one() {
+ let locks: Box<[Mutex<i32>]> = vec![Mutex::new(1)].into_boxed_slice();
+ let mut lock_ptrs = Vec::new();
+ locks.get_ptrs(&mut lock_ptrs);
+
+ assert_eq!(lock_ptrs.len(), 1);
+ unsafe { assert!(std::ptr::addr_eq(lock_ptrs[0], locks[0].raw())) }
+ }
+
+ #[test]
+ fn box_get_ptrs_length_two() {
+ let locks: Box<[Mutex<i32>]> = vec![Mutex::new(1), Mutex::new(2)].into_boxed_slice();
+ let mut lock_ptrs = Vec::new();
+ locks.get_ptrs(&mut lock_ptrs);
+
+ assert_eq!(lock_ptrs.len(), 2);
+ unsafe { assert!(std::ptr::addr_eq(lock_ptrs[0], locks[0].raw())) }
+ unsafe { assert!(std::ptr::addr_eq(lock_ptrs[1], locks[1].raw())) }
+ }
+}
diff --git a/src/mutex.rs b/src/mutex.rs
index 433ab47..ae5efc8 100644
--- a/src/mutex.rs
+++ b/src/mutex.rs
@@ -28,6 +28,101 @@ pub type ParkingMutex<T> = Mutex<T, parking_lot::RawMutex>;
/// Locking the mutex on a thread that already locked it is impossible, due to
/// the requirement of the [`ThreadKey`]. Therefore, this will never deadlock.
///
+/// # Examples
+///
+/// ```
+/// use std::sync::Arc;
+/// use std::thread;
+/// use std::sync::mpsc;
+///
+/// use happylock::{Mutex, ThreadKey};
+///
+/// // Spawn a few threads to increment a shared variable (non-atomically),
+/// // and let the main thread know once all increments are done.
+/// //
+/// // Here we're using an Arc to share memory among threads, and the data
+/// // inside the Arc is protected with a mutex.
+/// const N: usize = 10;
+///
+/// let data = Arc::new(Mutex::new(0));
+///
+/// let (tx, rx) = mpsc::channel();
+/// for _ in 0..N {
+/// let (data, tx) = (Arc::clone(&data), tx.clone());
+/// thread::spawn(move || {
+/// let key = ThreadKey::get().unwrap();
+/// let mut data = data.lock(key);
+/// *data += 1;
+/// if *data == N {
+/// tx.send(()).unwrap();
+/// }
+/// // the lock is unlocked
+/// });
+/// }
+///
+/// rx.recv().unwrap();
+/// ```
+///
+/// To unlock a mutex guard sooner than the end of the enclosing scope, either
+/// create an inner scope, drop the guard manually, or call [`Mutex::unlock`].
+///
+/// ```
+/// use std::sync::Arc;
+/// use std::thread;
+///
+/// use happylock::{Mutex, ThreadKey};
+///
+/// const N: usize = 3;
+///
+/// let data_mutex = Arc::new(Mutex::new(vec![1, 2, 3, 4]));
+/// let res_mutex = Arc::new(Mutex::new(0));
+///
+/// let mut threads = Vec::with_capacity(N);
+/// (0..N).for_each(|_| {
+/// let data_mutex_clone = Arc::clone(&data_mutex);
+/// let res_mutex_clone = Arc::clone(&res_mutex);
+///
+/// threads.push(thread::spawn(move || {
+/// let mut key = ThreadKey::get().unwrap();
+///
+/// // Here we use a block to limit the lifetime of the lock guard.
+/// let result = {
+/// let mut data = data_mutex_clone.lock(&mut key);
+/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
+/// data.push(result);
+/// result
+/// // The mutex guard gets dropped here, so the lock is released
+/// };
+/// // The thread key is available again
+/// *res_mutex_clone.lock(key) += result;
+/// }));
+/// });
+///
+/// let mut key = ThreadKey::get().unwrap();
+/// let mut data = data_mutex.lock(&mut key);
+/// let result = data.iter().fold(0, |acc, x| acc + x * 2);
+/// data.push(result);
+///
+/// // We drop the `data` explicitly because it's not necessary anymore. This
+/// // allows other threads to start working on the data immediately. Dropping
+/// // the data also gives us access to the thread key, so we can lock
+/// // another mutex.
+/// drop(data);
+///
+/// // Here the mutex guard is not assigned to a variable and so, even if the
+/// // scope does not end after this line, the mutex is still released: there is
+/// // no deadlock.
+/// *res_mutex.lock(&mut key) += result;
+///
+/// threads.into_iter().for_each(|thread| {
+/// thread
+/// .join()
+/// .expect("The thread creation or execution failed!")
+/// });
+///
+/// assert_eq!(*res_mutex.lock(key), 800);
+/// ```
+///
/// [`lock`]: `Mutex::lock`
/// [`try_lock`]: `Mutex::try_lock`
/// [`ThreadKey`]: `crate::ThreadKey`
@@ -61,3 +156,64 @@ pub struct MutexGuard<'a, 'key: 'a, T: ?Sized + 'a, Key: Keyable + 'key, R: RawM
thread_key: Key,
_phantom: PhantomData<&'key ()>,
}
+
+#[cfg(test)]
+mod tests {
+ use crate::ThreadKey;
+
+ use super::*;
+
+ #[test]
+ fn unlocked_when_initialized() {
+ let lock: crate::Mutex<_> = Mutex::new("Hello, world!");
+
+ assert!(!lock.is_locked());
+ }
+
+ #[test]
+ fn locked_after_read() {
+ let key = ThreadKey::get().unwrap();
+ let lock: crate::Mutex<_> = Mutex::new("Hello, world!");
+
+ let guard = lock.lock(key);
+
+ assert!(lock.is_locked());
+ drop(guard)
+ }
+
+ #[test]
+ fn display_works_for_guard() {
+ let key = ThreadKey::get().unwrap();
+ let mutex: crate::Mutex<_> = Mutex::new("Hello, world!");
+ let guard = mutex.lock(key);
+ assert_eq!(guard.to_string(), "Hello, world!".to_string());
+ }
+
+ #[test]
+ fn display_works_for_ref() {
+ let mutex: crate::Mutex<_> = Mutex::new("Hello, world!");
+ let guard = unsafe { mutex.try_lock_no_key().unwrap() }; // TODO lock_no_key
+ assert_eq!(guard.to_string(), "Hello, world!".to_string());
+ }
+
+ #[test]
+ fn dropping_guard_releases_mutex() {
+ let mut key = ThreadKey::get().unwrap();
+ let mutex: crate::Mutex<_> = Mutex::new("Hello, world!");
+
+ let guard = mutex.lock(&mut key);
+ drop(guard);
+
+ assert!(!mutex.is_locked());
+ }
+
+ #[test]
+ fn dropping_ref_releases_mutex() {
+ let mutex: crate::Mutex<_> = Mutex::new("Hello, world!");
+
+ let guard = unsafe { mutex.try_lock_no_key().unwrap() };
+ drop(guard);
+
+ assert!(!mutex.is_locked());
+ }
+}
diff --git a/src/mutex/mutex.rs b/src/mutex/mutex.rs
index 89dfef9..d5f7f3a 100644
--- a/src/mutex/mutex.rs
+++ b/src/mutex/mutex.rs
@@ -199,6 +199,11 @@ impl<T: ?Sized, R: RawMutex> Mutex<T, R> {
}
}
+ /// Returns `true` if the mutex is currently locked
+ pub(crate) fn is_locked(&self) -> bool {
+ self.raw.is_locked()
+ }
+
/// Lock without a [`ThreadKey`]. It is undefined behavior to do this without
/// owning the [`ThreadKey`].
pub(crate) unsafe fn try_lock_no_key(&self) -> Option<MutexRef<'_, T, R>> {
diff --git a/src/poisonable.rs b/src/poisonable.rs
index 6cc234e..d664291 100644
--- a/src/poisonable.rs
+++ b/src/poisonable.rs
@@ -1,8 +1,6 @@
use std::marker::PhantomData;
use std::sync::atomic::AtomicBool;
-use crate::lockable::{Lockable, RawLock};
-
mod error;
mod flag;
mod guard;
@@ -92,3 +90,19 @@ pub type PoisonResult<Guard> = Result<Guard, PoisonError<Guard>>;
/// lock might not have been acquired for other reasons.
pub type TryLockPoisonableResult<'flag, 'key, G, Key> =
Result<PoisonGuard<'flag, 'key, G, Key>, TryLockPoisonableError<'flag, 'key, G, Key>>;
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{Mutex, ThreadKey};
+
+ #[test]
+ fn display_works() {
+ let key = ThreadKey::get().unwrap();
+ let mutex = Poisonable::new(Mutex::new("Hello, world!"));
+
+ let guard = mutex.lock(key).unwrap();
+
+ assert_eq!(guard.to_string(), "Hello, world!");
+ }
+}
diff --git a/src/poisonable/error.rs b/src/poisonable/error.rs
index 886b5fd..61f0f94 100644
--- a/src/poisonable/error.rs
+++ b/src/poisonable/error.rs
@@ -11,6 +11,7 @@ impl<Guard> fmt::Debug for PoisonError<Guard> {
}
impl<Guard> fmt::Display for PoisonError<Guard> {
+ #[cfg_attr(test, mutants::skip)]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"poisoned lock: another task failed inside".fmt(f)
}
@@ -150,6 +151,7 @@ impl<'flag, 'key, G, Key> fmt::Debug for TryLockPoisonableError<'flag, 'key, G,
}
impl<'flag, 'key, G, Key> fmt::Display for TryLockPoisonableError<'flag, 'key, G, Key> {
+ #[cfg_attr(test, mutants::skip)]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Self::Poisoned(..) => "poisoned lock: another task failed inside",
diff --git a/src/poisonable/poisonable.rs b/src/poisonable/poisonable.rs
index 61264ef..2c7eeff 100644
--- a/src/poisonable/poisonable.rs
+++ b/src/poisonable/poisonable.rs
@@ -282,7 +282,7 @@ impl<L: Lockable + RawLock> Poisonable<L> {
/// use happylock::{Mutex, Poisonable};
///
/// let mutex = Poisonable::new(Mutex::new(0));
- /// assert_eq!(mutex.inner_lock().unwrap().into_inner(), 0);
+ /// assert_eq!(mutex.into_inner_lock().unwrap().into_inner(), 0);
/// ```
pub fn into_inner_lock(self) -> PoisonResult<L> {
if self.is_poisoned() {
@@ -306,7 +306,7 @@ impl<L: Lockable + RawLock> Poisonable<L> {
///
/// let key = ThreadKey::get().unwrap();
/// let mut mutex = Poisonable::new(Mutex::new(0));
- /// *mutex.lock_mut().unwrap().as_mut() = 10;
+ /// *mutex.inner_lock_mut().unwrap().as_mut() = 10;
/// assert_eq!(*mutex.lock(key).unwrap(), 10);
/// ```
pub fn inner_lock_mut(&mut self) -> PoisonResult<&mut L> {
diff --git a/src/rwlock.rs b/src/rwlock.rs
index 5715f89..c1d9a78 100644
--- a/src/rwlock.rs
+++ b/src/rwlock.rs
@@ -40,7 +40,6 @@ pub type ParkingRwLock<T> = RwLock<T, parking_lot::RawRwLock>;
/// Locking the mutex on a thread that already locked it is impossible, due to
/// the requirement of the [`ThreadKey`]. Therefore, this will never deadlock.
///
-///
/// [`ThreadKey`]: `crate::ThreadKey`
/// [`Mutex`]: `crate::mutex::Mutex`
/// [`Deref`]: `std::ops::Deref`
@@ -112,3 +111,134 @@ pub struct RwLockWriteGuard<'a, 'key, T: ?Sized, Key: Keyable + 'key, R: RawRwLo
thread_key: Key,
_phantom: PhantomData<&'key ()>,
}
+
+#[cfg(test)]
+mod tests {
+ use crate::ThreadKey;
+
+ use super::*;
+
+ #[test]
+ fn unlocked_when_initialized() {
+ let key = ThreadKey::get().unwrap();
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+
+ assert!(!lock.is_locked());
+ assert!(lock.try_write(key).is_some());
+ }
+
+ #[test]
+ fn read_lock_unlocked_when_initialized() {
+ let key = ThreadKey::get().unwrap();
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+ let reader = ReadLock::new(&lock);
+
+ assert!(reader.try_lock(key).is_some());
+ }
+
+ #[test]
+ fn write_lock_unlocked_when_initialized() {
+ let key = ThreadKey::get().unwrap();
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+ let writer = WriteLock::new(&lock);
+
+ assert!(writer.try_lock(key).is_some());
+ }
+
+ #[test]
+ fn locked_after_read() {
+ let key = ThreadKey::get().unwrap();
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+
+ let guard = lock.read(key);
+
+ assert!(lock.is_locked());
+ drop(guard)
+ }
+
+ #[test]
+ fn locked_after_using_read_lock() {
+ let key = ThreadKey::get().unwrap();
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+ let reader = ReadLock::new(&lock);
+
+ let guard = reader.lock(key);
+
+ assert!(lock.is_locked());
+ drop(guard)
+ }
+
+ #[test]
+ fn locked_after_write() {
+ let key = ThreadKey::get().unwrap();
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+
+ let guard = lock.write(key);
+
+ assert!(lock.is_locked());
+ drop(guard)
+ }
+
+ #[test]
+ fn locked_after_using_write_lock() {
+ let key = ThreadKey::get().unwrap();
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+ let writer = WriteLock::new(&lock);
+
+ let guard = writer.lock(key);
+
+ assert!(lock.is_locked());
+ drop(guard)
+ }
+
+ #[test]
+ fn read_display_works() {
+ let key = ThreadKey::get().unwrap();
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+ let guard = lock.read(key);
+ assert_eq!(guard.to_string(), "Hello, world!".to_string());
+ }
+
+ #[test]
+ fn write_display_works() {
+ let key = ThreadKey::get().unwrap();
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+ let guard = lock.write(key);
+ assert_eq!(guard.to_string(), "Hello, world!".to_string());
+ }
+
+ #[test]
+ fn read_ref_display_works() {
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+ let guard = unsafe { lock.try_read_no_key().unwrap() };
+ assert_eq!(guard.to_string(), "Hello, world!".to_string());
+ }
+
+ #[test]
+ fn write_ref_display_works() {
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+ let guard = unsafe { lock.try_write_no_key().unwrap() };
+ assert_eq!(guard.to_string(), "Hello, world!".to_string());
+ }
+
+ #[test]
+ fn dropping_read_ref_releases_rwlock() {
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+
+ let guard = unsafe { lock.try_read_no_key().unwrap() };
+ drop(guard);
+
+ assert!(!lock.is_locked());
+ }
+
+ #[test]
+ fn dropping_write_guard_releases_rwlock() {
+ let mut key = ThreadKey::get().unwrap();
+ let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
+
+ let guard = lock.write(&mut key);
+ drop(guard);
+
+ assert!(!lock.is_locked());
+ }
+}
diff --git a/src/rwlock/rwlock.rs b/src/rwlock/rwlock.rs
index 5bff5a3..fddcf5a 100644
--- a/src/rwlock/rwlock.rs
+++ b/src/rwlock/rwlock.rs
@@ -5,7 +5,7 @@ use lock_api::RawRwLock;
use crate::key::Keyable;
-use super::{RwLock, RwLockReadGuard, RwLockReadRef, RwLockWriteGuard};
+use super::{RwLock, RwLockReadGuard, RwLockReadRef, RwLockWriteGuard, RwLockWriteRef};
impl<T, R: RawRwLock> RwLock<T, R> {
/// Creates a new instance of an `RwLock<T>` which is unlocked.
@@ -194,6 +194,17 @@ impl<T: ?Sized, R: RawRwLock> RwLock<T, R> {
}
}
+ /// Attempts to create an exclusive lock without a key. Locking this
+ /// without exclusive access to the key is undefined behavior.
+ pub(crate) unsafe fn try_write_no_key(&self) -> Option<RwLockWriteRef<'_, T, R>> {
+ if self.raw.try_lock_exclusive() {
+ // safety: the lock is locked first
+ Some(RwLockWriteRef(self, PhantomData))
+ } else {
+ None
+ }
+ }
+
/// Locks this `RwLock` with exclusive write access, blocking the current
/// until it can be acquired.
///
@@ -266,6 +277,11 @@ impl<T: ?Sized, R: RawRwLock> RwLock<T, R> {
}
}
+ /// Returns `true` if the rwlock is currently locked in any way
+ pub(crate) fn is_locked(&self) -> bool {
+ self.raw.is_locked()
+ }
+
	/// Unlocks shared access on the `RwLock`. This is undefined behavior if
	/// the data is still accessible.
pub(super) unsafe fn force_unlock_read(&self) {