summary refs log tree commit diff
path: root/tests
diff options
context:
space:
mode:
author Botahamec <botahamec@outlook.com> 2025-02-28 16:09:11 -0500
committer Botahamec <botahamec@outlook.com> 2025-02-28 16:09:11 -0500
commit 4ba03be97e6cc7e790bbc9bfc18caaa228c8a262 (patch)
tree a257184577a93ddf240aba698755c2886188788b /tests
parent 4a5ec04a29cba07c5960792528bd66b0f99ee3ee (diff)
Scoped lock API
Diffstat (limited to 'tests')
-rw-r--r-- tests/evil_mutex.rs        | 20
-rw-r--r-- tests/evil_rwlock.rs       | 20
-rw-r--r-- tests/evil_try_mutex.rs    | 81
-rw-r--r-- tests/evil_try_rwlock.rs   | 90
-rw-r--r-- tests/evil_unlock_mutex.rs | 99
-rw-r--r-- tests/evil_unlock_rwlock.rs | 127
-rw-r--r-- tests/forget.rs            | 24
-rw-r--r-- tests/retry.rs             | 53
-rw-r--r-- tests/retry_rw.rs          | 34
9 files changed, 528 insertions, 20 deletions
diff --git a/tests/evil_mutex.rs b/tests/evil_mutex.rs
index 361fe4c..e10acc8 100644
--- a/tests/evil_mutex.rs
+++ b/tests/evil_mutex.rs
@@ -41,16 +41,16 @@ fn boxed_mutexes() {
let c_useless = Arc::clone(&useless_mutex);
let r = std::thread::spawn(move || {
- let mut key = ThreadKey::get().unwrap();
+ let key = ThreadKey::get().unwrap();
let collection = BoxedLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
- collection.lock(&mut key);
+ _ = collection.lock(key);
})
.join();
assert!(r.is_err());
- assert!(good_mutex.try_lock(&mut key).is_ok());
- assert!(evil_mutex.try_lock(&mut key).is_err());
- assert!(useless_mutex.try_lock(&mut key).is_ok());
+ assert!(good_mutex.scoped_try_lock(&mut key, |_| {}).is_ok());
+ assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok());
}
#[test]
@@ -64,15 +64,15 @@ fn retrying_mutexes() {
let c_useless = Arc::clone(&useless_mutex);
let r = std::thread::spawn(move || {
- let mut key = ThreadKey::get().unwrap();
+ let key = ThreadKey::get().unwrap();
let collection =
RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
- collection.lock(&mut key);
+ collection.lock(key);
})
.join();
assert!(r.is_err());
- assert!(good_mutex.try_lock(&mut key).is_ok());
- assert!(evil_mutex.try_lock(&mut key).is_err());
- assert!(useless_mutex.try_lock(&mut key).is_ok());
+ assert!(good_mutex.scoped_try_lock(&mut key, |_| {}).is_ok());
+ assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok());
}
diff --git a/tests/evil_rwlock.rs b/tests/evil_rwlock.rs
index 234847c..9eed8a8 100644
--- a/tests/evil_rwlock.rs
+++ b/tests/evil_rwlock.rs
@@ -53,16 +53,16 @@ fn boxed_rwlocks() {
let c_useless = Arc::clone(&useless_mutex);
let r = std::thread::spawn(move || {
- let mut key = ThreadKey::get().unwrap();
+ let key = ThreadKey::get().unwrap();
let collection = BoxedLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
- collection.lock(&mut key);
+ _ = collection.lock(key);
})
.join();
assert!(r.is_err());
- assert!(good_mutex.try_write(&mut key).is_ok());
- assert!(evil_mutex.try_write(&mut key).is_err());
- assert!(useless_mutex.try_write(&mut key).is_ok());
+ assert!(good_mutex.scoped_try_write(&mut key, |_| {}).is_ok());
+ assert!(evil_mutex.scoped_try_write(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_write(&mut key, |_| {}).is_ok());
}
#[test]
@@ -76,15 +76,15 @@ fn retrying_rwlocks() {
let c_useless = Arc::clone(&useless_mutex);
let r = std::thread::spawn(move || {
- let mut key = ThreadKey::get().unwrap();
+ let key = ThreadKey::get().unwrap();
let collection =
RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
- collection.lock(&mut key);
+ collection.lock(key);
})
.join();
assert!(r.is_err());
- assert!(good_mutex.try_write(&mut key).is_ok());
- assert!(evil_mutex.try_write(&mut key).is_err());
- assert!(useless_mutex.try_write(&mut key).is_ok());
+ assert!(good_mutex.scoped_try_write(&mut key, |_| {}).is_ok());
+ assert!(evil_mutex.scoped_try_write(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_write(&mut key, |_| {}).is_ok());
}
diff --git a/tests/evil_try_mutex.rs b/tests/evil_try_mutex.rs
new file mode 100644
index 0000000..5c31a91
--- /dev/null
+++ b/tests/evil_try_mutex.rs
@@ -0,0 +1,81 @@
+use std::sync::Arc;
+
+use happylock::{
+ collection::{BoxedLockCollection, RetryingLockCollection},
+ mutex::Mutex,
+ ThreadKey,
+};
+use lock_api::{GuardNoSend, RawMutex};
+
+struct EvilMutex {
+ inner: parking_lot::RawMutex,
+}
+
+unsafe impl RawMutex for EvilMutex {
+ #[allow(clippy::declare_interior_mutable_const)]
+ const INIT: Self = Self {
+ inner: parking_lot::RawMutex::INIT,
+ };
+
+ type GuardMarker = GuardNoSend;
+
+ fn lock(&self) {
+ self.inner.lock()
+ }
+
+ fn try_lock(&self) -> bool {
+ panic!("mwahahahaha");
+ }
+
+ unsafe fn unlock(&self) {
+ self.inner.unlock()
+ }
+}
+
+#[test]
+fn boxed_mutexes() {
+ let mut key = ThreadKey::get().unwrap();
+ let good_mutex: Arc<Mutex<i32, parking_lot::RawMutex>> = Arc::new(Mutex::new(5));
+ let evil_mutex: Arc<Mutex<i32, EvilMutex>> = Arc::new(Mutex::new(7));
+ let useless_mutex: Arc<Mutex<i32, parking_lot::RawMutex>> = Arc::new(Mutex::new(10));
+ let c_good = Arc::clone(&good_mutex);
+ let c_evil = Arc::clone(&evil_mutex);
+ let c_useless = Arc::clone(&useless_mutex);
+
+ let r = std::thread::spawn(move || {
+ let key = ThreadKey::get().unwrap();
+ let collection = BoxedLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
+ let g = collection.try_lock(key);
+ println!("{}", g.unwrap().1);
+ })
+ .join();
+
+ assert!(r.is_err());
+ assert!(good_mutex.scoped_try_lock(&mut key, |_| {}).is_ok());
+ assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok());
+}
+
+#[test]
+fn retrying_mutexes() {
+ let mut key = ThreadKey::get().unwrap();
+ let good_mutex: Arc<Mutex<i32, parking_lot::RawMutex>> = Arc::new(Mutex::new(5));
+ let evil_mutex: Arc<Mutex<i32, EvilMutex>> = Arc::new(Mutex::new(7));
+ let useless_mutex: Arc<Mutex<i32, parking_lot::RawMutex>> = Arc::new(Mutex::new(10));
+ let c_good = Arc::clone(&good_mutex);
+ let c_evil = Arc::clone(&evil_mutex);
+ let c_useless = Arc::clone(&useless_mutex);
+
+ let r = std::thread::spawn(move || {
+ let key = ThreadKey::get().unwrap();
+ let collection =
+ RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
+ let _ = collection.try_lock(key);
+ })
+ .join();
+
+ assert!(r.is_err());
+ assert!(good_mutex.scoped_try_lock(&mut key, |_| {}).is_ok());
+ assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok());
+}
diff --git a/tests/evil_try_rwlock.rs b/tests/evil_try_rwlock.rs
new file mode 100644
index 0000000..b00a666
--- /dev/null
+++ b/tests/evil_try_rwlock.rs
@@ -0,0 +1,90 @@
+use std::sync::Arc;
+
+use happylock::collection::{BoxedLockCollection, RetryingLockCollection};
+use happylock::rwlock::RwLock;
+use happylock::ThreadKey;
+use lock_api::{GuardNoSend, RawRwLock};
+
+struct EvilRwLock {
+ inner: parking_lot::RawRwLock,
+}
+
+unsafe impl RawRwLock for EvilRwLock {
+ #[allow(clippy::declare_interior_mutable_const)]
+ const INIT: Self = Self {
+ inner: parking_lot::RawRwLock::INIT,
+ };
+
+ type GuardMarker = GuardNoSend;
+
+ fn lock_shared(&self) {
+ self.inner.lock_shared()
+ }
+
+ fn try_lock_shared(&self) -> bool {
+ panic!("mwahahahaha")
+ }
+
+ unsafe fn unlock_shared(&self) {
+ self.inner.unlock_shared()
+ }
+
+ fn lock_exclusive(&self) {
+ self.inner.lock_exclusive()
+ }
+
+ fn try_lock_exclusive(&self) -> bool {
+ panic!("mwahahahaha")
+ }
+
+ unsafe fn unlock_exclusive(&self) {
+ self.inner.unlock_exclusive()
+ }
+}
+
+#[test]
+fn boxed_rwlocks() {
+ let mut key = ThreadKey::get().unwrap();
+ let good_mutex: Arc<RwLock<i32, parking_lot::RawRwLock>> = Arc::new(RwLock::new(5));
+ let evil_mutex: Arc<RwLock<i32, EvilRwLock>> = Arc::new(RwLock::new(7));
+ let useless_mutex: Arc<RwLock<i32, parking_lot::RawRwLock>> = Arc::new(RwLock::new(10));
+ let c_good = Arc::clone(&good_mutex);
+ let c_evil = Arc::clone(&evil_mutex);
+ let c_useless = Arc::clone(&useless_mutex);
+
+ let r = std::thread::spawn(move || {
+ let key = ThreadKey::get().unwrap();
+ let collection = BoxedLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
+ let _ = collection.try_read(key);
+ })
+ .join();
+
+ assert!(r.is_err());
+ assert!(good_mutex.scoped_try_read(&mut key, |_| {}).is_ok());
+ assert!(evil_mutex.scoped_try_read(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_read(&mut key, |_| {}).is_ok());
+}
+
+#[test]
+fn retrying_rwlocks() {
+ let mut key = ThreadKey::get().unwrap();
+ let good_mutex: Arc<RwLock<i32, parking_lot::RawRwLock>> = Arc::new(RwLock::new(5));
+ let evil_mutex: Arc<RwLock<i32, EvilRwLock>> = Arc::new(RwLock::new(7));
+ let useless_mutex: Arc<RwLock<i32, parking_lot::RawRwLock>> = Arc::new(RwLock::new(10));
+ let c_good = Arc::clone(&good_mutex);
+ let c_evil = Arc::clone(&evil_mutex);
+ let c_useless = Arc::clone(&useless_mutex);
+
+ let r = std::thread::spawn(move || {
+ let key = ThreadKey::get().unwrap();
+ let collection =
+ RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
+ _ = collection.try_read(key);
+ })
+ .join();
+
+ assert!(r.is_err());
+ assert!(good_mutex.scoped_try_read(&mut key, |_| {}).is_ok());
+ assert!(evil_mutex.scoped_try_read(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_read(&mut key, |_| {}).is_ok());
+}
diff --git a/tests/evil_unlock_mutex.rs b/tests/evil_unlock_mutex.rs
new file mode 100644
index 0000000..ee12abc
--- /dev/null
+++ b/tests/evil_unlock_mutex.rs
@@ -0,0 +1,99 @@
+use std::sync::Arc;
+
+use happylock::collection::{BoxedLockCollection, RetryingLockCollection};
+use happylock::mutex::Mutex;
+use happylock::ThreadKey;
+use lock_api::{GuardNoSend, RawMutex};
+
+struct KindaEvilMutex {
+ inner: parking_lot::RawMutex,
+}
+
+struct EvilMutex {}
+
+unsafe impl RawMutex for KindaEvilMutex {
+ #[allow(clippy::declare_interior_mutable_const)]
+ const INIT: Self = Self {
+ inner: parking_lot::RawMutex::INIT,
+ };
+
+ type GuardMarker = GuardNoSend;
+
+ fn lock(&self) {
+ self.inner.lock()
+ }
+
+ fn try_lock(&self) -> bool {
+ self.inner.try_lock()
+ }
+
+ unsafe fn unlock(&self) {
+ panic!("mwahahahaha");
+ }
+}
+
+unsafe impl RawMutex for EvilMutex {
+ #[allow(clippy::declare_interior_mutable_const)]
+ const INIT: Self = Self {};
+
+ type GuardMarker = GuardNoSend;
+
+ fn lock(&self) {
+ panic!("mwahahahaha");
+ }
+
+ fn try_lock(&self) -> bool {
+ panic!("mwahahahaha")
+ }
+
+ unsafe fn unlock(&self) {
+ panic!("mwahahahaha");
+ }
+}
+
+#[test]
+fn boxed_mutexes() {
+ let mut key = ThreadKey::get().unwrap();
+ let kinda_evil_mutex: Arc<Mutex<i32, KindaEvilMutex>> = Arc::new(Mutex::new(5));
+ let evil_mutex: Arc<Mutex<i32, EvilMutex>> = Arc::new(Mutex::new(7));
+ let useless_mutex: Arc<Mutex<i32, parking_lot::RawMutex>> = Arc::new(Mutex::new(10));
+ let c_good = Arc::clone(&kinda_evil_mutex);
+ let c_evil = Arc::clone(&evil_mutex);
+ let c_useless = Arc::clone(&useless_mutex);
+
+ let r = std::thread::spawn(move || {
+ let key = ThreadKey::get().unwrap();
+ let collection = BoxedLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
+ _ = collection.lock(key);
+ })
+ .join();
+
+ assert!(r.is_err());
+ assert!(kinda_evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err());
+ assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok());
+}
+
+#[test]
+fn retrying_mutexes() {
+ let mut key = ThreadKey::get().unwrap();
+ let kinda_evil_mutex: Arc<Mutex<i32, KindaEvilMutex>> = Arc::new(Mutex::new(5));
+ let evil_mutex: Arc<Mutex<i32, EvilMutex>> = Arc::new(Mutex::new(7));
+ let useless_mutex: Arc<Mutex<i32, parking_lot::RawMutex>> = Arc::new(Mutex::new(10));
+ let c_good = Arc::clone(&kinda_evil_mutex);
+ let c_evil = Arc::clone(&evil_mutex);
+ let c_useless = Arc::clone(&useless_mutex);
+
+ let r = std::thread::spawn(move || {
+ let key = ThreadKey::get().unwrap();
+ let collection =
+ RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
+ collection.lock(key);
+ })
+ .join();
+
+ assert!(r.is_err());
+ assert!(kinda_evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err());
+ assert!(evil_mutex.scoped_try_lock(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_lock(&mut key, |_| {}).is_ok());
+}
diff --git a/tests/evil_unlock_rwlock.rs b/tests/evil_unlock_rwlock.rs
new file mode 100644
index 0000000..58402c9
--- /dev/null
+++ b/tests/evil_unlock_rwlock.rs
@@ -0,0 +1,127 @@
+use std::sync::Arc;
+
+use happylock::collection::{BoxedLockCollection, RetryingLockCollection};
+use happylock::rwlock::RwLock;
+use happylock::ThreadKey;
+use lock_api::{GuardNoSend, RawRwLock};
+
+struct KindaEvilRwLock {
+ inner: parking_lot::RawRwLock,
+}
+
+struct EvilRwLock {}
+
+unsafe impl RawRwLock for KindaEvilRwLock {
+ #[allow(clippy::declare_interior_mutable_const)]
+ const INIT: Self = Self {
+ inner: parking_lot::RawRwLock::INIT,
+ };
+
+ type GuardMarker = GuardNoSend;
+
+ fn lock_shared(&self) {
+ self.inner.lock_shared()
+ }
+
+ fn try_lock_shared(&self) -> bool {
+ self.inner.try_lock_shared()
+ }
+
+ unsafe fn unlock_shared(&self) {
+ panic!("mwahahahaha");
+ }
+
+ fn lock_exclusive(&self) {
+ self.inner.lock_exclusive()
+ }
+
+ fn try_lock_exclusive(&self) -> bool {
+ self.inner.try_lock_exclusive()
+ }
+
+ unsafe fn unlock_exclusive(&self) {
+ panic!("mwahahahaha");
+ }
+}
+
+unsafe impl RawRwLock for EvilRwLock {
+ #[allow(clippy::declare_interior_mutable_const)]
+ const INIT: Self = Self {};
+
+ type GuardMarker = GuardNoSend;
+
+ fn lock_shared(&self) {
+ panic!("mwahahahaha");
+ }
+
+ fn try_lock_shared(&self) -> bool {
+ panic!("mwahahahaha");
+ }
+
+ unsafe fn unlock_shared(&self) {
+ panic!("mwahahahaha");
+ }
+
+ fn lock_exclusive(&self) {
+ panic!("mwahahahaha");
+ }
+
+ fn try_lock_exclusive(&self) -> bool {
+ panic!("mwahahahaha")
+ }
+
+ unsafe fn unlock_exclusive(&self) {
+ panic!("mwahahahaha");
+ }
+}
+
+#[test]
+fn boxed_rwlocks() {
+ let mut key = ThreadKey::get().unwrap();
+ let kinda_evil_mutex: RwLock<i32, KindaEvilRwLock> = RwLock::new(5);
+ let evil_mutex: RwLock<i32, EvilRwLock> = RwLock::new(7);
+ let useless_mutex: RwLock<i32, parking_lot::RawRwLock> = RwLock::new(10);
+
+ let r = std::thread::scope(|s| {
+ let r = s
+ .spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let collection =
+ BoxedLockCollection::try_new((&kinda_evil_mutex, &evil_mutex, &useless_mutex))
+ .unwrap();
+ _ = collection.read(key);
+ })
+ .join();
+
+ r
+ });
+
+ assert!(r.is_err());
+ assert!(kinda_evil_mutex.scoped_try_write(&mut key, |_| {}).is_err());
+ assert!(evil_mutex.scoped_try_write(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_write(&mut key, |_| {}).is_ok());
+}
+
+#[test]
+fn retrying_rwlocks() {
+ let mut key = ThreadKey::get().unwrap();
+ let kinda_evil_mutex: Arc<RwLock<i32, KindaEvilRwLock>> = Arc::new(RwLock::new(5));
+ let evil_mutex: Arc<RwLock<i32, EvilRwLock>> = Arc::new(RwLock::new(7));
+ let useless_mutex: Arc<RwLock<i32, parking_lot::RawRwLock>> = Arc::new(RwLock::new(10));
+ let c_good = Arc::clone(&kinda_evil_mutex);
+ let c_evil = Arc::clone(&evil_mutex);
+ let c_useless = Arc::clone(&useless_mutex);
+
+ let r = std::thread::spawn(move || {
+ let key = ThreadKey::get().unwrap();
+ let collection =
+ RetryingLockCollection::try_new((&*c_good, &*c_evil, &*c_useless)).unwrap();
+ collection.read(key);
+ })
+ .join();
+
+ assert!(r.is_err());
+ assert!(kinda_evil_mutex.scoped_try_write(&mut key, |_| {}).is_err());
+ assert!(evil_mutex.scoped_try_write(&mut key, |_| {}).is_err());
+ assert!(useless_mutex.scoped_try_write(&mut key, |_| {}).is_ok());
+}
diff --git a/tests/forget.rs b/tests/forget.rs
new file mode 100644
index 0000000..755bdb1
--- /dev/null
+++ b/tests/forget.rs
@@ -0,0 +1,24 @@
+use happylock::{Mutex, ThreadKey};
+
+#[test]
+fn no_new_threadkey_when_forgetting_lock() {
+ let key = ThreadKey::get().unwrap();
+ let mutex = Mutex::new("foo".to_string());
+
+ let guard = mutex.lock(key);
+ std::mem::forget(guard);
+
+ assert!(ThreadKey::get().is_none());
+}
+
+#[test]
+fn no_new_threadkey_in_scoped_lock() {
+ let mut key = ThreadKey::get().unwrap();
+ let mutex = Mutex::new("foo".to_string());
+
+ mutex.scoped_lock(&mut key, |_| {
+ assert!(ThreadKey::get().is_none());
+ });
+
+ mutex.lock(key);
+}
diff --git a/tests/retry.rs b/tests/retry.rs
new file mode 100644
index 0000000..64e0918
--- /dev/null
+++ b/tests/retry.rs
@@ -0,0 +1,53 @@
+use std::time::Duration;
+
+use happylock::{collection::RetryingLockCollection, Mutex, ThreadKey};
+
+static MUTEX_1: Mutex<i32> = Mutex::new(1);
+static MUTEX_2: Mutex<i32> = Mutex::new(2);
+static MUTEX_3: Mutex<i32> = Mutex::new(3);
+
+fn thread_1() {
+ let key = ThreadKey::get().unwrap();
+ let mut guard = MUTEX_2.lock(key);
+ std::thread::sleep(Duration::from_millis(100));
+ *guard = 5;
+}
+
+fn thread_2() {
+ let mut key = ThreadKey::get().unwrap();
+ std::thread::sleep(Duration::from_millis(50));
+ let collection = RetryingLockCollection::try_new([&MUTEX_1, &MUTEX_2, &MUTEX_3]).unwrap();
+ collection.scoped_lock(&mut key, |guard| {
+ assert_eq!(*guard[0], 4);
+ assert_eq!(*guard[1], 5);
+ assert_eq!(*guard[2], 3);
+ });
+}
+
+fn thread_3() {
+ let key = ThreadKey::get().unwrap();
+ std::thread::sleep(Duration::from_millis(75));
+ let mut guard = MUTEX_1.lock(key);
+ std::thread::sleep(Duration::from_millis(100));
+ *guard = 4;
+}
+
+fn thread_4() {
+ let mut key = ThreadKey::get().unwrap();
+ std::thread::sleep(Duration::from_millis(25));
+ let collection = RetryingLockCollection::try_new([&MUTEX_1, &MUTEX_2]).unwrap();
+ assert!(collection.scoped_try_lock(&mut key, |_| {}).is_err());
+}
+
+#[test]
+fn retries() {
+ let t1 = std::thread::spawn(thread_1);
+ let t2 = std::thread::spawn(thread_2);
+ let t3 = std::thread::spawn(thread_3);
+ let t4 = std::thread::spawn(thread_4);
+
+ t1.join().unwrap();
+ t2.join().unwrap();
+ t3.join().unwrap();
+ t4.join().unwrap();
+}
diff --git a/tests/retry_rw.rs b/tests/retry_rw.rs
new file mode 100644
index 0000000..976ab14
--- /dev/null
+++ b/tests/retry_rw.rs
@@ -0,0 +1,34 @@
+use std::time::Duration;
+
+use happylock::{collection::RetryingLockCollection, RwLock, ThreadKey};
+
+static RWLOCK_1: RwLock<i32> = RwLock::new(1);
+static RWLOCK_2: RwLock<i32> = RwLock::new(2);
+static RWLOCK_3: RwLock<i32> = RwLock::new(3);
+
+fn thread_1() {
+ let key = ThreadKey::get().unwrap();
+ let mut guard = RWLOCK_2.write(key);
+ std::thread::sleep(Duration::from_millis(75));
+ assert_eq!(*guard, 2);
+ *guard = 5;
+}
+
+fn thread_2() {
+ let key = ThreadKey::get().unwrap();
+ let collection = RetryingLockCollection::try_new([&RWLOCK_1, &RWLOCK_2, &RWLOCK_3]).unwrap();
+ std::thread::sleep(Duration::from_millis(25));
+ let guard = collection.read(key);
+ assert_eq!(*guard[0], 1);
+ assert_eq!(*guard[1], 5);
+ assert_eq!(*guard[2], 3);
+}
+
+#[test]
+fn retries() {
+ let t1 = std::thread::spawn(thread_1);
+ let t2 = std::thread::spawn(thread_2);
+
+ t1.join().unwrap();
+ t2.join().unwrap();
+}