Diffstat (limited to 'src/collection')
-rw-r--r--   src/collection/boxed.rs    186
-rw-r--r--   src/collection/owned.rs    283
-rw-r--r--   src/collection/ref.rs      261
-rw-r--r--   src/collection/retry.rs    403
-rw-r--r--   src/collection/utils.rs    113
5 files changed, 900 insertions, 346 deletions
diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs
index 364ec97..1891119 100644
--- a/src/collection/boxed.rs
+++ b/src/collection/boxed.rs
@@ -4,7 +4,9 @@ use std::fmt::Debug;
use crate::lockable::{Lockable, LockableIntoInner, OwnedLockable, RawLock, Sharable};
use crate::{Keyable, ThreadKey};
-use super::utils::ordered_contains_duplicates;
+use super::utils::{
+ ordered_contains_duplicates, scoped_read, scoped_try_read, scoped_try_write, scoped_write,
+};
use super::{utils, BoxedLockCollection, LockGuard};
unsafe impl<L: Lockable> RawLock for BoxedLockCollection<L> {
@@ -16,18 +18,18 @@ unsafe impl<L: Lockable> RawLock for BoxedLockCollection<L> {
}
}
- unsafe fn raw_lock(&self) {
- utils::ordered_lock(self.locks())
+ unsafe fn raw_write(&self) {
+ utils::ordered_write(self.locks())
}
- unsafe fn raw_try_lock(&self) -> bool {
+ unsafe fn raw_try_write(&self) -> bool {
println!("{}", self.locks().len());
- utils::ordered_try_lock(self.locks())
+ utils::ordered_try_write(self.locks())
}
- unsafe fn raw_unlock(&self) {
+ unsafe fn raw_unlock_write(&self) {
for lock in self.locks() {
- lock.raw_unlock();
+ lock.raw_unlock_write();
}
}
@@ -58,7 +60,7 @@ unsafe impl<L: Lockable> Lockable for BoxedLockCollection<L> {
Self: 'a;
fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) {
- ptrs.extend(self.locks())
+ ptrs.push(self);
}
unsafe fn guard(&self) -> Self::Guard<'_> {
@@ -156,7 +158,7 @@ impl<L> Drop for BoxedLockCollection<L> {
}
}
-impl<T, L: AsRef<T>> AsRef<T> for BoxedLockCollection<L> {
+impl<T: ?Sized, L: AsRef<T>> AsRef<T> for BoxedLockCollection<L> {
fn as_ref(&self) -> &T {
self.child().as_ref()
}
@@ -364,44 +366,16 @@ impl<L: Lockable> BoxedLockCollection<L> {
}
}
- pub fn scoped_lock<R>(&self, key: impl Keyable, f: impl Fn(L::DataMut<'_>) -> R) -> R {
- unsafe {
- // safety: we have the thread key
- self.raw_lock();
-
- // safety: the data was just locked
- let r = f(self.data_mut());
-
- // safety: the collection is still locked
- self.raw_unlock();
-
- drop(key); // ensure the key stays alive long enough
-
- r
- }
+ pub fn scoped_lock<'a, R>(&'a self, key: impl Keyable, f: impl Fn(L::DataMut<'a>) -> R) -> R {
+ scoped_write(self, key, f)
}
- pub fn scoped_try_lock<Key: Keyable, R>(
- &self,
+ pub fn scoped_try_lock<'a, Key: Keyable, R>(
+ &'a self,
key: Key,
- f: impl Fn(L::DataMut<'_>) -> R,
+ f: impl Fn(L::DataMut<'a>) -> R,
) -> Result<R, Key> {
- unsafe {
- // safety: we have the thread key
- if !self.raw_try_lock() {
- return Err(key);
- }
-
- // safety: we just locked the collection
- let r = f(self.data_mut());
-
- // safety: the collection is still locked
- self.raw_unlock();
-
- drop(key); // ensures the key stays valid long enough
-
- Ok(r)
- }
+ scoped_try_write(self, key, f)
}
/// Locks the collection
@@ -427,7 +401,7 @@ impl<L: Lockable> BoxedLockCollection<L> {
pub fn lock(&self, key: ThreadKey) -> LockGuard<L::Guard<'_>> {
unsafe {
// safety: we have the thread key
- self.raw_lock();
+ self.raw_write();
LockGuard {
// safety: we've already acquired the lock
@@ -468,7 +442,7 @@ impl<L: Lockable> BoxedLockCollection<L> {
/// ```
pub fn try_lock(&self, key: ThreadKey) -> Result<LockGuard<L::Guard<'_>>, ThreadKey> {
let guard = unsafe {
- if !self.raw_try_lock() {
+ if !self.raw_try_write() {
return Err(key);
}
@@ -503,44 +477,16 @@ impl<L: Lockable> BoxedLockCollection<L> {
}
impl<L: Sharable> BoxedLockCollection<L> {
- pub fn scoped_read<R>(&self, key: impl Keyable, f: impl Fn(L::DataRef<'_>) -> R) -> R {
- unsafe {
- // safety: we have the thread key
- self.raw_read();
-
- // safety: the data was just locked
- let r = f(self.data_ref());
-
- // safety: the collection is still locked
- self.raw_unlock_read();
-
- drop(key); // ensure the key stays alive long enough
-
- r
- }
+ pub fn scoped_read<'a, R>(&'a self, key: impl Keyable, f: impl Fn(L::DataRef<'a>) -> R) -> R {
+ scoped_read(self, key, f)
}
- pub fn scoped_try_read<Key: Keyable, R>(
- &self,
+ pub fn scoped_try_read<'a, Key: Keyable, R>(
+ &'a self,
key: Key,
- f: impl Fn(L::DataRef<'_>) -> R,
+ f: impl Fn(L::DataRef<'a>) -> R,
) -> Result<R, Key> {
- unsafe {
- // safety: we have the thread key
- if !self.raw_try_read() {
- return Err(key);
- }
-
- // safety: we just locked the collection
- let r = f(self.data_ref());
-
- // safety: the collection is still locked
- self.raw_unlock_read();
-
- drop(key); // ensures the key stays valid long enough
-
- Ok(r)
- }
+ scoped_try_read(self, key, f)
}
/// Locks the collection, so that other threads can still read from it
@@ -765,6 +711,56 @@ mod tests {
}
#[test]
+ fn scoped_read_sees_changes() {
+ let mut key = ThreadKey::get().unwrap();
+ let mutexes = [RwLock::new(24), RwLock::new(42)];
+ let collection = BoxedLockCollection::new(mutexes);
+ collection.scoped_lock(&mut key, |guard| *guard[0] = 128);
+
+ let sum = collection.scoped_read(&mut key, |guard| {
+ assert_eq!(*guard[0], 128);
+ assert_eq!(*guard[1], 42);
+ *guard[0] + *guard[1]
+ });
+
+ assert_eq!(sum, 128 + 42);
+ }
+
+ #[test]
+ fn scoped_try_lock_can_fail() {
+ let key = ThreadKey::get().unwrap();
+ let collection = BoxedLockCollection::new([Mutex::new(1), Mutex::new(2)]);
+ let guard = collection.lock(key);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let r = collection.scoped_try_lock(key, |_| {});
+ assert!(r.is_err());
+ });
+ });
+
+ drop(guard);
+ }
+
+ #[test]
+ fn scoped_try_read_can_fail() {
+ let key = ThreadKey::get().unwrap();
+ let collection = BoxedLockCollection::new([RwLock::new(1), RwLock::new(2)]);
+ let guard = collection.lock(key);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let r = collection.scoped_try_read(key, |_| {});
+ assert!(r.is_err());
+ });
+ });
+
+ drop(guard);
+ }
+
+ #[test]
fn try_lock_works() {
let key = ThreadKey::get().unwrap();
let collection = BoxedLockCollection::new([Mutex::new(1), Mutex::new(2)]);
@@ -884,15 +880,41 @@ mod tests {
#[test]
fn works_in_collection() {
let key = ThreadKey::get().unwrap();
- let mutex1 = Mutex::new(0);
- let mutex2 = Mutex::new(1);
+ let mutex1 = RwLock::new(0);
+ let mutex2 = RwLock::new(1);
let collection =
BoxedLockCollection::try_new(BoxedLockCollection::try_new([&mutex1, &mutex2]).unwrap())
.unwrap();
- let guard = collection.lock(key);
+ let mut guard = collection.lock(key);
+ assert!(mutex1.is_locked());
+ assert!(mutex2.is_locked());
+ assert_eq!(*guard[0], 0);
+ assert_eq!(*guard[1], 1);
+ *guard[0] = 2;
+ let key = BoxedLockCollection::<BoxedLockCollection<[&RwLock<_>; 2]>>::unlock(guard);
+
+ let guard = collection.read(key);
assert!(mutex1.is_locked());
assert!(mutex2.is_locked());
+ assert_eq!(*guard[0], 2);
+ assert_eq!(*guard[1], 1);
drop(guard);
}
+
+ #[test]
+ fn as_ref_works() {
+ let mutexes = [Mutex::new(0), Mutex::new(1)];
+ let collection = BoxedLockCollection::new_ref(&mutexes);
+
+ assert!(std::ptr::addr_eq(&mutexes, collection.as_ref()))
+ }
+
+ #[test]
+ fn child() {
+ let mutexes = [Mutex::new(0), Mutex::new(1)];
+ let collection = BoxedLockCollection::new_ref(&mutexes);
+
+ assert!(std::ptr::addr_eq(&mutexes, *collection.child()))
+ }
}
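
A minimal caller-side sketch of the scoped API that boxed.rs now routes through the shared utils helpers. It mirrors the tests above; the `use` paths are an assumption based on the test module's imports.

use crate::collection::BoxedLockCollection;
use crate::{RwLock, ThreadKey};

fn boxed_scoped_sketch() {
    // the per-thread key that gates all locking
    let mut key = ThreadKey::get().unwrap();
    let collection = BoxedLockCollection::new([RwLock::new(24), RwLock::new(42)]);

    // exclusive access: scoped_lock now delegates to utils::scoped_write
    collection.scoped_lock(&mut key, |guard| *guard[0] = 128);

    // shared access: scoped_read now delegates to utils::scoped_read
    let sum = collection.scoped_read(&mut key, |guard| *guard[0] + *guard[1]);
    assert_eq!(sum, 128 + 42);
}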
diff --git a/src/collection/owned.rs b/src/collection/owned.rs
index b9cf313..68170d1 100644
--- a/src/collection/owned.rs
+++ b/src/collection/owned.rs
@@ -3,6 +3,7 @@ use crate::lockable::{
};
use crate::{Keyable, ThreadKey};
+use super::utils::{scoped_read, scoped_try_read, scoped_try_write, scoped_write};
use super::{utils, LockGuard, OwnedLockCollection};
unsafe impl<L: Lockable> RawLock for OwnedLockCollection<L> {
@@ -15,19 +16,19 @@ unsafe impl<L: Lockable> RawLock for OwnedLockCollection<L> {
}
}
- unsafe fn raw_lock(&self) {
- utils::ordered_lock(&utils::get_locks_unsorted(&self.data))
+ unsafe fn raw_write(&self) {
+ utils::ordered_write(&utils::get_locks_unsorted(&self.data))
}
- unsafe fn raw_try_lock(&self) -> bool {
+ unsafe fn raw_try_write(&self) -> bool {
let locks = utils::get_locks_unsorted(&self.data);
- utils::ordered_try_lock(&locks)
+ utils::ordered_try_write(&locks)
}
- unsafe fn raw_unlock(&self) {
+ unsafe fn raw_unlock_write(&self) {
let locks = utils::get_locks_unsorted(&self.data);
for lock in locks {
- lock.raw_unlock();
+ lock.raw_unlock_write();
}
}
@@ -62,7 +63,7 @@ unsafe impl<L: Lockable> Lockable for OwnedLockCollection<L> {
#[mutants::skip] // It's hard to test locks in an OwnedLockCollection, because they're owned
#[cfg(not(tarpaulin_include))]
fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) {
- self.data.get_ptrs(ptrs)
+ ptrs.push(self)
}
unsafe fn guard(&self) -> Self::Guard<'_> {
@@ -146,7 +147,7 @@ impl<E: OwnedLockable + Extend<L>, L: OwnedLockable> Extend<L> for OwnedLockColl
// invariant that there is only one way to lock the collection. AsMut is fine,
// because the collection can't be locked as long as the reference is valid.
-impl<T, L: AsMut<T>> AsMut<T> for OwnedLockCollection<L> {
+impl<T: ?Sized, L: AsMut<T>> AsMut<T> for OwnedLockCollection<L> {
fn as_mut(&mut self) -> &mut T {
self.data.as_mut()
}
@@ -185,44 +186,16 @@ impl<L: OwnedLockable> OwnedLockCollection<L> {
Self { data }
}
- pub fn scoped_lock<R>(&self, key: impl Keyable, f: impl Fn(L::DataMut<'_>) -> R) -> R {
- unsafe {
- // safety: we have the thread key
- self.raw_lock();
-
- // safety: the data was just locked
- let r = f(self.data_mut());
-
- // safety: the collection is still locked
- self.raw_unlock();
-
- drop(key); // ensure the key stays alive long enough
-
- r
- }
+ pub fn scoped_lock<'a, R>(&'a self, key: impl Keyable, f: impl Fn(L::DataMut<'a>) -> R) -> R {
+ scoped_write(self, key, f)
}
- pub fn scoped_try_lock<Key: Keyable, R>(
- &self,
+ pub fn scoped_try_lock<'a, Key: Keyable, R>(
+ &'a self,
key: Key,
- f: impl Fn(L::DataMut<'_>) -> R,
+ f: impl Fn(L::DataMut<'a>) -> R,
) -> Result<R, Key> {
- unsafe {
- // safety: we have the thread key
- if !self.raw_try_lock() {
- return Err(key);
- }
-
- // safety: we just locked the collection
- let r = f(self.data_mut());
-
- // safety: the collection is still locked
- self.raw_unlock();
-
- drop(key); // ensures the key stays valid long enough
-
- Ok(r)
- }
+ scoped_try_write(self, key, f)
}
/// Locks the collection
@@ -249,7 +222,7 @@ impl<L: OwnedLockable> OwnedLockCollection<L> {
let guard = unsafe {
// safety: we have the thread key, and these locks happen in a
// predetermined order
- self.raw_lock();
+ self.raw_write();
// safety: we've locked all of this already
self.data.guard()
@@ -290,7 +263,7 @@ impl<L: OwnedLockable> OwnedLockCollection<L> {
/// ```
pub fn try_lock(&self, key: ThreadKey) -> Result<LockGuard<L::Guard<'_>>, ThreadKey> {
let guard = unsafe {
- if !self.raw_try_lock() {
+ if !self.raw_try_write() {
return Err(key);
}
@@ -327,44 +300,16 @@ impl<L: OwnedLockable> OwnedLockCollection<L> {
}
impl<L: Sharable> OwnedLockCollection<L> {
- pub fn scoped_read<R>(&self, key: impl Keyable, f: impl Fn(L::DataRef<'_>) -> R) -> R {
- unsafe {
- // safety: we have the thread key
- self.raw_read();
-
- // safety: the data was just locked
- let r = f(self.data_ref());
-
- // safety: the collection is still locked
- self.raw_unlock_read();
-
- drop(key); // ensure the key stays alive long enough
-
- r
- }
+ pub fn scoped_read<'a, R>(&'a self, key: impl Keyable, f: impl Fn(L::DataRef<'a>) -> R) -> R {
+ scoped_read(self, key, f)
}
- pub fn scoped_try_read<Key: Keyable, R>(
- &self,
+ pub fn scoped_try_read<'a, Key: Keyable, R>(
+ &'a self,
key: Key,
- f: impl Fn(L::DataRef<'_>) -> R,
+ f: impl Fn(L::DataRef<'a>) -> R,
) -> Result<R, Key> {
- unsafe {
- // safety: we have the thread key
- if !self.raw_try_read() {
- return Err(key);
- }
-
- // safety: we just locked the collection
- let r = f(self.data_ref());
-
- // safety: the collection is still locked
- self.raw_unlock_read();
-
- drop(key); // ensures the key stays valid long enough
-
- Ok(r)
- }
+ scoped_try_read(self, key, f)
}
/// Locks the collection, so that other threads can still read from it
@@ -554,7 +499,7 @@ impl<L: LockableIntoInner> OwnedLockCollection<L> {
#[cfg(test)]
mod tests {
use super::*;
- use crate::{Mutex, ThreadKey};
+ use crate::{Mutex, RwLock, ThreadKey};
#[test]
fn get_mut_applies_changes() {
@@ -604,6 +549,63 @@ mod tests {
}
#[test]
+ fn scoped_read_works() {
+ let mut key = ThreadKey::get().unwrap();
+ let collection = OwnedLockCollection::new([RwLock::new(24), RwLock::new(42)]);
+ let sum = collection.scoped_read(&mut key, |guard| guard[0] + guard[1]);
+ assert_eq!(sum, 24 + 42);
+ }
+
+ #[test]
+ fn scoped_lock_works() {
+ let mut key = ThreadKey::get().unwrap();
+ let collection = OwnedLockCollection::new([RwLock::new(24), RwLock::new(42)]);
+ collection.scoped_lock(&mut key, |guard| *guard[0] += *guard[1]);
+
+ let sum = collection.scoped_lock(&mut key, |guard| {
+ assert_eq!(*guard[0], 24 + 42);
+ assert_eq!(*guard[1], 42);
+ *guard[0] + *guard[1]
+ });
+
+ assert_eq!(sum, 24 + 42 + 42);
+ }
+
+ #[test]
+ fn scoped_try_lock_can_fail() {
+ let key = ThreadKey::get().unwrap();
+ let collection = OwnedLockCollection::new([Mutex::new(1), Mutex::new(2)]);
+ let guard = collection.lock(key);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let r = collection.scoped_try_lock(key, |_| {});
+ assert!(r.is_err());
+ });
+ });
+
+ drop(guard);
+ }
+
+ #[test]
+ fn scoped_try_read_can_fail() {
+ let key = ThreadKey::get().unwrap();
+ let collection = OwnedLockCollection::new([RwLock::new(1), RwLock::new(2)]);
+ let guard = collection.lock(key);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let r = collection.scoped_try_read(key, |_| {});
+ assert!(r.is_err());
+ });
+ });
+
+ drop(guard);
+ }
+
+ #[test]
fn try_lock_works_on_unlocked() {
let key = ThreadKey::get().unwrap();
let collection = OwnedLockCollection::new((Mutex::new(0), Mutex::new(1)));
@@ -630,6 +632,74 @@ mod tests {
}
#[test]
+ fn try_read_succeeds_for_unlocked_collection() {
+ let key = ThreadKey::get().unwrap();
+ let mutexes = [RwLock::new(24), RwLock::new(42)];
+ let collection = OwnedLockCollection::new(mutexes);
+ let guard = collection.try_read(key).unwrap();
+ assert_eq!(*guard[0], 24);
+ assert_eq!(*guard[1], 42);
+ }
+
+ #[test]
+ fn try_read_fails_on_locked() {
+ let key = ThreadKey::get().unwrap();
+ let collection = OwnedLockCollection::new((RwLock::new(0), RwLock::new(1)));
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ #[allow(unused)]
+ let guard = collection.lock(key);
+ std::mem::forget(guard);
+ });
+ });
+
+ assert!(collection.try_read(key).is_err());
+ }
+
+ #[test]
+ fn can_read_twice_on_different_threads() {
+ let key = ThreadKey::get().unwrap();
+ let mutexes = [RwLock::new(24), RwLock::new(42)];
+ let collection = OwnedLockCollection::new(mutexes);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let guard = collection.read(key);
+ assert_eq!(*guard[0], 24);
+ assert_eq!(*guard[1], 42);
+ std::mem::forget(guard);
+ });
+ });
+
+ let guard = collection.try_read(key).unwrap();
+ assert_eq!(*guard[0], 24);
+ assert_eq!(*guard[1], 42);
+ }
+
+ #[test]
+ fn unlock_collection_works() {
+ let key = ThreadKey::get().unwrap();
+ let collection = OwnedLockCollection::new((Mutex::new("foo"), Mutex::new("bar")));
+ let guard = collection.lock(key);
+
+ let key = OwnedLockCollection::<(Mutex<_>, Mutex<_>)>::unlock(guard);
+ assert!(collection.try_lock(key).is_ok())
+ }
+
+ #[test]
+ fn read_unlock_collection_works() {
+ let key = ThreadKey::get().unwrap();
+ let collection = OwnedLockCollection::new((RwLock::new("foo"), RwLock::new("bar")));
+ let guard = collection.read(key);
+
+ let key = OwnedLockCollection::<(&RwLock<_>, &RwLock<_>)>::unlock_read(guard);
+ assert!(collection.try_lock(key).is_ok())
+ }
+
+ #[test]
fn default_works() {
type MyCollection = OwnedLockCollection<(Mutex<i32>, Mutex<Option<i32>>, Mutex<String>)>;
let collection = MyCollection::default();
@@ -649,4 +719,59 @@ mod tests {
assert_eq!(collection.data.len(), 3);
}
+
+ #[test]
+ fn works_in_collection() {
+ let key = ThreadKey::get().unwrap();
+ let collection =
+ OwnedLockCollection::new(OwnedLockCollection::new([RwLock::new(0), RwLock::new(1)]));
+
+ let mut guard = collection.lock(key);
+ assert_eq!(*guard[0], 0);
+ assert_eq!(*guard[1], 1);
+ *guard[1] = 2;
+
+ let key = OwnedLockCollection::<OwnedLockCollection<[RwLock<_>; 2]>>::unlock(guard);
+ let guard = collection.read(key);
+ assert_eq!(*guard[0], 0);
+ assert_eq!(*guard[1], 2);
+ }
+
+ #[test]
+ fn as_mut_works() {
+ let mut mutexes = [Mutex::new(0), Mutex::new(1)];
+ let mut collection = OwnedLockCollection::new(&mut mutexes);
+
+ collection.as_mut()[0] = Mutex::new(42);
+
+ assert_eq!(*collection.as_mut()[0].get_mut(), 42);
+ }
+
+ #[test]
+ fn child_mut_works() {
+ let mut mutexes = [Mutex::new(0), Mutex::new(1)];
+ let mut collection = OwnedLockCollection::new(&mut mutexes);
+
+ collection.child_mut()[0] = Mutex::new(42);
+
+ assert_eq!(*collection.child_mut()[0].get_mut(), 42);
+ }
+
+ #[test]
+ fn into_child_works() {
+ let mutexes = [Mutex::new(0), Mutex::new(1)];
+ let mut collection = OwnedLockCollection::new(mutexes);
+
+ collection.child_mut()[0] = Mutex::new(42);
+
+ assert_eq!(
+ *collection
+ .into_child()
+ .as_mut()
+ .get_mut(0)
+ .unwrap()
+ .get_mut(),
+ 42
+ );
+ }
}
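
A short sketch of the unlock round-trip that the new owned.rs tests exercise: unlocking a guard hands the ThreadKey back so the same collection can be locked again. Paths mirror the test imports and are otherwise an assumption.

use crate::collection::OwnedLockCollection;
use crate::{Mutex, ThreadKey};

fn owned_unlock_roundtrip_sketch() {
    let key = ThreadKey::get().unwrap();
    let collection = OwnedLockCollection::new((Mutex::new("foo"), Mutex::new("bar")));

    // locking consumes the key...
    let guard = collection.lock(key);

    // ...and unlocking the guard returns it, ready for the next lock call
    let key = OwnedLockCollection::<(Mutex<_>, Mutex<_>)>::unlock(guard);
    assert!(collection.try_lock(key).is_ok());
}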
diff --git a/src/collection/ref.rs b/src/collection/ref.rs
index b68b72f..5f96533 100644
--- a/src/collection/ref.rs
+++ b/src/collection/ref.rs
@@ -3,7 +3,10 @@ use std::fmt::Debug;
use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable};
use crate::{Keyable, ThreadKey};
-use super::utils::{get_locks, ordered_contains_duplicates};
+use super::utils::{
+ get_locks, ordered_contains_duplicates, scoped_read, scoped_try_read, scoped_try_write,
+ scoped_write,
+};
use super::{utils, LockGuard, RefLockCollection};
impl<'a, L> IntoIterator for &'a RefLockCollection<'a, L>
@@ -27,17 +30,17 @@ unsafe impl<L: Lockable> RawLock for RefLockCollection<'_, L> {
}
}
- unsafe fn raw_lock(&self) {
- utils::ordered_lock(&self.locks)
+ unsafe fn raw_write(&self) {
+ utils::ordered_write(&self.locks)
}
- unsafe fn raw_try_lock(&self) -> bool {
- utils::ordered_try_lock(&self.locks)
+ unsafe fn raw_try_write(&self) -> bool {
+ utils::ordered_try_write(&self.locks)
}
- unsafe fn raw_unlock(&self) {
+ unsafe fn raw_unlock_write(&self) {
for lock in &self.locks {
- lock.raw_unlock();
+ lock.raw_unlock_write();
}
}
@@ -68,7 +71,7 @@ unsafe impl<L: Lockable> Lockable for RefLockCollection<'_, L> {
Self: 'a;
fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) {
- ptrs.extend_from_slice(&self.locks);
+ ptrs.push(self)
}
unsafe fn guard(&self) -> Self::Guard<'_> {
@@ -100,7 +103,7 @@ unsafe impl<L: Sharable> Sharable for RefLockCollection<'_, L> {
}
}
-impl<T, L: AsRef<T>> AsRef<T> for RefLockCollection<'_, L> {
+impl<T: ?Sized, L: AsRef<T>> AsRef<T> for RefLockCollection<'_, L> {
fn as_ref(&self) -> &T {
self.data.as_ref()
}
@@ -234,44 +237,16 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> {
Some(Self { data, locks })
}
- pub fn scoped_lock<R>(&self, key: impl Keyable, f: impl Fn(L::DataMut<'_>) -> R) -> R {
- unsafe {
- // safety: we have the thread key
- self.raw_lock();
-
- // safety: the data was just locked
- let r = f(self.data_mut());
-
- // safety: the collection is still locked
- self.raw_unlock();
-
- drop(key); // ensure the key stays alive long enough
-
- r
- }
+ pub fn scoped_lock<'s, R>(&'s self, key: impl Keyable, f: impl Fn(L::DataMut<'s>) -> R) -> R {
+ scoped_write(self, key, f)
}
- pub fn scoped_try_lock<Key: Keyable, R>(
- &self,
+ pub fn scoped_try_lock<'s, Key: Keyable, R>(
+ &'s self,
key: Key,
- f: impl Fn(L::DataMut<'_>) -> R,
+ f: impl Fn(L::DataMut<'s>) -> R,
) -> Result<R, Key> {
- unsafe {
- // safety: we have the thread key
- if !self.raw_try_lock() {
- return Err(key);
- }
-
- // safety: we just locked the collection
- let r = f(self.data_mut());
-
- // safety: the collection is still locked
- self.raw_unlock();
-
- drop(key); // ensures the key stays valid long enough
-
- Ok(r)
- }
+ scoped_try_write(self, key, f)
}
/// Locks the collection
@@ -298,7 +273,7 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> {
pub fn lock(&self, key: ThreadKey) -> LockGuard<L::Guard<'_>> {
let guard = unsafe {
// safety: we have the thread key
- self.raw_lock();
+ self.raw_write();
// safety: we've locked all of this already
self.data.guard()
@@ -339,7 +314,7 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> {
/// ```
pub fn try_lock(&self, key: ThreadKey) -> Result<LockGuard<L::Guard<'_>>, ThreadKey> {
let guard = unsafe {
- if !self.raw_try_lock() {
+ if !self.raw_try_write() {
return Err(key);
}
@@ -376,44 +351,16 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> {
}
impl<L: Sharable> RefLockCollection<'_, L> {
- pub fn scoped_read<R>(&self, key: impl Keyable, f: impl Fn(L::DataRef<'_>) -> R) -> R {
- unsafe {
- // safety: we have the thread key
- self.raw_read();
-
- // safety: the data was just locked
- let r = f(self.data_ref());
-
- // safety: the collection is still locked
- self.raw_unlock_read();
-
- drop(key); // ensure the key stays alive long enough
-
- r
- }
+ pub fn scoped_read<'a, R>(&'a self, key: impl Keyable, f: impl Fn(L::DataRef<'a>) -> R) -> R {
+ scoped_read(self, key, f)
}
- pub fn scoped_try_read<Key: Keyable, R>(
- &self,
+ pub fn scoped_try_read<'a, Key: Keyable, R>(
+ &'a self,
key: Key,
- f: impl Fn(L::DataRef<'_>) -> R,
+ f: impl Fn(L::DataRef<'a>) -> R,
) -> Result<R, Key> {
- unsafe {
- // safety: we have the thread key
- if !self.raw_try_read() {
- return Err(key);
- }
-
- // safety: we just locked the collection
- let r = f(self.data_ref());
-
- // safety: the collection is still locked
- self.raw_unlock_read();
-
- drop(key); // ensures the key stays valid long enough
-
- Ok(r)
- }
+ scoped_try_read(self, key, f)
}
/// Locks the collection, so that other threads can still read from it
@@ -565,6 +512,88 @@ mod tests {
}
#[test]
+ fn from() {
+ let key = ThreadKey::get().unwrap();
+ let mutexes = [Mutex::new("foo"), Mutex::new("bar"), Mutex::new("baz")];
+ let collection = RefLockCollection::from(&mutexes);
+ let guard = collection.lock(key);
+ assert_eq!(*guard[0], "foo");
+ assert_eq!(*guard[1], "bar");
+ assert_eq!(*guard[2], "baz");
+ }
+
+ #[test]
+ fn scoped_lock_changes_collection() {
+ let mut key = ThreadKey::get().unwrap();
+ let mutexes = [Mutex::new(24), Mutex::new(42)];
+ let collection = RefLockCollection::new(&mutexes);
+ let sum = collection.scoped_lock(&mut key, |guard| {
+ *guard[0] = 128;
+ *guard[0] + *guard[1]
+ });
+
+ assert_eq!(sum, 128 + 42);
+
+ let guard = collection.lock(key);
+ assert_eq!(*guard[0], 128);
+ assert_eq!(*guard[1], 42);
+ }
+
+ #[test]
+ fn scoped_read_sees_changes() {
+ let mut key = ThreadKey::get().unwrap();
+ let mutexes = [RwLock::new(24), RwLock::new(42)];
+ let collection = RefLockCollection::new(&mutexes);
+ collection.scoped_lock(&mut key, |guard| {
+ *guard[0] = 128;
+ });
+
+ let sum = collection.scoped_read(&mut key, |guard| {
+ assert_eq!(*guard[0], 128);
+ assert_eq!(*guard[1], 42);
+ *guard[0] + *guard[1]
+ });
+
+ assert_eq!(sum, 128 + 42);
+ }
+
+ #[test]
+ fn scoped_try_lock_can_fail() {
+ let key = ThreadKey::get().unwrap();
+ let locks = [Mutex::new(1), Mutex::new(2)];
+ let collection = RefLockCollection::new(&locks);
+ let guard = collection.lock(key);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let r = collection.scoped_try_lock(key, |_| {});
+ assert!(r.is_err());
+ });
+ });
+
+ drop(guard);
+ }
+
+ #[test]
+ fn scoped_try_read_can_fail() {
+ let key = ThreadKey::get().unwrap();
+ let locks = [RwLock::new(1), RwLock::new(2)];
+ let collection = RefLockCollection::new(&locks);
+ let guard = collection.lock(key);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let r = collection.scoped_try_read(key, |_| {});
+ assert!(r.is_err());
+ });
+ });
+
+ drop(guard);
+ }
+
+ #[test]
fn try_lock_succeeds_for_unlocked_collection() {
let key = ThreadKey::get().unwrap();
let mutexes = [Mutex::new(24), Mutex::new(42)];
@@ -644,17 +673,85 @@ mod tests {
}
#[test]
+ fn into_ref_iterator() {
+ let mut key = ThreadKey::get().unwrap();
+ let mutexes = [Mutex::new(0), Mutex::new(1), Mutex::new(2)];
+ let collection = RefLockCollection::new(&mutexes);
+ for (i, mutex) in (&collection).into_iter().enumerate() {
+ mutex.scoped_lock(&mut key, |val| assert_eq!(*val, i))
+ }
+ }
+
+ #[test]
+ fn ref_iterator() {
+ let mut key = ThreadKey::get().unwrap();
+ let mutexes = [Mutex::new(0), Mutex::new(1), Mutex::new(2)];
+ let collection = RefLockCollection::new(&mutexes);
+ for (i, mutex) in collection.iter().enumerate() {
+ mutex.scoped_lock(&mut key, |val| assert_eq!(*val, i))
+ }
+ }
+
+ #[test]
fn works_in_collection() {
let key = ThreadKey::get().unwrap();
- let mutex1 = Mutex::new(0);
- let mutex2 = Mutex::new(1);
+ let mutex1 = RwLock::new(0);
+ let mutex2 = RwLock::new(1);
let collection0 = [&mutex1, &mutex2];
let collection1 = RefLockCollection::try_new(&collection0).unwrap();
let collection = RefLockCollection::try_new(&collection1).unwrap();
- let guard = collection.lock(key);
+ let mut guard = collection.lock(key);
assert!(mutex1.is_locked());
assert!(mutex2.is_locked());
+ assert_eq!(*guard[0], 0);
+ assert_eq!(*guard[1], 1);
+ *guard[1] = 2;
drop(guard);
+
+ let key = ThreadKey::get().unwrap();
+ let guard = collection.read(key);
+ assert!(mutex1.is_locked());
+ assert!(mutex2.is_locked());
+ assert_eq!(*guard[0], 0);
+ assert_eq!(*guard[1], 2);
+ }
+
+ #[test]
+ fn unlock_collection_works() {
+ let key = ThreadKey::get().unwrap();
+ let mutexes = (Mutex::new("foo"), Mutex::new("bar"));
+ let collection = RefLockCollection::new(&mutexes);
+ let guard = collection.lock(key);
+
+ let key = RefLockCollection::<(Mutex<_>, Mutex<_>)>::unlock(guard);
+ assert!(collection.try_lock(key).is_ok())
+ }
+
+ #[test]
+ fn read_unlock_collection_works() {
+ let key = ThreadKey::get().unwrap();
+ let locks = (RwLock::new("foo"), RwLock::new("bar"));
+ let collection = RefLockCollection::new(&locks);
+ let guard = collection.read(key);
+
+ let key = RefLockCollection::<(&RwLock<_>, &RwLock<_>)>::unlock_read(guard);
+ assert!(collection.try_lock(key).is_ok())
+ }
+
+ #[test]
+ fn as_ref_works() {
+ let mutexes = [Mutex::new(0), Mutex::new(1)];
+ let collection = RefLockCollection::new(&mutexes);
+
+ assert!(std::ptr::addr_eq(&mutexes, collection.as_ref()))
+ }
+
+ #[test]
+ fn child() {
+ let mutexes = [Mutex::new(0), Mutex::new(1)];
+ let collection = RefLockCollection::new(&mutexes);
+
+ assert!(std::ptr::addr_eq(&mutexes, collection.child()))
}
}
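
The ref.rs tests also cover iteration over a borrowed collection; a compact sketch of that pattern, under the same import assumptions as above.

use crate::collection::RefLockCollection;
use crate::{Mutex, ThreadKey};

fn ref_iteration_sketch() {
    let mut key = ThreadKey::get().unwrap();
    let mutexes = [Mutex::new(0), Mutex::new(1), Mutex::new(2)];
    let collection = RefLockCollection::new(&mutexes);

    // iterating yields the underlying locks, which can each be locked
    // individually with the same thread key
    for (i, mutex) in collection.iter().enumerate() {
        mutex.scoped_lock(&mut key, |val| assert_eq!(*val, i));
    }
}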
diff --git a/src/collection/retry.rs b/src/collection/retry.rs
index 775ea29..70e5183 100644
--- a/src/collection/retry.rs
+++ b/src/collection/retry.rs
@@ -9,7 +9,8 @@ use crate::lockable::{
use crate::{Keyable, ThreadKey};
use super::utils::{
- attempt_to_recover_locks_from_panic, attempt_to_recover_reads_from_panic, get_locks_unsorted,
+ attempt_to_recover_reads_from_panic, attempt_to_recover_writes_from_panic, get_locks_unsorted,
+ scoped_read, scoped_try_read, scoped_try_write, scoped_write,
};
use super::{LockGuard, RetryingLockCollection};
@@ -40,7 +41,7 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
}
}
- unsafe fn raw_lock(&self) {
+ unsafe fn raw_write(&self) {
let locks = get_locks_unsorted(&self.data);
if locks.is_empty() {
@@ -57,7 +58,7 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
// This prevents us from entering a spin loop waiting for
// the same lock to be unlocked
// safety: we have the thread key
- locks[first_index.get()].raw_lock();
+ locks[first_index.get()].raw_write();
for (i, lock) in locks.iter().enumerate() {
if i == first_index.get() {
// we've already locked this one
@@ -69,15 +70,15 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
// it does return false, then the lock function is called
// immediately after, causing a panic
// safety: we have the thread key
- if lock.raw_try_lock() {
+ if lock.raw_try_write() {
locked.set(locked.get() + 1);
} else {
// safety: we already locked all of these
- attempt_to_recover_locks_from_panic(&locks[0..i]);
+ attempt_to_recover_writes_from_panic(&locks[0..i]);
if first_index.get() >= i {
// safety: this is already locked and can't be
// unlocked by the previous loop
- locks[first_index.get()].raw_unlock();
+ locks[first_index.get()].raw_unlock_write();
}
// nothing is locked anymore
@@ -94,15 +95,15 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
}
},
|| {
- utils::attempt_to_recover_locks_from_panic(&locks[0..locked.get()]);
+ utils::attempt_to_recover_writes_from_panic(&locks[0..locked.get()]);
if first_index.get() >= locked.get() {
- locks[first_index.get()].raw_unlock();
+ locks[first_index.get()].raw_unlock_write();
}
},
)
}
- unsafe fn raw_try_lock(&self) -> bool {
+ unsafe fn raw_try_write(&self) -> bool {
let locks = get_locks_unsorted(&self.data);
if locks.is_empty() {
@@ -117,26 +118,26 @@ unsafe impl<L: Lockable> RawLock for RetryingLockCollection<L> {
|| unsafe {
for (i, lock) in locks.iter().enumerate() {
// safety: we have the thread key
- if lock.raw_try_lock() {
+ if lock.raw_try_write() {
locked.set(locked.get() + 1);
} else {
// safety: we already locked all of these
- attempt_to_recover_locks_from_panic(&locks[0..i]);
+ attempt_to_recover_writes_from_panic(&locks[0..i]);
return false;
}
}
true
},
- || utils::attempt_to_recover_locks_from_panic(&locks[0..locked.get()]),
+ || utils::attempt_to_recover_writes_from_panic(&locks[0..locked.get()]),
)
}
- unsafe fn raw_unlock(&self) {
+ unsafe fn raw_unlock_write(&self) {
let locks = get_locks_unsorted(&self.data);
for lock in locks {
- lock.raw_unlock();
+ lock.raw_unlock_write();
}
}
@@ -243,7 +244,7 @@ unsafe impl<L: Lockable> Lockable for RetryingLockCollection<L> {
Self: 'a;
fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) {
- self.data.get_ptrs(ptrs)
+ ptrs.push(self)
}
unsafe fn guard(&self) -> Self::Guard<'_> {
@@ -347,13 +348,13 @@ impl<E: OwnedLockable + Extend<L>, L: OwnedLockable> Extend<L> for RetryingLockC
}
}
-impl<T, L: AsRef<T>> AsRef<T> for RetryingLockCollection<L> {
+impl<T: ?Sized, L: AsRef<T>> AsRef<T> for RetryingLockCollection<L> {
fn as_ref(&self) -> &T {
self.data.as_ref()
}
}
-impl<T, L: AsMut<T>> AsMut<T> for RetryingLockCollection<L> {
+impl<T: ?Sized, L: AsMut<T>> AsMut<T> for RetryingLockCollection<L> {
fn as_mut(&mut self) -> &mut T {
self.data.as_mut()
}
@@ -389,7 +390,8 @@ impl<L: OwnedLockable> RetryingLockCollection<L> {
/// ```
#[must_use]
pub const fn new(data: L) -> Self {
- Self { data }
+ // safety: the data cannot contain references
+ unsafe { Self::new_unchecked(data) }
}
}
@@ -410,7 +412,8 @@ impl<'a, L: OwnedLockable> RetryingLockCollection<&'a L> {
/// ```
#[must_use]
pub const fn new_ref(data: &'a L) -> Self {
- Self { data }
+ // safety: the data cannot contain references
+ unsafe { Self::new_unchecked(data) }
}
}
@@ -525,47 +528,20 @@ impl<L: Lockable> RetryingLockCollection<L> {
/// ```
#[must_use]
pub fn try_new(data: L) -> Option<Self> {
- (!contains_duplicates(&data)).then_some(Self { data })
+ // safety: the data is checked for duplicates before returning the collection
+ (!contains_duplicates(&data)).then_some(unsafe { Self::new_unchecked(data) })
}
- pub fn scoped_lock<R>(&self, key: impl Keyable, f: impl Fn(L::DataMut<'_>) -> R) -> R {
- unsafe {
- // safety: we have the thread key
- self.raw_lock();
-
- // safety: the data was just locked
- let r = f(self.data_mut());
-
- // safety: the collection is still locked
- self.raw_unlock();
-
- drop(key); // ensure the key stays alive long enough
-
- r
- }
+ pub fn scoped_lock<'a, R>(&'a self, key: impl Keyable, f: impl Fn(L::DataMut<'a>) -> R) -> R {
+ scoped_write(self, key, f)
}
- pub fn scoped_try_lock<Key: Keyable, R>(
- &self,
+ pub fn scoped_try_lock<'a, Key: Keyable, R>(
+ &'a self,
key: Key,
- f: impl Fn(L::DataMut<'_>) -> R,
+ f: impl Fn(L::DataMut<'a>) -> R,
) -> Result<R, Key> {
- unsafe {
- // safety: we have the thread key
- if !self.raw_try_lock() {
- return Err(key);
- }
-
- // safety: we just locked the collection
- let r = f(self.data_mut());
-
- // safety: the collection is still locked
- self.raw_unlock();
-
- drop(key); // ensures the key stays valid long enough
-
- Ok(r)
- }
+ scoped_try_write(self, key, f)
}
/// Locks the collection
@@ -591,7 +567,7 @@ impl<L: Lockable> RetryingLockCollection<L> {
pub fn lock(&self, key: ThreadKey) -> LockGuard<L::Guard<'_>> {
unsafe {
// safety: we're taking the thread key
- self.raw_lock();
+ self.raw_write();
LockGuard {
// safety: we just locked the collection
@@ -634,7 +610,7 @@ impl<L: Lockable> RetryingLockCollection<L> {
pub fn try_lock(&self, key: ThreadKey) -> Result<LockGuard<L::Guard<'_>>, ThreadKey> {
unsafe {
// safety: we're taking the thread key
- if self.raw_try_lock() {
+ if self.raw_try_write() {
Ok(LockGuard {
// safety: we just succeeded in locking everything
guard: self.guard(),
@@ -671,44 +647,16 @@ impl<L: Lockable> RetryingLockCollection<L> {
}
impl<L: Sharable> RetryingLockCollection<L> {
- pub fn scoped_read<R>(&self, key: impl Keyable, f: impl Fn(L::DataRef<'_>) -> R) -> R {
- unsafe {
- // safety: we have the thread key
- self.raw_read();
-
- // safety: the data was just locked
- let r = f(self.data_ref());
-
- // safety: the collection is still locked
- self.raw_unlock_read();
-
- drop(key); // ensure the key stays alive long enough
-
- r
- }
+ pub fn scoped_read<'a, R>(&'a self, key: impl Keyable, f: impl Fn(L::DataRef<'a>) -> R) -> R {
+ scoped_read(self, key, f)
}
- pub fn scoped_try_read<Key: Keyable, R>(
- &self,
+ pub fn scoped_try_read<'a, Key: Keyable, R>(
+ &'a self,
key: Key,
- f: impl Fn(L::DataRef<'_>) -> R,
+ f: impl Fn(L::DataRef<'a>) -> R,
) -> Result<R, Key> {
- unsafe {
- // safety: we have the thread key
- if !self.raw_try_read() {
- return Err(key);
- }
-
- // safety: we just locked the collection
- let r = f(self.data_ref());
-
- // safety: the collection is still locked
- self.raw_unlock_read();
-
- drop(key); // ensures the key stays valid long enough
-
- Ok(r)
- }
+ scoped_try_read(self, key, f)
}
/// Locks the collection, so that other threads can still read from it
@@ -778,7 +726,7 @@ impl<L: Sharable> RetryingLockCollection<L> {
pub fn try_read(&self, key: ThreadKey) -> Result<LockGuard<L::ReadGuard<'_>>, ThreadKey> {
unsafe {
// safety: we're taking the thread key
- if !self.raw_try_lock() {
+ if !self.raw_try_read() {
return Err(key);
}
@@ -911,7 +859,7 @@ where
mod tests {
use super::*;
use crate::collection::BoxedLockCollection;
- use crate::{LockCollection, Mutex, RwLock, ThreadKey};
+ use crate::{Mutex, RwLock, ThreadKey};
#[test]
fn nonduplicate_lock_references_are_allowed() {
@@ -927,6 +875,159 @@ mod tests {
}
#[test]
+ #[allow(clippy::float_cmp)]
+ fn uses_correct_default() {
+ let collection =
+ RetryingLockCollection::<(RwLock<f64>, Mutex<Option<i32>>, Mutex<usize>)>::default();
+ let tuple = collection.into_inner();
+ assert_eq!(tuple.0, 0.0);
+ assert!(tuple.1.is_none());
+ assert_eq!(tuple.2, 0)
+ }
+
+ #[test]
+ fn from() {
+ let key = ThreadKey::get().unwrap();
+ let collection =
+ RetryingLockCollection::from([Mutex::new("foo"), Mutex::new("bar"), Mutex::new("baz")]);
+ let guard = collection.lock(key);
+ assert_eq!(*guard[0], "foo");
+ assert_eq!(*guard[1], "bar");
+ assert_eq!(*guard[2], "baz");
+ }
+
+ #[test]
+ fn new_ref_works() {
+ let key = ThreadKey::get().unwrap();
+ let mutexes = [Mutex::new(0), Mutex::new(1)];
+ let collection = RetryingLockCollection::new_ref(&mutexes);
+ collection.scoped_lock(key, |guard| {
+ assert_eq!(*guard[0], 0);
+ assert_eq!(*guard[1], 1);
+ })
+ }
+
+ #[test]
+ fn scoped_read_sees_changes() {
+ let mut key = ThreadKey::get().unwrap();
+ let mutexes = [RwLock::new(24), RwLock::new(42)];
+ let collection = RetryingLockCollection::new(mutexes);
+ collection.scoped_lock(&mut key, |guard| *guard[0] = 128);
+
+ let sum = collection.scoped_read(&mut key, |guard| {
+ assert_eq!(*guard[0], 128);
+ assert_eq!(*guard[1], 42);
+ *guard[0] + *guard[1]
+ });
+
+ assert_eq!(sum, 128 + 42);
+ }
+
+ #[test]
+ fn get_mut_affects_scoped_read() {
+ let mut key = ThreadKey::get().unwrap();
+ let mutexes = [RwLock::new(24), RwLock::new(42)];
+ let mut collection = RetryingLockCollection::new(mutexes);
+ let guard = collection.get_mut();
+ *guard[0] = 128;
+
+ let sum = collection.scoped_read(&mut key, |guard| {
+ assert_eq!(*guard[0], 128);
+ assert_eq!(*guard[1], 42);
+ *guard[0] + *guard[1]
+ });
+
+ assert_eq!(sum, 128 + 42);
+ }
+
+ #[test]
+ fn scoped_try_lock_can_fail() {
+ let key = ThreadKey::get().unwrap();
+ let collection = RetryingLockCollection::new([Mutex::new(1), Mutex::new(2)]);
+ let guard = collection.lock(key);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let r = collection.scoped_try_lock(key, |_| {});
+ assert!(r.is_err());
+ });
+ });
+
+ drop(guard);
+ }
+
+ #[test]
+ fn scoped_try_read_can_fail() {
+ let key = ThreadKey::get().unwrap();
+ let collection = RetryingLockCollection::new([RwLock::new(1), RwLock::new(2)]);
+ let guard = collection.lock(key);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let r = collection.scoped_try_read(key, |_| {});
+ assert!(r.is_err());
+ });
+ });
+
+ drop(guard);
+ }
+
+ #[test]
+ fn try_lock_works() {
+ let key = ThreadKey::get().unwrap();
+ let collection = RetryingLockCollection::new([Mutex::new(1), Mutex::new(2)]);
+ let guard = collection.try_lock(key);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let guard = collection.try_lock(key);
+ assert!(guard.is_err());
+ });
+ });
+
+ assert!(guard.is_ok());
+ }
+
+ #[test]
+ fn try_read_works() {
+ let key = ThreadKey::get().unwrap();
+ let collection = RetryingLockCollection::new([RwLock::new(1), RwLock::new(2)]);
+ let guard = collection.try_read(key);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let guard = collection.try_read(key);
+ assert!(guard.is_ok());
+ });
+ });
+
+ assert!(guard.is_ok());
+ }
+
+ #[test]
+ fn try_read_fails_for_locked_collection() {
+ let key = ThreadKey::get().unwrap();
+ let mutexes = [RwLock::new(24), RwLock::new(42)];
+ let collection = RetryingLockCollection::new_ref(&mutexes);
+
+ std::thread::scope(|s| {
+ s.spawn(|| {
+ let key = ThreadKey::get().unwrap();
+ let guard = mutexes[1].write(key);
+ assert_eq!(*guard, 42);
+ std::mem::forget(guard);
+ });
+ });
+
+ let guard = collection.try_read(key);
+ assert!(guard.is_err());
+ }
+
+ #[test]
fn locks_all_inner_mutexes() {
let key = ThreadKey::get().unwrap();
let mutex1 = Mutex::new(0);
@@ -974,6 +1075,55 @@ mod tests {
}
#[test]
+ fn from_iterator() {
+ let key = ThreadKey::get().unwrap();
+ let collection: RetryingLockCollection<Vec<Mutex<&str>>> =
+ [Mutex::new("foo"), Mutex::new("bar"), Mutex::new("baz")]
+ .into_iter()
+ .collect();
+ let guard = collection.lock(key);
+ assert_eq!(*guard[0], "foo");
+ assert_eq!(*guard[1], "bar");
+ assert_eq!(*guard[2], "baz");
+ }
+
+ #[test]
+ fn into_owned_iterator() {
+ let collection = RetryingLockCollection::new([Mutex::new(0), Mutex::new(1), Mutex::new(2)]);
+ for (i, mutex) in collection.into_iter().enumerate() {
+ assert_eq!(mutex.into_inner(), i);
+ }
+ }
+
+ #[test]
+ fn into_ref_iterator() {
+ let mut key = ThreadKey::get().unwrap();
+ let collection = RetryingLockCollection::new([Mutex::new(0), Mutex::new(1), Mutex::new(2)]);
+ for (i, mutex) in (&collection).into_iter().enumerate() {
+ mutex.scoped_lock(&mut key, |val| assert_eq!(*val, i))
+ }
+ }
+
+ #[test]
+ fn ref_iterator() {
+ let mut key = ThreadKey::get().unwrap();
+ let collection = RetryingLockCollection::new([Mutex::new(0), Mutex::new(1), Mutex::new(2)]);
+ for (i, mutex) in collection.iter().enumerate() {
+ mutex.scoped_lock(&mut key, |val| assert_eq!(*val, i))
+ }
+ }
+
+ #[test]
+ fn mut_iterator() {
+ let mut key = ThreadKey::get().unwrap();
+ let mut collection =
+ RetryingLockCollection::new([Mutex::new(0), Mutex::new(1), Mutex::new(2)]);
+ for (i, mutex) in collection.iter_mut().enumerate() {
+ mutex.scoped_lock(&mut key, |val| assert_eq!(*val, i))
+ }
+ }
+
+ #[test]
fn extend_collection() {
let mutex1 = Mutex::new(0);
let mutex2 = Mutex::new(0);
@@ -991,9 +1141,76 @@ mod tests {
let guard = collection.lock(key);
assert!(guard.len() == 0);
- let key = LockCollection::<[RwLock<_>; 0]>::unlock(guard);
+ let key = RetryingLockCollection::<[RwLock<_>; 0]>::unlock(guard);
+
+ let guard = collection.read(key);
+ assert!(guard.len() == 0);
+ }
+
+ #[test]
+ fn read_empty_lock_collection() {
+ let key = ThreadKey::get().unwrap();
+ let collection: RetryingLockCollection<[RwLock<i32>; 0]> = RetryingLockCollection::new([]);
let guard = collection.read(key);
assert!(guard.len() == 0);
+ let key = RetryingLockCollection::<[RwLock<_>; 0]>::unlock_read(guard);
+
+ let guard = collection.lock(key);
+ assert!(guard.len() == 0);
+ }
+
+ #[test]
+ fn as_ref_works() {
+ let mutexes = [Mutex::new(0), Mutex::new(1)];
+ let collection = RetryingLockCollection::new_ref(&mutexes);
+
+ assert!(std::ptr::addr_eq(&mutexes, collection.as_ref()))
+ }
+
+ #[test]
+ fn as_mut_works() {
+ let mut mutexes = [Mutex::new(0), Mutex::new(1)];
+ let mut collection = RetryingLockCollection::new(&mut mutexes);
+
+ collection.as_mut()[0] = Mutex::new(42);
+
+ assert_eq!(*collection.as_mut()[0].get_mut(), 42);
+ }
+
+ #[test]
+ fn child() {
+ let mutexes = [Mutex::new(0), Mutex::new(1)];
+ let collection = RetryingLockCollection::new_ref(&mutexes);
+
+ assert!(std::ptr::addr_eq(&mutexes, *collection.child()))
+ }
+
+ #[test]
+ fn child_mut_works() {
+ let mut mutexes = [Mutex::new(0), Mutex::new(1)];
+ let mut collection = RetryingLockCollection::new(&mut mutexes);
+
+ collection.child_mut()[0] = Mutex::new(42);
+
+ assert_eq!(*collection.child_mut()[0].get_mut(), 42);
+ }
+
+ #[test]
+ fn into_child_works() {
+ let mutexes = [Mutex::new(0), Mutex::new(1)];
+ let mut collection = RetryingLockCollection::new(mutexes);
+
+ collection.child_mut()[0] = Mutex::new(42);
+
+ assert_eq!(
+ *collection
+ .into_child()
+ .as_mut()
+ .get_mut(0)
+ .unwrap()
+ .get_mut(),
+ 42
+ );
}
}
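
Besides the raw_* renames, retry.rs fixes try_read to call raw_try_read instead of raw_try_lock. A sketch of the behaviour checked by the new try_read_works test, with the same import assumptions:

use crate::collection::RetryingLockCollection;
use crate::{RwLock, ThreadKey};

fn retrying_concurrent_read_sketch() {
    let key = ThreadKey::get().unwrap();
    let collection = RetryingLockCollection::new([RwLock::new(1), RwLock::new(2)]);

    // a read guard held on this thread...
    let guard = collection.try_read(key).unwrap();

    // ...does not block readers on other threads, because try_read now
    // takes the shared lock rather than the exclusive one
    std::thread::scope(|s| {
        s.spawn(|| {
            let key = ThreadKey::get().unwrap();
            assert!(collection.try_read(key).is_ok());
        });
    });

    drop(guard);
}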
diff --git a/src/collection/utils.rs b/src/collection/utils.rs
index 1d96e5c..59a68da 100644
--- a/src/collection/utils.rs
+++ b/src/collection/utils.rs
@@ -1,7 +1,8 @@
use std::cell::Cell;
use crate::handle_unwind::handle_unwind;
-use crate::lockable::{Lockable, RawLock};
+use crate::lockable::{Lockable, RawLock, Sharable};
+use crate::Keyable;
#[must_use]
pub fn get_locks<L: Lockable>(data: &L) -> Vec<&dyn RawLock> {
@@ -32,18 +33,18 @@ pub fn ordered_contains_duplicates(l: &[&dyn RawLock]) -> bool {
}
/// Lock a set of locks in the given order. It's UB to call this without a `ThreadKey`
-pub unsafe fn ordered_lock(locks: &[&dyn RawLock]) {
+pub unsafe fn ordered_write(locks: &[&dyn RawLock]) {
// these will be unlocked in case of a panic
let locked = Cell::new(0);
handle_unwind(
|| {
for lock in locks {
- lock.raw_lock();
+ lock.raw_write();
locked.set(locked.get() + 1);
}
},
- || attempt_to_recover_locks_from_panic(&locks[0..locked.get()]),
+ || attempt_to_recover_writes_from_panic(&locks[0..locked.get()]),
)
}
@@ -65,19 +66,19 @@ pub unsafe fn ordered_read(locks: &[&dyn RawLock]) {
/// Locks the locks in the order they are given. This causes deadlock if the
/// locks contain duplicates, or if this is called by multiple threads with the
/// locks in different orders.
-pub unsafe fn ordered_try_lock(locks: &[&dyn RawLock]) -> bool {
+pub unsafe fn ordered_try_write(locks: &[&dyn RawLock]) -> bool {
let locked = Cell::new(0);
handle_unwind(
|| unsafe {
for (i, lock) in locks.iter().enumerate() {
// safety: we have the thread key
- if lock.raw_try_lock() {
+ if lock.raw_try_write() {
locked.set(locked.get() + 1);
} else {
for lock in &locks[0..i] {
// safety: this lock was already acquired
- lock.raw_unlock();
+ lock.raw_unlock_write();
}
return false;
}
@@ -87,7 +88,7 @@ pub unsafe fn ordered_try_lock(locks: &[&dyn RawLock]) -> bool {
},
||
// safety: everything in locked is locked
- attempt_to_recover_locks_from_panic(&locks[0..locked.get()]),
+ attempt_to_recover_writes_from_panic(&locks[0..locked.get()]),
)
}
@@ -120,12 +121,104 @@ pub unsafe fn ordered_try_read(locks: &[&dyn RawLock]) -> bool {
)
}
+pub fn scoped_write<'a, L: RawLock + Lockable, R>(
+ collection: &'a L,
+ key: impl Keyable,
+ f: impl FnOnce(L::DataMut<'a>) -> R,
+) -> R {
+ unsafe {
+ // safety: we have the key
+ collection.raw_write();
+
+ // safety: we just locked this
+ let r = f(collection.data_mut());
+
+ // this ensures the key is held long enough
+ drop(key);
+
+ // safety: we've locked already, and aren't using the data again
+ collection.raw_unlock_write();
+
+ r
+ }
+}
+
+pub fn scoped_try_write<'a, L: RawLock + Lockable, Key: Keyable, R>(
+ collection: &'a L,
+ key: Key,
+ f: impl FnOnce(L::DataMut<'a>) -> R,
+) -> Result<R, Key> {
+ unsafe {
+ // safety: we have the key
+ if !collection.raw_try_write() {
+ return Err(key);
+ }
+
+ // safety: we just locked this
+ let r = f(collection.data_mut());
+
+ // this ensures the key is held long enough
+ drop(key);
+
+ // safety: we've locked already, and aren't using the data again
+ collection.raw_unlock_write();
+
+ Ok(r)
+ }
+}
+
+pub fn scoped_read<'a, L: RawLock + Sharable, R>(
+ collection: &'a L,
+ key: impl Keyable,
+ f: impl FnOnce(L::DataRef<'a>) -> R,
+) -> R {
+ unsafe {
+ // safety: we have the key
+ collection.raw_read();
+
+ // safety: we just locked this
+ let r = f(collection.data_ref());
+
+ // this ensures the key is held long enough
+ drop(key);
+
+ // safety: we've locked already, and aren't using the data again
+ collection.raw_unlock_read();
+
+ r
+ }
+}
+
+pub fn scoped_try_read<'a, L: RawLock + Sharable, Key: Keyable, R>(
+ collection: &'a L,
+ key: Key,
+ f: impl FnOnce(L::DataRef<'a>) -> R,
+) -> Result<R, Key> {
+ unsafe {
+ // safety: we have the key
+ if !collection.raw_try_read() {
+ return Err(key);
+ }
+
+ // safety: we just locked this
+ let r = f(collection.data_ref());
+
+ // this ensures the key is held long enough
+ drop(key);
+
+ // safety: we've locked already, and aren't using the data again
+ collection.raw_unlock_read();
+
+ Ok(r)
+ }
+}
+
/// Unlocks the already locked locks in order to recover from a panic
-pub unsafe fn attempt_to_recover_locks_from_panic(locks: &[&dyn RawLock]) {
+pub unsafe fn attempt_to_recover_writes_from_panic(locks: &[&dyn RawLock]) {
handle_unwind(
|| {
// safety: the caller assumes that these are already locked
- locks.iter().for_each(|lock| lock.raw_unlock());
+ locks.iter().for_each(|lock| lock.raw_unlock_write());
},
// if we get another panic in here, we'll just have to poison what remains
|| locks.iter().for_each(|l| l.poison()),