From 7bd236853ef5ae705328c8fdc492cf60fc6887c1 Mon Sep 17 00:00:00 2001 From: Mica White Date: Wed, 13 Mar 2024 22:44:46 -0400 Subject: Lockable overhaul --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/lib.rs') diff --git a/src/lib.rs b/src/lib.rs index 4093ac4..64813af 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -106,14 +106,14 @@ //! println!("{}", *data.1); //! ``` -mod collection; mod key; mod lockable; +pub mod collection; pub mod mutex; pub mod rwlock; -pub use collection::LockCollection; +pub use collection::RefLockCollection; pub use key::{Keyable, ThreadKey}; pub use lockable::{Lockable, OwnedLockable}; -- cgit v1.2.3 From ad76d43dc28b8802d64eb7ddcd9e02d3d12ac89a Mon Sep 17 00:00:00 2001 From: Mica White Date: Thu, 14 Mar 2024 17:21:51 -0400 Subject: Implement sequenced collections --- examples/dining_philosophers.rs | 2 +- examples/double_mutex.rs | 2 +- examples/list.rs | 2 +- src/collection.rs | 18 +-- src/collection/boxed_collection.rs | 60 +++++++++ src/collection/collection.rs | 244 ------------------------------------ src/collection/owned_collection.rs | 68 ++++++++++ src/collection/ref_collection.rs | 245 +++++++++++++++++++++++++++++++++++++ src/lib.rs | 1 - 9 files changed, 381 insertions(+), 261 deletions(-) create mode 100644 src/collection/boxed_collection.rs delete mode 100644 src/collection/collection.rs create mode 100644 src/collection/owned_collection.rs create mode 100644 src/collection/ref_collection.rs (limited to 'src/lib.rs') diff --git a/examples/dining_philosophers.rs b/examples/dining_philosophers.rs index 34efb0e..2f2fa0d 100644 --- a/examples/dining_philosophers.rs +++ b/examples/dining_philosophers.rs @@ -1,6 +1,6 @@ use std::{thread, time::Duration}; -use happylock::{Mutex, RefLockCollection, ThreadKey}; +use happylock::{collection::RefLockCollection, Mutex, ThreadKey}; static PHILOSOPHERS: [Philosopher; 5] = [ Philosopher { diff --git a/examples/double_mutex.rs 
b/examples/double_mutex.rs index cd627c4..e2b08df 100644 --- a/examples/double_mutex.rs +++ b/examples/double_mutex.rs @@ -1,6 +1,6 @@ use std::thread; -use happylock::{Mutex, RefLockCollection, ThreadKey}; +use happylock::{collection::RefLockCollection, Mutex, ThreadKey}; const N: usize = 10; diff --git a/examples/list.rs b/examples/list.rs index cf344e7..dda468a 100644 --- a/examples/list.rs +++ b/examples/list.rs @@ -1,6 +1,6 @@ use std::thread; -use happylock::{Mutex, RefLockCollection, ThreadKey}; +use happylock::{collection::RefLockCollection, Mutex, ThreadKey}; const N: usize = 10; diff --git a/src/collection.rs b/src/collection.rs index 1c276a6..93adf16 100644 --- a/src/collection.rs +++ b/src/collection.rs @@ -1,13 +1,14 @@ -use std::marker::{PhantomData, PhantomPinned}; -use std::ptr::NonNull; +use std::{marker::PhantomData, ptr::NonNull}; use crate::{ key::Keyable, lockable::{Lock, Lockable}, }; -mod collection; +mod boxed_collection; mod guard; +mod owned_collection; +mod ref_collection; pub struct OwnedLockCollection { data: L, @@ -22,16 +23,7 @@ pub struct RefLockCollection<'a, L> { data: &'a L, } -pub struct BoxedLockCollection(RefLockCollection<'static, L>); - -pub struct PinnedLockCollection { - _unpin: PhantomPinned, - data: L, - locks: Vec>, -} - -unsafe impl Send for PinnedLockCollection {} -unsafe impl Sync for PinnedLockCollection {} +pub struct BoxedLockCollection<'a, L>(RefLockCollection<'a, L>); /// A RAII guard for a generic [`Lockable`] type. 
pub struct LockGuard<'a, 'key: 'a, L: Lockable<'a>, Key: Keyable + 'key> { diff --git a/src/collection/boxed_collection.rs b/src/collection/boxed_collection.rs new file mode 100644 index 0000000..bcb941b --- /dev/null +++ b/src/collection/boxed_collection.rs @@ -0,0 +1,60 @@ +use std::ops::{Deref, DerefMut}; + +use crate::{Lockable, OwnedLockable}; + +use super::{BoxedLockCollection, RefLockCollection}; + +impl<'a, L> Deref for BoxedLockCollection<'a, L> { + type Target = RefLockCollection<'a, L>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a, L> DerefMut for BoxedLockCollection<'a, L> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl<'a, L> Drop for BoxedLockCollection<'a, L> { + fn drop(&mut self) { + // this was created with Box::new + let boxed = unsafe { Box::from_raw((self.0.data as *const L).cast_mut()) }; + drop(boxed); + } +} + +impl<'a, L: OwnedLockable<'a> + 'a> BoxedLockCollection<'a, L> { + #[must_use] + pub fn new(data: L) -> Self { + let boxed = Box::leak(Box::new(data)); + Self(RefLockCollection::new(boxed)) + } +} + +impl<'a, L: OwnedLockable<'a> + 'a> BoxedLockCollection<'a, &'a L> { + #[must_use] + pub fn new_ref(data: &'a L) -> Self { + let boxed = Box::leak(Box::new(data)); + + // this is a reference to an OwnedLockable, which can't possibly + // contain inner duplicates + Self(unsafe { RefLockCollection::new_unchecked(boxed) }) + } +} + +impl<'a, L: Lockable<'a> + 'a> BoxedLockCollection<'a, L> { + #[must_use] + pub unsafe fn new_unchecked(data: L) -> Self { + let boxed = Box::leak(Box::new(data)); + Self(RefLockCollection::new_unchecked(boxed)) + } + + #[must_use] + pub fn try_new(data: L) -> Option { + let boxed = Box::leak(Box::new(data)); + RefLockCollection::try_new(boxed).map(Self) + } +} diff --git a/src/collection/collection.rs b/src/collection/collection.rs deleted file mode 100644 index a8d25a5..0000000 --- a/src/collection/collection.rs +++ /dev/null @@ -1,244 +0,0 @@ -use 
std::marker::PhantomData; - -use crate::{key::Keyable, lockable::Lock, Lockable, OwnedLockable}; - -use super::{LockGuard, RefLockCollection}; - -fn get_locks<'a, L: Lockable<'a> + 'a>(data: &'a L) -> Vec<&'a dyn Lock> { - let mut locks = Vec::new(); - data.get_ptrs(&mut locks); - locks.sort_by_key(|lock| std::ptr::from_ref(*lock)); - locks -} - -/// returns `true` if the sorted list contains a duplicate -#[must_use] -fn contains_duplicates(l: &[&dyn Lock]) -> bool { - l.windows(2).any(|window| { - std::ptr::addr_eq(std::ptr::from_ref(window[0]), std::ptr::from_ref(window[1])) - }) -} - -impl<'a, L: Lockable<'a>> AsRef for RefLockCollection<'a, L> { - fn as_ref(&self) -> &L { - self.data - } -} - -impl<'a, L: Lockable<'a>> AsRef for RefLockCollection<'a, L> { - fn as_ref(&self) -> &Self { - self - } -} - -impl<'a, L: Lockable<'a>> AsMut for RefLockCollection<'a, L> { - fn as_mut(&mut self) -> &mut Self { - self - } -} - -impl<'a, L> IntoIterator for &'a RefLockCollection<'a, L> -where - &'a L: IntoIterator, -{ - type Item = <&'a L as IntoIterator>::Item; - type IntoIter = <&'a L as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.data.into_iter() - } -} - -impl<'a, L: OwnedLockable<'a> + 'a> RefLockCollection<'a, L> { - /// Creates a new collection of owned locks. - /// - /// Because the locks are owned, there's no need to do any checks for - /// duplicate values. - /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex}; - /// - /// let data = (Mutex::new(0), Mutex::new("")); - /// let lock = LockCollection::new(&data); - /// ``` - #[must_use] - pub fn new(data: &'a L) -> RefLockCollection { - RefLockCollection { - locks: get_locks(data), - data, - } - } -} - -impl<'a, L: Lockable<'a>> RefLockCollection<'a, L> { - /// Creates a new collections of locks. - /// - /// # Safety - /// - /// This results in undefined behavior if any locks are presented twice - /// within this collection. 
- /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex}; - /// - /// let data1 = Mutex::new(0); - /// let data2 = Mutex::new(""); - /// - /// // safety: data1 and data2 refer to distinct mutexes - /// let lock = unsafe { LockCollection::new_unchecked((&data1, &data2)) }; - /// ``` - #[must_use] - pub unsafe fn new_unchecked(data: &'a L) -> Self { - Self { - data, - locks: get_locks(data), - } - } - - /// Creates a new collection of locks. - /// - /// This returns `None` if any locks are found twice in the given - /// collection. - /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex}; - /// - /// let data1 = Mutex::new(0); - /// let data2 = Mutex::new(""); - /// - /// // data1 and data2 refer to distinct mutexes, so this won't panic - /// let lock = LockCollection::try_new((&data1, &data2)).unwrap(); - /// ``` - #[must_use] - pub fn try_new(data: &'a L) -> Option { - let locks = get_locks(data); - if contains_duplicates(&locks) { - return None; - } - - Some(Self { locks, data }) - } - - /// Locks the collection - /// - /// This function returns a guard that can be used to access the underlying - /// data. When the guard is dropped, the locks in the collection are also - /// dropped. - /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex, ThreadKey}; - /// - /// let key = ThreadKey::get().unwrap(); - /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); - /// - /// let mut guard = lock.lock(key); - /// *guard.0 += 1; - /// *guard.1 = "1"; - /// ``` - pub fn lock<'key: 'a, Key: Keyable + 'key>(&'a self, key: Key) -> LockGuard<'a, 'key, L, Key> { - for lock in &self.locks { - // safety: we have the thread key - unsafe { lock.lock() }; - } - - LockGuard { - // safety: we've already acquired the lock - guard: unsafe { self.data.guard() }, - key, - _phantom: PhantomData, - } - } - - /// Attempts to lock the without blocking. 
- /// - /// If successful, this method returns a guard that can be used to access - /// the data, and unlocks the data when it is dropped. Otherwise, `None` is - /// returned. - /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex, ThreadKey}; - /// - /// let key = ThreadKey::get().unwrap(); - /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); - /// - /// match lock.try_lock(key) { - /// Some(mut guard) => { - /// *guard.0 += 1; - /// *guard.1 = "1"; - /// }, - /// None => unreachable!(), - /// }; - /// - /// ``` - pub fn try_lock<'key: 'a, Key: Keyable + 'key>( - &'a self, - key: Key, - ) -> Option> { - let guard = unsafe { - for (i, lock) in self.locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_lock(); - - if !success { - for lock in &self.locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } - } - - // safety: we've acquired the locks - self.data.guard() - }; - - Some(LockGuard { - guard, - key, - _phantom: PhantomData, - }) - } - - /// Unlocks the underlying lockable data type, returning the key that's - /// associated with it. - /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex, ThreadKey}; - /// - /// let key = ThreadKey::get().unwrap(); - /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); - /// - /// let mut guard = lock.lock(key); - /// *guard.0 += 1; - /// *guard.1 = "1"; - /// let key = LockCollection::unlock(guard); - /// ``` - #[allow(clippy::missing_const_for_fn)] - pub fn unlock<'key: 'a, Key: Keyable + 'key>(guard: LockGuard<'a, 'key, L, Key>) -> Key { - drop(guard.guard); - guard.key - } -} - -impl<'a, L: 'a> RefLockCollection<'a, L> -where - &'a L: IntoIterator, -{ - /// Returns an iterator over references to each value in the collection. 
- #[must_use] - pub fn iter(&'a self) -> <&'a L as IntoIterator>::IntoIter { - self.into_iter() - } -} diff --git a/src/collection/owned_collection.rs b/src/collection/owned_collection.rs new file mode 100644 index 0000000..dbc9a45 --- /dev/null +++ b/src/collection/owned_collection.rs @@ -0,0 +1,68 @@ +use std::marker::PhantomData; + +use crate::{lockable::Lock, Keyable, Lockable, OwnedLockable}; + +use super::{LockGuard, OwnedLockCollection}; + +fn get_locks<'a, L: Lockable<'a> + 'a>(data: &'a L) -> Vec<&'a dyn Lock> { + let mut locks = Vec::new(); + data.get_ptrs(&mut locks); + locks +} + +impl<'a, L: OwnedLockable<'a>> OwnedLockCollection { + #[must_use] + pub const fn new(data: L) -> Self { + Self { data } + } + + pub fn lock<'s: 'a, 'key, Key: Keyable + 'key>( + &'s self, + key: Key, + ) -> LockGuard<'a, 'key, L, Key> { + let locks = get_locks(&self.data); + for lock in locks { + // safety: we have the thread key, and these locks happen in a + // predetermined order + unsafe { lock.lock() }; + } + + // safety: we've locked all of this already + let guard = unsafe { self.data.guard() }; + LockGuard { + guard, + key, + _phantom: PhantomData, + } + } + + pub fn try_lock<'key: 'a, Key: Keyable + 'key>( + &'a self, + key: Key, + ) -> Option> { + let locks = get_locks(&self.data); + let guard = unsafe { + for (i, lock) in locks.iter().enumerate() { + // safety: we have the thread key + let success = lock.try_lock(); + + if !success { + for lock in &locks[0..i] { + // safety: this lock was already acquired + lock.unlock(); + } + return None; + } + } + + // safety: we've acquired the locks + self.data.guard() + }; + + Some(LockGuard { + guard, + key, + _phantom: PhantomData, + }) + } +} diff --git a/src/collection/ref_collection.rs b/src/collection/ref_collection.rs new file mode 100644 index 0000000..3e4d5f8 --- /dev/null +++ b/src/collection/ref_collection.rs @@ -0,0 +1,245 @@ +use std::marker::PhantomData; + +use crate::{key::Keyable, lockable::Lock, Lockable, 
OwnedLockable}; + +use super::{LockGuard, RefLockCollection}; + +#[must_use] +fn get_locks<'a, L: Lockable<'a> + 'a>(data: &'a L) -> Vec<&'a dyn Lock> { + let mut locks = Vec::new(); + data.get_ptrs(&mut locks); + locks.sort_by_key(|lock| std::ptr::from_ref(*lock)); + locks +} + +/// returns `true` if the sorted list contains a duplicate +#[must_use] +fn contains_duplicates(l: &[&dyn Lock]) -> bool { + l.windows(2).any(|window| { + std::ptr::addr_eq(std::ptr::from_ref(window[0]), std::ptr::from_ref(window[1])) + }) +} + +impl<'a, L: Lockable<'a>> AsRef for RefLockCollection<'a, L> { + fn as_ref(&self) -> &L { + self.data + } +} + +impl<'a, L: Lockable<'a>> AsRef for RefLockCollection<'a, L> { + fn as_ref(&self) -> &Self { + self + } +} + +impl<'a, L: Lockable<'a>> AsMut for RefLockCollection<'a, L> { + fn as_mut(&mut self) -> &mut Self { + self + } +} + +impl<'a, L> IntoIterator for &'a RefLockCollection<'a, L> +where + &'a L: IntoIterator, +{ + type Item = <&'a L as IntoIterator>::Item; + type IntoIter = <&'a L as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.data.into_iter() + } +} + +impl<'a, L: OwnedLockable<'a> + 'a> RefLockCollection<'a, L> { + /// Creates a new collection of owned locks. + /// + /// Because the locks are owned, there's no need to do any checks for + /// duplicate values. + /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex}; + /// + /// let data = (Mutex::new(0), Mutex::new("")); + /// let lock = LockCollection::new(&data); + /// ``` + #[must_use] + pub fn new(data: &'a L) -> RefLockCollection { + RefLockCollection { + locks: get_locks(data), + data, + } + } +} + +impl<'a, L: Lockable<'a>> RefLockCollection<'a, L> { + /// Creates a new collections of locks. + /// + /// # Safety + /// + /// This results in undefined behavior if any locks are presented twice + /// within this collection. 
+ /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex}; + /// + /// let data1 = Mutex::new(0); + /// let data2 = Mutex::new(""); + /// + /// // safety: data1 and data2 refer to distinct mutexes + /// let lock = unsafe { LockCollection::new_unchecked((&data1, &data2)) }; + /// ``` + #[must_use] + pub unsafe fn new_unchecked(data: &'a L) -> Self { + Self { + data, + locks: get_locks(data), + } + } + + /// Creates a new collection of locks. + /// + /// This returns `None` if any locks are found twice in the given + /// collection. + /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex}; + /// + /// let data1 = Mutex::new(0); + /// let data2 = Mutex::new(""); + /// + /// // data1 and data2 refer to distinct mutexes, so this won't panic + /// let lock = LockCollection::try_new((&data1, &data2)).unwrap(); + /// ``` + #[must_use] + pub fn try_new(data: &'a L) -> Option { + let locks = get_locks(data); + if contains_duplicates(&locks) { + return None; + } + + Some(Self { locks, data }) + } + + /// Locks the collection + /// + /// This function returns a guard that can be used to access the underlying + /// data. When the guard is dropped, the locks in the collection are also + /// dropped. + /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex, ThreadKey}; + /// + /// let key = ThreadKey::get().unwrap(); + /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); + /// + /// let mut guard = lock.lock(key); + /// *guard.0 += 1; + /// *guard.1 = "1"; + /// ``` + pub fn lock<'key: 'a, Key: Keyable + 'key>(&'a self, key: Key) -> LockGuard<'a, 'key, L, Key> { + for lock in &self.locks { + // safety: we have the thread key + unsafe { lock.lock() }; + } + + LockGuard { + // safety: we've already acquired the lock + guard: unsafe { self.data.guard() }, + key, + _phantom: PhantomData, + } + } + + /// Attempts to lock the without blocking. 
+ /// + /// If successful, this method returns a guard that can be used to access + /// the data, and unlocks the data when it is dropped. Otherwise, `None` is + /// returned. + /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex, ThreadKey}; + /// + /// let key = ThreadKey::get().unwrap(); + /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); + /// + /// match lock.try_lock(key) { + /// Some(mut guard) => { + /// *guard.0 += 1; + /// *guard.1 = "1"; + /// }, + /// None => unreachable!(), + /// }; + /// + /// ``` + pub fn try_lock<'key: 'a, Key: Keyable + 'key>( + &'a self, + key: Key, + ) -> Option> { + let guard = unsafe { + for (i, lock) in self.locks.iter().enumerate() { + // safety: we have the thread key + let success = lock.try_lock(); + + if !success { + for lock in &self.locks[0..i] { + // safety: this lock was already acquired + lock.unlock(); + } + return None; + } + } + + // safety: we've acquired the locks + self.data.guard() + }; + + Some(LockGuard { + guard, + key, + _phantom: PhantomData, + }) + } + + /// Unlocks the underlying lockable data type, returning the key that's + /// associated with it. + /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex, ThreadKey}; + /// + /// let key = ThreadKey::get().unwrap(); + /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); + /// + /// let mut guard = lock.lock(key); + /// *guard.0 += 1; + /// *guard.1 = "1"; + /// let key = LockCollection::unlock(guard); + /// ``` + #[allow(clippy::missing_const_for_fn)] + pub fn unlock<'key: 'a, Key: Keyable + 'key>(guard: LockGuard<'a, 'key, L, Key>) -> Key { + drop(guard.guard); + guard.key + } +} + +impl<'a, L: 'a> RefLockCollection<'a, L> +where + &'a L: IntoIterator, +{ + /// Returns an iterator over references to each value in the collection. 
+ #[must_use] + pub fn iter(&'a self) -> <&'a L as IntoIterator>::IntoIter { + self.into_iter() + } +} diff --git a/src/lib.rs b/src/lib.rs index 64813af..92b31a0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -113,7 +113,6 @@ pub mod collection; pub mod mutex; pub mod rwlock; -pub use collection::RefLockCollection; pub use key::{Keyable, ThreadKey}; pub use lockable::{Lockable, OwnedLockable}; -- cgit v1.2.3 From a4625296cb98a68a590ae1aa78b07f190a850f37 Mon Sep 17 00:00:00 2001 From: Botahamec Date: Tue, 21 May 2024 13:39:57 -0400 Subject: fix errors --- src/key.rs | 2 +- src/lib.rs | 8 ++++---- src/rwlock.rs | 6 +++--- src/rwlock/read_lock.rs | 12 ++++++------ src/rwlock/rwlock.rs | 2 +- src/rwlock/write_lock.rs | 10 +++++----- 6 files changed, 20 insertions(+), 20 deletions(-) (limited to 'src/lib.rs') diff --git a/src/key.rs b/src/key.rs index 1cfa209..875f4be 100644 --- a/src/key.rs +++ b/src/key.rs @@ -20,7 +20,7 @@ static KEY: Lazy> = Lazy::new(ThreadLocal::new); /// The key for the current thread. /// /// Only one of these exist per thread. To get the current thread's key, call -/// [`ThreadKey::get`]. If the `ThreadKey` is dropped, it can be reobtained. +/// [`ThreadKey::get`]. If the `ThreadKey` is dropped, it can be re-obtained. pub struct ThreadKey { phantom: PhantomData<*const ()>, // implement !Send and !Sync } diff --git a/src/lib.rs b/src/lib.rs index 92b31a0..7e7930f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -22,10 +22,10 @@ //! 4. **partial allocation** //! //! This library seeks to solve **partial allocation** by requiring total -//! allocation. All of the resources a thread needs must be allocated at the -//! same time. In order to request new resources, the old resources must be -//! dropped first. Requesting multiple resources at once is atomic. You either -//! get all of the requested resources or none at all. +//! allocation. All the resources a thread needs must be allocated at the same +//! time. 
In order to request new resources, the old resources must be dropped +//! first. Requesting multiple resources at once is atomic. You either get all +//! the requested resources or none at all. //! //! # Performance //! diff --git a/src/rwlock.rs b/src/rwlock.rs index 40c5a6e..8f1ba8f 100644 --- a/src/rwlock.rs +++ b/src/rwlock.rs @@ -22,7 +22,7 @@ pub type ParkingRwLock = RwLock; /// A reader-writer lock /// /// This type of lock allows a number of readers or at most one writer at any -/// point in time. The write portion of thislock typically allows modification +/// point in time. The write portion of this lock typically allows modification /// of the underlying data (exclusive access) and the read portion of this lock /// typically allows for read-only access (shared access). /// @@ -57,7 +57,7 @@ pub struct RwLock { /// /// [`LockCollection`]: `crate::LockCollection` #[repr(transparent)] -pub struct ReadLock(RwLock); +pub struct ReadLock<'l, T: ?Sized, R>(&'l RwLock); /// Grants write access to an [`RwLock`] /// @@ -66,7 +66,7 @@ pub struct ReadLock(RwLock); /// /// [`LockCollection`]: `crate::LockCollection` #[repr(transparent)] -pub struct WriteLock(RwLock); +pub struct WriteLock<'l, T: ?Sized, R>(&'l RwLock); /// RAII structure that unlocks the shared read access to a [`RwLock`] pub struct RwLockReadRef<'a, T: ?Sized, R: RawRwLock>( diff --git a/src/rwlock/read_lock.rs b/src/rwlock/read_lock.rs index a8bb9be..011bd8c 100644 --- a/src/rwlock/read_lock.rs +++ b/src/rwlock/read_lock.rs @@ -28,8 +28,8 @@ impl Debug for ReadLock { } } -impl From> for ReadLock { - fn from(value: RwLock) -> Self { +impl<'l, T, R> From<&'l RwLock> for ReadLock<'l, T, R> { + fn from(value: &'l RwLock) -> Self { Self::new(value) } } @@ -40,7 +40,7 @@ impl AsRef> for ReadLock { } } -impl ReadLock { +impl<'l, T, R> ReadLock<'l, T, R> { /// Creates a new `ReadLock` which accesses the given [`RwLock`] /// /// # Examples @@ -52,12 +52,12 @@ impl ReadLock { /// let read_lock = 
ReadLock::new(&lock); /// ``` #[must_use] - pub const fn new(rwlock: RwLock) -> Self { + pub const fn new(rwlock: &'l RwLock) -> Self { Self(rwlock) } } -impl ReadLock { +impl<'l, T: ?Sized, R: RawRwLock> ReadLock<'l, T, R> { /// Locks the underlying [`RwLock`] with shared read access, blocking the /// current thread until it can be acquired. pub fn lock<'s, 'key: 's, Key: Keyable + 'key>( @@ -82,7 +82,7 @@ impl ReadLock { self.0.try_read_no_key() } - /// Immediately drops the guard, and consequentlyreleases the shared lock + /// Immediately drops the guard, and consequently releases the shared lock /// on the underlying [`RwLock`]. pub fn unlock<'key, Key: Keyable + 'key>(guard: RwLockReadGuard<'_, 'key, T, Key, R>) -> Key { RwLock::unlock_read(guard) diff --git a/src/rwlock/rwlock.rs b/src/rwlock/rwlock.rs index d16befe..7070b0e 100644 --- a/src/rwlock/rwlock.rs +++ b/src/rwlock/rwlock.rs @@ -254,7 +254,7 @@ impl RwLock { /// Attempts to lock this `RwLock` with exclusive write access. /// /// This function does not block. If the lock could not be acquired at this - /// time, then `None` is returned. Otherwise an RAII guard is returned + /// time, then `None` is returned. Otherwise, an RAII guard is returned /// which will release the lock when it is dropped. 
/// /// This function does not provide any guarantees with respect to the diff --git a/src/rwlock/write_lock.rs b/src/rwlock/write_lock.rs index a344125..1f7112a 100644 --- a/src/rwlock/write_lock.rs +++ b/src/rwlock/write_lock.rs @@ -21,15 +21,15 @@ impl Debug for WriteLock { } } - f.debug_struct("ReadLock") + f.debug_struct("WriteLock") .field("data", &LockedPlaceholder) .finish() } } } -impl From> for WriteLock { - fn from(value: RwLock) -> Self { +impl<'l, T, R> From<&'l RwLock> for WriteLock<'l, T, R> { + fn from(value: &'l RwLock) -> Self { Self::new(value) } } @@ -40,7 +40,7 @@ impl AsRef> for WriteLock { } } -impl WriteLock { +impl<'l, T, R> WriteLock<'l, T, R> { /// Creates a new `WriteLock` which accesses the given [`RwLock`] /// /// # Examples @@ -52,7 +52,7 @@ impl WriteLock { /// let write_lock = WriteLock::new(&lock); /// ``` #[must_use] - pub const fn new(rwlock: RwLock) -> Self { + pub const fn new(rwlock: &'l RwLock) -> Self { Self(rwlock) } } -- cgit v1.2.3 From 86610b631c20832d160c1a38181080232a05b508 Mon Sep 17 00:00:00 2001 From: Botahamec Date: Tue, 21 May 2024 19:17:11 -0400 Subject: Sharable API --- examples/dining_philosophers.rs | 3 +- examples/double_mutex.rs | 3 +- src/collection.rs | 19 ++- src/collection/boxed.rs | 60 ++++++++ src/collection/boxed_collection.rs | 60 -------- src/collection/guard.rs | 8 +- src/collection/owned.rs | 158 +++++++++++++++++++++ src/collection/owned_collection.rs | 65 --------- src/collection/ref.rs | 267 ++++++++++++++++++++++++++++++++++++ src/collection/ref_collection.rs | 244 --------------------------------- src/collection/retry.rs | 271 +++++++++++++++++++++++++++++++++++++ src/collection/retry_collection.rs | 138 ------------------- src/lib.rs | 3 +- src/lockable.rs | 235 ++++++++++++++++++++++++++++++++ 14 files changed, 1009 insertions(+), 525 deletions(-) create mode 100644 src/collection/boxed.rs delete mode 100644 src/collection/boxed_collection.rs create mode 100644 src/collection/owned.rs 
delete mode 100644 src/collection/owned_collection.rs create mode 100644 src/collection/ref.rs delete mode 100644 src/collection/ref_collection.rs create mode 100644 src/collection/retry.rs delete mode 100644 src/collection/retry_collection.rs (limited to 'src/lib.rs') diff --git a/examples/dining_philosophers.rs b/examples/dining_philosophers.rs index 1340564..70826ba 100644 --- a/examples/dining_philosophers.rs +++ b/examples/dining_philosophers.rs @@ -1,5 +1,6 @@ use std::{thread, time::Duration}; +use happylock::collection::RetryingLockCollection; use happylock::{collection::RefLockCollection, Mutex, ThreadKey}; static PHILOSOPHERS: [Philosopher; 5] = [ @@ -51,7 +52,7 @@ impl Philosopher { // safety: no philosopher asks for the same fork twice let forks = [&FORKS[self.left], &FORKS[self.right]]; - let forks = unsafe { RefLockCollection::new_unchecked(&forks) }; + let forks = unsafe { RetryingLockCollection::new_unchecked(&forks) }; let forks = forks.lock(key); println!("{} is eating...", self.name); thread::sleep(Duration::from_secs(1)); diff --git a/examples/double_mutex.rs b/examples/double_mutex.rs index e2b08df..e9a9c77 100644 --- a/examples/double_mutex.rs +++ b/examples/double_mutex.rs @@ -1,5 +1,6 @@ use std::thread; +use happylock::collection::RetryingLockCollection; use happylock::{collection::RefLockCollection, Mutex, ThreadKey}; const N: usize = 10; @@ -11,7 +12,7 @@ fn main() { for _ in 0..N { let th = thread::spawn(move || { let key = ThreadKey::get().unwrap(); - let lock = RefLockCollection::new(&DATA); + let lock = RetryingLockCollection::new_ref(&DATA); let mut guard = lock.lock(key); *guard.1 = (100 - *guard.0).to_string(); *guard.0 += 1; diff --git a/src/collection.rs b/src/collection.rs index a11d60c..6623c8a 100644 --- a/src/collection.rs +++ b/src/collection.rs @@ -1,15 +1,12 @@ use std::marker::PhantomData; -use crate::{ - key::Keyable, - lockable::{Lock, Lockable}, -}; +use crate::{key::Keyable, lockable::Lock}; -mod boxed_collection; 
+mod boxed; mod guard; -mod owned_collection; -mod ref_collection; -mod retry_collection; +mod owned; +mod r#ref; +mod retry; pub struct OwnedLockCollection { data: L, @@ -24,15 +21,15 @@ pub struct RefLockCollection<'a, L> { data: &'a L, } -pub struct BoxedLockCollection<'a, L>(RefLockCollection<'a, L>); +pub struct BoxedLockCollection<'a, L: 'a>(RefLockCollection<'a, L>); pub struct RetryingLockCollection { data: L, } /// A RAII guard for a generic [`Lockable`] type. -pub struct LockGuard<'g, 'key: 'g, L: Lockable + 'g, Key: Keyable + 'key> { - guard: L::Guard<'g>, +pub struct LockGuard<'key, Guard, Key: Keyable + 'key> { + guard: Guard, key: Key, _phantom: PhantomData<&'key ()>, } diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs new file mode 100644 index 0000000..8b67ee9 --- /dev/null +++ b/src/collection/boxed.rs @@ -0,0 +1,60 @@ +use std::ops::{Deref, DerefMut}; + +use crate::{Lockable, OwnedLockable}; + +use super::{BoxedLockCollection, RefLockCollection}; + +impl<'a, L: 'a> Deref for BoxedLockCollection<'a, L> { + type Target = RefLockCollection<'a, L>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a, L: 'a> DerefMut for BoxedLockCollection<'a, L> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl<'a, L: 'a> Drop for BoxedLockCollection<'a, L> { + fn drop(&mut self) { + // this was created with Box::new + let boxed = unsafe { Box::from_raw((self.0.data as *const L).cast_mut()) }; + drop(boxed); + } +} + +impl<'a, L: OwnedLockable + 'a> BoxedLockCollection<'a, L> { + #[must_use] + pub fn new(data: L) -> Self { + let boxed = Box::leak(Box::new(data)); + Self(RefLockCollection::new(boxed)) + } +} + +impl<'a, L: OwnedLockable + 'a> BoxedLockCollection<'a, L> { + #[must_use] + pub fn new_ref(data: &'a L) -> Self { + let boxed = Box::leak(Box::new(data)); + + // safety: this is a reference to an OwnedLockable, which can't + // possibly contain inner duplicates + Self(unsafe { 
RefLockCollection::new_unchecked(boxed) }) + } +} + +impl<'a, L: Lockable + 'a> BoxedLockCollection<'a, L> { + #[must_use] + pub unsafe fn new_unchecked(data: L) -> Self { + let boxed = Box::leak(Box::new(data)); + Self(RefLockCollection::new_unchecked(boxed)) + } + + #[must_use] + pub fn try_new(data: L) -> Option { + let boxed = Box::leak(Box::new(data)); + RefLockCollection::try_new(boxed).map(Self) + } +} diff --git a/src/collection/boxed_collection.rs b/src/collection/boxed_collection.rs deleted file mode 100644 index 1aae1e4..0000000 --- a/src/collection/boxed_collection.rs +++ /dev/null @@ -1,60 +0,0 @@ -use std::ops::{Deref, DerefMut}; - -use crate::{Lockable, OwnedLockable}; - -use super::{BoxedLockCollection, RefLockCollection}; - -impl<'a, L> Deref for BoxedLockCollection<'a, L> { - type Target = RefLockCollection<'a, L>; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl<'a, L> DerefMut for BoxedLockCollection<'a, L> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl<'a, L> Drop for BoxedLockCollection<'a, L> { - fn drop(&mut self) { - // this was created with Box::new - let boxed = unsafe { Box::from_raw((self.0.data as *const L).cast_mut()) }; - drop(boxed); - } -} - -impl<'a, L: OwnedLockable> BoxedLockCollection<'a, L> { - #[must_use] - pub fn new(data: L) -> Self { - let boxed = Box::leak(Box::new(data)); - Self(RefLockCollection::new(boxed)) - } -} - -impl<'a, L: OwnedLockable> BoxedLockCollection<'a, &'a L> { - #[must_use] - pub fn new_ref(data: &'a L) -> Self { - let boxed = Box::leak(Box::new(data)); - - // safety: this is a reference to an OwnedLockable, which can't - // possibly contain inner duplicates - Self(unsafe { RefLockCollection::new_unchecked(boxed) }) - } -} - -impl<'a, L: Lockable> BoxedLockCollection<'a, L> { - #[must_use] - pub unsafe fn new_unchecked(data: L) -> Self { - let boxed = Box::leak(Box::new(data)); - Self(RefLockCollection::new_unchecked(boxed)) - } - - #[must_use] - pub fn 
try_new(data: L) -> Option { - let boxed = Box::leak(Box::new(data)); - RefLockCollection::try_new(boxed).map(Self) - } -} diff --git a/src/collection/guard.rs b/src/collection/guard.rs index e3ffb21..b8561eb 100644 --- a/src/collection/guard.rs +++ b/src/collection/guard.rs @@ -1,18 +1,18 @@ use std::ops::{Deref, DerefMut}; -use crate::{key::Keyable, Lockable}; +use crate::key::Keyable; use super::LockGuard; -impl<'a, 'key: 'a, L: Lockable + 'a, Key: Keyable> Deref for LockGuard<'a, 'key, L, Key> { - type Target = L::Guard<'a>; +impl<'key, Guard, Key: Keyable> Deref for LockGuard<'key, Guard, Key> { + type Target = Guard; fn deref(&self) -> &Self::Target { &self.guard } } -impl<'a, 'key: 'a, L: Lockable + 'a, Key: Keyable> DerefMut for LockGuard<'a, 'key, L, Key> { +impl<'key, Guard, Key: Keyable> DerefMut for LockGuard<'key, Guard, Key> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.guard } diff --git a/src/collection/owned.rs b/src/collection/owned.rs new file mode 100644 index 0000000..3415ac4 --- /dev/null +++ b/src/collection/owned.rs @@ -0,0 +1,158 @@ +use std::marker::PhantomData; + +use crate::{lockable::Lock, Keyable, Lockable, OwnedLockable, Sharable}; + +use super::{LockGuard, OwnedLockCollection}; + +fn get_locks(data: &L) -> Vec<&dyn Lock> { + let mut locks = Vec::new(); + data.get_ptrs(&mut locks); + locks +} + +unsafe impl Lockable for OwnedLockCollection { + type Guard<'g> = L::Guard<'g> where Self: 'g; + + type ReadGuard<'g> = L::ReadGuard<'g> where Self: 'g; + + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + self.data.get_ptrs(ptrs) + } + + unsafe fn guard(&self) -> Self::Guard<'_> { + self.data.guard() + } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + self.data.read_guard() + } +} + +unsafe impl Sharable for OwnedLockCollection {} + +unsafe impl OwnedLockable for OwnedLockCollection {} + +impl OwnedLockCollection { + #[must_use] + pub const fn new(data: L) -> Self { + Self { data } + } + + pub fn lock<'g, 
'key, Key: Keyable + 'key>( + &'g self, + key: Key, + ) -> LockGuard<'key, L::Guard<'g>, Key> { + let locks = get_locks(&self.data); + for lock in locks { + // safety: we have the thread key, and these locks happen in a + // predetermined order + unsafe { lock.lock() }; + } + + // safety: we've locked all of this already + let guard = unsafe { self.data.guard() }; + LockGuard { + guard, + key, + _phantom: PhantomData, + } + } + + pub fn try_lock<'g, 'key: 'g, Key: Keyable + 'key>( + &'g self, + key: Key, + ) -> Option, Key>> { + let locks = get_locks(&self.data); + let guard = unsafe { + for (i, lock) in locks.iter().enumerate() { + // safety: we have the thread key + let success = lock.try_lock(); + + if !success { + for lock in &locks[0..i] { + // safety: this lock was already acquired + lock.unlock(); + } + return None; + } + } + + // safety: we've acquired the locks + self.data.guard() + }; + + Some(LockGuard { + guard, + key, + _phantom: PhantomData, + }) + } + + #[allow(clippy::missing_const_for_fn)] + pub fn unlock<'g, 'key: 'g, Key: Keyable + 'key>( + guard: LockGuard<'key, L::Guard<'g>, Key>, + ) -> Key { + drop(guard.guard); + guard.key + } +} + +impl OwnedLockCollection { + pub fn read<'g, 'key, Key: Keyable + 'key>( + &'g self, + key: Key, + ) -> LockGuard<'key, L::ReadGuard<'g>, Key> { + let locks = get_locks(&self.data); + for lock in locks { + // safety: we have the thread key, and these locks happen in a + // predetermined order + unsafe { lock.read() }; + } + + // safety: we've locked all of this already + let guard = unsafe { self.data.read_guard() }; + LockGuard { + guard, + key, + _phantom: PhantomData, + } + } + + pub fn try_read<'g, 'key: 'g, Key: Keyable + 'key>( + &'g self, + key: Key, + ) -> Option, Key>> { + let locks = get_locks(&self.data); + let guard = unsafe { + for (i, lock) in locks.iter().enumerate() { + // safety: we have the thread key + let success = lock.try_read(); + + if !success { + for lock in &locks[0..i] { + // safety: 
this lock was already acquired + lock.unlock(); + } + return None; + } + } + + // safety: we've acquired the locks + self.data.read_guard() + }; + + Some(LockGuard { + guard, + key, + _phantom: PhantomData, + }) + } + + #[allow(clippy::missing_const_for_fn)] + pub fn unlock_read<'g, 'key: 'g, Key: Keyable + 'key>( + guard: LockGuard<'key, L::ReadGuard<'g>, Key>, + ) -> Key { + drop(guard.guard); + guard.key + } +} diff --git a/src/collection/owned_collection.rs b/src/collection/owned_collection.rs deleted file mode 100644 index ea8f2f2..0000000 --- a/src/collection/owned_collection.rs +++ /dev/null @@ -1,65 +0,0 @@ -use std::marker::PhantomData; - -use crate::{lockable::Lock, Keyable, Lockable, OwnedLockable}; - -use super::{LockGuard, OwnedLockCollection}; - -fn get_locks(data: &L) -> Vec<&dyn Lock> { - let mut locks = Vec::new(); - data.get_ptrs(&mut locks); - locks -} - -impl OwnedLockCollection { - #[must_use] - pub const fn new(data: L) -> Self { - Self { data } - } - - pub fn lock<'a, 'key, Key: Keyable + 'key>(&'a self, key: Key) -> LockGuard<'a, 'key, L, Key> { - let locks = get_locks(&self.data); - for lock in locks { - // safety: we have the thread key, and these locks happen in a - // predetermined order - unsafe { lock.lock() }; - } - - // safety: we've locked all of this already - let guard = unsafe { self.data.guard() }; - LockGuard { - guard, - key, - _phantom: PhantomData, - } - } - - pub fn try_lock<'a, 'key: 'a, Key: Keyable + 'key>( - &'a self, - key: Key, - ) -> Option> { - let locks = get_locks(&self.data); - let guard = unsafe { - for (i, lock) in locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_lock(); - - if !success { - for lock in &locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } - } - - // safety: we've acquired the locks - self.data.guard() - }; - - Some(LockGuard { - guard, - key, - _phantom: PhantomData, - }) - } -} diff --git 
a/src/collection/ref.rs b/src/collection/ref.rs new file mode 100644 index 0000000..9fe34c9 --- /dev/null +++ b/src/collection/ref.rs @@ -0,0 +1,267 @@ +use std::marker::PhantomData; + +use crate::{key::Keyable, lockable::Lock, Lockable, OwnedLockable, Sharable}; + +use super::{LockGuard, RefLockCollection}; + +#[must_use] +fn get_locks(data: &L) -> Vec<&dyn Lock> { + let mut locks = Vec::new(); + data.get_ptrs(&mut locks); + locks.sort_by_key(|lock| std::ptr::from_ref(*lock)); + locks +} + +/// returns `true` if the sorted list contains a duplicate +#[must_use] +fn contains_duplicates(l: &[&dyn Lock]) -> bool { + l.windows(2) + .any(|window| std::ptr::eq(window[0], window[1])) +} + +impl<'a, L: Lockable> AsRef for RefLockCollection<'a, L> { + fn as_ref(&self) -> &L { + self.data + } +} + +impl<'a, L: Lockable> AsRef for RefLockCollection<'a, L> { + fn as_ref(&self) -> &Self { + self + } +} + +impl<'a, L: Lockable> AsMut for RefLockCollection<'a, L> { + fn as_mut(&mut self) -> &mut Self { + self + } +} + +impl<'a, L> IntoIterator for &'a RefLockCollection<'a, L> +where + &'a L: IntoIterator, +{ + type Item = <&'a L as IntoIterator>::Item; + type IntoIter = <&'a L as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.data.into_iter() + } +} + +unsafe impl<'c, L: Lockable> Lockable for RefLockCollection<'c, L> { + type Guard<'g> = L::Guard<'g> where Self: 'g; + + type ReadGuard<'g> = L::ReadGuard<'g> where Self: 'g; + + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + ptrs.extend_from_slice(&self.locks); + } + + unsafe fn guard(&self) -> Self::Guard<'_> { + self.data.guard() + } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + self.data.read_guard() + } +} + +unsafe impl<'c, L: Sharable> Sharable for RefLockCollection<'c, L> {} + +impl<'a, L: OwnedLockable> RefLockCollection<'a, L> { + /// Creates a new collection of owned locks. 
+ /// + /// Because the locks are owned, there's no need to do any checks for + /// duplicate values. + /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex}; + /// + /// let data = (Mutex::new(0), Mutex::new("")); + /// let lock = LockCollection::new(&data); + /// ``` + #[must_use] + pub fn new(data: &'a L) -> RefLockCollection { + RefLockCollection { + locks: get_locks(data), + data, + } + } +} + +impl<'a, L: Lockable> RefLockCollection<'a, L> { + /// Creates a new collections of locks. + /// + /// # Safety + /// + /// This results in undefined behavior if any locks are presented twice + /// within this collection. + /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex}; + /// + /// let data1 = Mutex::new(0); + /// let data2 = Mutex::new(""); + /// + /// // safety: data1 and data2 refer to distinct mutexes + /// let lock = unsafe { LockCollection::new_unchecked((&data1, &data2)) }; + /// ``` + #[must_use] + pub unsafe fn new_unchecked(data: &'a L) -> Self { + Self { + data, + locks: get_locks(data), + } + } + + /// Creates a new collection of locks. + /// + /// This returns `None` if any locks are found twice in the given + /// collection. + /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex}; + /// + /// let data1 = Mutex::new(0); + /// let data2 = Mutex::new(""); + /// + /// // data1 and data2 refer to distinct mutexes, so this won't panic + /// let lock = LockCollection::try_new((&data1, &data2)).unwrap(); + /// ``` + #[must_use] + pub fn try_new(data: &'a L) -> Option { + let locks = get_locks(data); + if contains_duplicates(&locks) { + return None; + } + + Some(Self { locks, data }) + } + + /// Locks the collection + /// + /// This function returns a guard that can be used to access the underlying + /// data. When the guard is dropped, the locks in the collection are also + /// dropped. 
+ /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex, ThreadKey}; + /// + /// let key = ThreadKey::get().unwrap(); + /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); + /// + /// let mut guard = lock.lock(key); + /// *guard.0 += 1; + /// *guard.1 = "1"; + /// ``` + pub fn lock<'key: 'a, Key: Keyable + 'key>( + &'a self, + key: Key, + ) -> LockGuard<'key, L::Guard<'a>, Key> { + for lock in &self.locks { + // safety: we have the thread key + unsafe { lock.lock() }; + } + + LockGuard { + // safety: we've already acquired the lock + guard: unsafe { self.data.guard() }, + key, + _phantom: PhantomData, + } + } + + /// Attempts to lock the without blocking. + /// + /// If successful, this method returns a guard that can be used to access + /// the data, and unlocks the data when it is dropped. Otherwise, `None` is + /// returned. + /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex, ThreadKey}; + /// + /// let key = ThreadKey::get().unwrap(); + /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); + /// + /// match lock.try_lock(key) { + /// Some(mut guard) => { + /// *guard.0 += 1; + /// *guard.1 = "1"; + /// }, + /// None => unreachable!(), + /// }; + /// + /// ``` + pub fn try_lock<'key: 'a, Key: Keyable + 'key>( + &'a self, + key: Key, + ) -> Option, Key>> { + let guard = unsafe { + for (i, lock) in self.locks.iter().enumerate() { + // safety: we have the thread key + let success = lock.try_lock(); + + if !success { + for lock in &self.locks[0..i] { + // safety: this lock was already acquired + lock.unlock(); + } + return None; + } + } + + // safety: we've acquired the locks + self.data.guard() + }; + + Some(LockGuard { + guard, + key, + _phantom: PhantomData, + }) + } + + /// Unlocks the underlying lockable data type, returning the key that's + /// associated with it. 
+ /// + /// # Examples + /// + /// ``` + /// use happylock::{LockCollection, Mutex, ThreadKey}; + /// + /// let key = ThreadKey::get().unwrap(); + /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); + /// + /// let mut guard = lock.lock(key); + /// *guard.0 += 1; + /// *guard.1 = "1"; + /// let key = LockCollection::unlock(guard); + /// ``` + #[allow(clippy::missing_const_for_fn)] + pub fn unlock<'key: 'a, Key: Keyable + 'key>(guard: LockGuard<'key, L::Guard<'a>, Key>) -> Key { + drop(guard.guard); + guard.key + } +} + +impl<'a, L: 'a> RefLockCollection<'a, L> +where + &'a L: IntoIterator, +{ + /// Returns an iterator over references to each value in the collection. + #[must_use] + pub fn iter(&'a self) -> <&'a L as IntoIterator>::IntoIter { + self.into_iter() + } +} diff --git a/src/collection/ref_collection.rs b/src/collection/ref_collection.rs deleted file mode 100644 index 41f6b16..0000000 --- a/src/collection/ref_collection.rs +++ /dev/null @@ -1,244 +0,0 @@ -use std::marker::PhantomData; - -use crate::{key::Keyable, lockable::Lock, Lockable, OwnedLockable}; - -use super::{LockGuard, RefLockCollection}; - -#[must_use] -fn get_locks(data: &L) -> Vec<&dyn Lock> { - let mut locks = Vec::new(); - data.get_ptrs(&mut locks); - locks.sort_by_key(|lock| std::ptr::from_ref(*lock)); - locks -} - -/// returns `true` if the sorted list contains a duplicate -#[must_use] -fn contains_duplicates(l: &[&dyn Lock]) -> bool { - l.windows(2) - .any(|window| std::ptr::eq(window[0], window[1])) -} - -impl<'a, L: Lockable> AsRef for RefLockCollection<'a, L> { - fn as_ref(&self) -> &L { - self.data - } -} - -impl<'a, L: Lockable> AsRef for RefLockCollection<'a, L> { - fn as_ref(&self) -> &Self { - self - } -} - -impl<'a, L: Lockable> AsMut for RefLockCollection<'a, L> { - fn as_mut(&mut self) -> &mut Self { - self - } -} - -impl<'a, L> IntoIterator for &'a RefLockCollection<'a, L> -where - &'a L: IntoIterator, -{ - type Item = <&'a L as IntoIterator>::Item; - type 
IntoIter = <&'a L as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.data.into_iter() - } -} - -impl<'a, L: OwnedLockable> RefLockCollection<'a, L> { - /// Creates a new collection of owned locks. - /// - /// Because the locks are owned, there's no need to do any checks for - /// duplicate values. - /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex}; - /// - /// let data = (Mutex::new(0), Mutex::new("")); - /// let lock = LockCollection::new(&data); - /// ``` - #[must_use] - pub fn new(data: &'a L) -> RefLockCollection { - RefLockCollection { - locks: get_locks(data), - data, - } - } -} - -impl<'a, L: Lockable> RefLockCollection<'a, L> { - /// Creates a new collections of locks. - /// - /// # Safety - /// - /// This results in undefined behavior if any locks are presented twice - /// within this collection. - /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex}; - /// - /// let data1 = Mutex::new(0); - /// let data2 = Mutex::new(""); - /// - /// // safety: data1 and data2 refer to distinct mutexes - /// let lock = unsafe { LockCollection::new_unchecked((&data1, &data2)) }; - /// ``` - #[must_use] - pub unsafe fn new_unchecked(data: &'a L) -> Self { - Self { - data, - locks: get_locks(data), - } - } - - /// Creates a new collection of locks. - /// - /// This returns `None` if any locks are found twice in the given - /// collection. 
- /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex}; - /// - /// let data1 = Mutex::new(0); - /// let data2 = Mutex::new(""); - /// - /// // data1 and data2 refer to distinct mutexes, so this won't panic - /// let lock = LockCollection::try_new((&data1, &data2)).unwrap(); - /// ``` - #[must_use] - pub fn try_new(data: &'a L) -> Option { - let locks = get_locks(data); - if contains_duplicates(&locks) { - return None; - } - - Some(Self { locks, data }) - } - - /// Locks the collection - /// - /// This function returns a guard that can be used to access the underlying - /// data. When the guard is dropped, the locks in the collection are also - /// dropped. - /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex, ThreadKey}; - /// - /// let key = ThreadKey::get().unwrap(); - /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); - /// - /// let mut guard = lock.lock(key); - /// *guard.0 += 1; - /// *guard.1 = "1"; - /// ``` - pub fn lock<'key: 'a, Key: Keyable + 'key>(&'a self, key: Key) -> LockGuard<'a, 'key, L, Key> { - for lock in &self.locks { - // safety: we have the thread key - unsafe { lock.lock() }; - } - - LockGuard { - // safety: we've already acquired the lock - guard: unsafe { self.data.guard() }, - key, - _phantom: PhantomData, - } - } - - /// Attempts to lock the without blocking. - /// - /// If successful, this method returns a guard that can be used to access - /// the data, and unlocks the data when it is dropped. Otherwise, `None` is - /// returned. 
- /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex, ThreadKey}; - /// - /// let key = ThreadKey::get().unwrap(); - /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); - /// - /// match lock.try_lock(key) { - /// Some(mut guard) => { - /// *guard.0 += 1; - /// *guard.1 = "1"; - /// }, - /// None => unreachable!(), - /// }; - /// - /// ``` - pub fn try_lock<'key: 'a, Key: Keyable + 'key>( - &'a self, - key: Key, - ) -> Option> { - let guard = unsafe { - for (i, lock) in self.locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_lock(); - - if !success { - for lock in &self.locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } - } - - // safety: we've acquired the locks - self.data.guard() - }; - - Some(LockGuard { - guard, - key, - _phantom: PhantomData, - }) - } - - /// Unlocks the underlying lockable data type, returning the key that's - /// associated with it. - /// - /// # Examples - /// - /// ``` - /// use happylock::{LockCollection, Mutex, ThreadKey}; - /// - /// let key = ThreadKey::get().unwrap(); - /// let lock = LockCollection::new((Mutex::new(0), Mutex::new(""))); - /// - /// let mut guard = lock.lock(key); - /// *guard.0 += 1; - /// *guard.1 = "1"; - /// let key = LockCollection::unlock(guard); - /// ``` - #[allow(clippy::missing_const_for_fn)] - pub fn unlock<'key: 'a, Key: Keyable + 'key>(guard: LockGuard<'a, 'key, L, Key>) -> Key { - drop(guard.guard); - guard.key - } -} - -impl<'a, L: 'a> RefLockCollection<'a, L> -where - &'a L: IntoIterator, -{ - /// Returns an iterator over references to each value in the collection. 
- #[must_use] - pub fn iter(&'a self) -> <&'a L as IntoIterator>::IntoIter { - self.into_iter() - } -} diff --git a/src/collection/retry.rs b/src/collection/retry.rs new file mode 100644 index 0000000..3000f8b --- /dev/null +++ b/src/collection/retry.rs @@ -0,0 +1,271 @@ +use crate::{lockable::Lock, Keyable, Lockable, OwnedLockable, Sharable}; +use std::collections::HashSet; +use std::marker::PhantomData; + +use super::{LockGuard, RetryingLockCollection}; + +fn contains_duplicates(data: L) -> bool { + let mut locks = Vec::new(); + data.get_ptrs(&mut locks); + let locks = locks.into_iter().map(|l| l as *const dyn Lock); + + let mut locks_set = HashSet::new(); + for lock in locks { + if !locks_set.insert(lock) { + return true; + } + } + + false +} + +unsafe impl Lockable for RetryingLockCollection { + type Guard<'g> = L::Guard<'g> where Self: 'g; + + type ReadGuard<'g> = L::ReadGuard<'g> where Self: 'g; + + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + self.data.get_ptrs(ptrs) + } + + unsafe fn guard(&self) -> Self::Guard<'_> { + self.data.guard() + } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + self.data.read_guard() + } +} + +unsafe impl Sharable for RetryingLockCollection {} + +unsafe impl OwnedLockable for RetryingLockCollection {} + +impl RetryingLockCollection { + #[must_use] + pub const fn new(data: L) -> Self { + Self { data } + } +} + +impl<'a, L: OwnedLockable> RetryingLockCollection<&'a L> { + #[must_use] + pub const fn new_ref(data: &'a L) -> Self { + Self { data } + } +} + +impl RetryingLockCollection { + #[must_use] + pub const unsafe fn new_unchecked(data: L) -> Self { + Self { data } + } + + pub fn try_new(data: L) -> Option { + contains_duplicates(&data).then_some(Self { data }) + } + + pub fn lock<'g, 'key: 'g, Key: Keyable + 'key>( + &'g self, + key: Key, + ) -> LockGuard<'key, L::Guard<'g>, Key> { + let mut first_index = 0; + let mut locks = Vec::new(); + self.data.get_ptrs(&mut locks); + + if locks.is_empty() { + 
return LockGuard { + // safety: there's no data being returned + guard: unsafe { self.data.guard() }, + key, + _phantom: PhantomData, + }; + } + + let guard = unsafe { + 'outer: loop { + // safety: we have the thread key + locks[first_index].lock(); + for (i, lock) in locks.iter().enumerate() { + if i == first_index { + continue; + } + + // safety: we have the thread key + if !lock.try_lock() { + for lock in locks.iter().take(i) { + // safety: we already locked all of these + lock.unlock(); + } + + if first_index >= i { + // safety: this is already locked and can't be unlocked + // by the previous loop + locks[first_index].unlock(); + } + + first_index = i; + continue 'outer; + } + } + + // safety: we locked all the data + break self.data.guard(); + } + }; + + LockGuard { + guard, + key, + _phantom: PhantomData, + } + } + + pub fn try_lock<'g, 'key: 'g, Key: Keyable + 'key>( + &'g self, + key: Key, + ) -> Option, Key>> { + let mut locks = Vec::new(); + self.data.get_ptrs(&mut locks); + + if locks.is_empty() { + return Some(LockGuard { + // safety: there's no data being returned + guard: unsafe { self.data.guard() }, + key, + _phantom: PhantomData, + }); + } + + let guard = unsafe { + for (i, lock) in locks.iter().enumerate() { + // safety: we have the thread key + if !lock.try_lock() { + for lock in locks.iter().take(i) { + // safety: we already locked all of these + lock.unlock(); + } + return None; + } + } + + // safety: we locked all the data + self.data.guard() + }; + + Some(LockGuard { + guard, + key, + _phantom: PhantomData, + }) + } + + pub fn unlock<'key, Key: Keyable + 'key>(guard: LockGuard<'key, L::Guard<'_>, Key>) -> Key { + drop(guard.guard); + guard.key + } +} + +impl RetryingLockCollection { + pub fn read<'g, 'key: 'g, Key: Keyable + 'key>( + &'g self, + key: Key, + ) -> LockGuard<'key, L::ReadGuard<'g>, Key> { + let mut first_index = 0; + let mut locks = Vec::new(); + self.data.get_ptrs(&mut locks); + + if locks.is_empty() { + return LockGuard { + 
// safety: there's no data being returned + guard: unsafe { self.data.read_guard() }, + key, + _phantom: PhantomData, + }; + } + + let guard = unsafe { + 'outer: loop { + // safety: we have the thread key + locks[first_index].read(); + for (i, lock) in locks.iter().enumerate() { + if i == first_index { + continue; + } + + // safety: we have the thread key + if !lock.try_read() { + for lock in locks.iter().take(i) { + // safety: we already locked all of these + lock.unlock_read(); + } + + if first_index >= i { + // safety: this is already locked and can't be unlocked + // by the previous loop + locks[first_index].unlock_read(); + } + + first_index = i; + continue 'outer; + } + } + + // safety: we locked all the data + break self.data.read_guard(); + } + }; + + LockGuard { + guard, + key, + _phantom: PhantomData, + } + } + + pub fn try_read<'g, 'key: 'g, Key: Keyable + 'key>( + &'g self, + key: Key, + ) -> Option, Key>> { + let mut locks = Vec::new(); + self.data.get_ptrs(&mut locks); + + if locks.is_empty() { + return Some(LockGuard { + // safety: there's no data being returned + guard: unsafe { self.data.read_guard() }, + key, + _phantom: PhantomData, + }); + } + + let guard = unsafe { + for (i, lock) in locks.iter().enumerate() { + // safety: we have the thread key + if !lock.try_read() { + for lock in locks.iter().take(i) { + // safety: we already locked all of these + lock.unlock_read(); + } + return None; + } + } + + // safety: we locked all the data + self.data.read_guard() + }; + + Some(LockGuard { + guard, + key, + _phantom: PhantomData, + }) + } + + pub fn unlock_read<'key, Key: Keyable + 'key>( + guard: LockGuard<'key, L::ReadGuard<'_>, Key>, + ) -> Key { + drop(guard.guard); + guard.key + } +} diff --git a/src/collection/retry_collection.rs b/src/collection/retry_collection.rs deleted file mode 100644 index 73f9e18..0000000 --- a/src/collection/retry_collection.rs +++ /dev/null @@ -1,138 +0,0 @@ -use std::marker::PhantomData; - -use 
crate::{lockable::Lock, Keyable, Lockable, OwnedLockable}; - -use super::{LockGuard, RetryingLockCollection}; - -fn contains_duplicates(data: L) -> bool { - let mut locks = Vec::new(); - data.get_ptrs(&mut locks); - let mut locks: Vec<_> = locks.into_iter().map(|l| l as *const dyn Lock).collect(); - locks.sort_unstable(); - locks.windows(2).any(|w| std::ptr::addr_eq(w[0], w[1])) -} - -impl RetryingLockCollection { - #[must_use] - pub const fn new(data: L) -> Self { - Self { data } - } -} - -impl<'a, L: OwnedLockable> RetryingLockCollection<&'a L> { - #[must_use] - pub const fn new_ref(data: &'a L) -> Self { - Self { data } - } -} - -impl RetryingLockCollection { - #[must_use] - pub const unsafe fn new_unchecked(data: L) -> Self { - Self { data } - } - - pub fn try_new(data: L) -> Option { - contains_duplicates(&data).then_some(Self { data }) - } - - pub fn lock<'a, 'key: 'a, Key: Keyable + 'key>( - &'a self, - key: Key, - ) -> LockGuard<'a, 'key, L, Key> { - let mut first_index = 0; - let mut locks = Vec::new(); - self.data.get_ptrs(&mut locks); - - if locks.is_empty() { - return LockGuard { - // safety: there's no data being returned - guard: unsafe { self.data.guard() }, - key, - _phantom: PhantomData, - }; - } - - let guard = unsafe { - 'outer: loop { - // safety: we have the thread key - locks[first_index].lock(); - for (i, lock) in locks.iter().enumerate() { - if i == first_index { - continue; - } - - // safety: we have the thread key - if !lock.try_lock() { - for lock in locks.iter().take(i) { - // safety: we already locked all of these - lock.unlock(); - } - - if first_index >= i { - // safety: this is already locked and can't be unlocked - // by the previous loop - locks[first_index].unlock(); - } - - first_index = i; - continue 'outer; - } - } - - // safety: we locked all the data - break self.data.guard(); - } - }; - - LockGuard { - guard, - key, - _phantom: PhantomData, - } - } - - pub fn try_lock<'a, 'key: 'a, Key: Keyable + 'key>( - &'a self, - key: 
Key, - ) -> Option> { - let mut locks = Vec::new(); - self.data.get_ptrs(&mut locks); - - if locks.is_empty() { - return Some(LockGuard { - // safety: there's no data being returned - guard: unsafe { self.data.guard() }, - key, - _phantom: PhantomData, - }); - } - - let guard = unsafe { - for (i, lock) in locks.iter().enumerate() { - // safety: we have the thread key - if !lock.try_lock() { - for lock in locks.iter().take(i) { - // safety: we already locked all of these - lock.unlock(); - } - return None; - } - } - - // safety: we locked all the data - self.data.guard() - }; - - Some(LockGuard { - guard, - key, - _phantom: PhantomData, - }) - } - - pub fn unlock<'key, Key: Keyable + 'key>(guard: LockGuard<'_, 'key, L, Key>) -> Key { - drop(guard.guard); - guard.key - } -} diff --git a/src/lib.rs b/src/lib.rs index 7e7930f..668f3db 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -113,8 +113,9 @@ pub mod collection; pub mod mutex; pub mod rwlock; +pub use collection::BoxedLockCollection as LockCollection; pub use key::{Keyable, ThreadKey}; -pub use lockable::{Lockable, OwnedLockable}; +pub use lockable::{Lockable, OwnedLockable, Sharable}; #[cfg(feature = "spin")] pub use mutex::SpinLock; diff --git a/src/lockable.rs b/src/lockable.rs index 9b3a4e4..23aeb4c 100644 --- a/src/lockable.rs +++ b/src/lockable.rs @@ -45,6 +45,12 @@ pub unsafe trait Lock: Send + Sync { /// /// It is undefined behavior to use this if the lock is not acquired unsafe fn unlock(&self); + + unsafe fn read(&self); + + unsafe fn try_read(&self) -> bool; + + unsafe fn unlock_read(&self); } pub unsafe trait Lockable { @@ -53,12 +59,21 @@ pub unsafe trait Lockable { where Self: 'g; + type ReadGuard<'g> + where + Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>); #[must_use] unsafe fn guard(&self) -> Self::Guard<'_>; + + #[must_use] + unsafe fn read_guard(&self) -> Self::ReadGuard<'_>; } +pub unsafe trait Sharable: Lockable {} + /// A type that may be locked and unlocked, and is known to 
be the only valid /// instance of the lock. /// @@ -80,6 +95,18 @@ unsafe impl Lock for Mutex { unsafe fn unlock(&self) { self.raw().unlock() } + + unsafe fn read(&self) { + self.raw().lock() + } + + unsafe fn try_read(&self) -> bool { + self.raw().try_lock() + } + + unsafe fn unlock_read(&self) { + self.raw().unlock() + } } unsafe impl Lock for RwLock { @@ -94,10 +121,23 @@ unsafe impl Lock for RwLock { unsafe fn unlock(&self) { self.raw().unlock_exclusive() } + + unsafe fn read(&self) { + self.raw().lock_shared() + } + + unsafe fn try_read(&self) -> bool { + self.raw().try_lock_shared() + } + + unsafe fn unlock_read(&self) { + self.raw().unlock_shared() + } } unsafe impl Lockable for Mutex { type Guard<'g> = MutexRef<'g, T, R> where Self: 'g; + type ReadGuard<'g> = MutexRef<'g, T, R> where Self: 'g; fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { ptrs.push(self); @@ -106,11 +146,17 @@ unsafe impl Lockable for Mutex { unsafe fn guard(&self) -> Self::Guard<'_> { MutexRef::new(self) } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + MutexRef::new(self) + } } unsafe impl Lockable for RwLock { type Guard<'g> = RwLockWriteRef<'g, T, R> where Self: 'g; + type ReadGuard<'g> = RwLockReadRef<'g, T, R> where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { ptrs.push(self); } @@ -118,8 +164,14 @@ unsafe impl Lockable for RwLock { unsafe fn guard(&self) -> Self::Guard<'_> { RwLockWriteRef::new(self) } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + RwLockReadRef::new(self) + } } +unsafe impl Sharable for RwLock {} + unsafe impl OwnedLockable for Mutex {} unsafe impl OwnedLockable for RwLock {} @@ -127,6 +179,8 @@ unsafe impl OwnedLockable for RwLock unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Lockable for ReadLock<'l, T, R> { type Guard<'g> = RwLockReadRef<'g, T, R> where Self: 'g; + type ReadGuard<'g> = RwLockReadRef<'g, T, R> where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { 
ptrs.push(self.as_ref()); } @@ -134,11 +188,17 @@ unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Lockable for ReadLock<'l, T unsafe fn guard(&self) -> Self::Guard<'_> { RwLockReadRef::new(self.as_ref()) } + + unsafe fn read_guard(&self) -> Self::Guard<'_> { + RwLockReadRef::new(self.as_ref()) + } } unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Lockable for WriteLock<'l, T, R> { type Guard<'g> = RwLockWriteRef<'g, T, R> where Self: 'g; + type ReadGuard<'g> = RwLockWriteRef<'g, T, R> where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { ptrs.push(self.as_ref()); } @@ -146,11 +206,19 @@ unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Lockable for WriteLock<'l, unsafe fn guard(&self) -> Self::Guard<'_> { RwLockWriteRef::new(self.as_ref()) } + + unsafe fn read_guard(&self) -> Self::Guard<'_> { + RwLockWriteRef::new(self.as_ref()) + } } +unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Sharable for ReadLock<'l, T, R> {} + unsafe impl Lockable for &T { type Guard<'g> = T::Guard<'g> where Self: 'g; + type ReadGuard<'g> = T::ReadGuard<'g> where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { (*self).get_ptrs(ptrs); } @@ -158,11 +226,17 @@ unsafe impl Lockable for &T { unsafe fn guard(&self) -> Self::Guard<'_> { (*self).guard() } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + (*self).read_guard() + } } unsafe impl Lockable for &mut T { type Guard<'g> = T::Guard<'g> where Self: 'g; + type ReadGuard<'g> = T::ReadGuard<'g> where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { (**self).get_ptrs(ptrs) } @@ -170,6 +244,10 @@ unsafe impl Lockable for &mut T { unsafe fn guard(&self) -> Self::Guard<'_> { (**self).guard() } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + (**self).read_guard() + } } unsafe impl OwnedLockable for &mut T {} @@ -177,6 +255,8 @@ unsafe impl OwnedLockable for &mut T {} unsafe impl Lockable for (A,) { type Guard<'g> = (A::Guard<'g>,) where Self: 'g; 
+ type ReadGuard<'g> = (A::ReadGuard<'g>,) where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { self.0.get_ptrs(ptrs); } @@ -184,11 +264,17 @@ unsafe impl Lockable for (A,) { unsafe fn guard(&self) -> Self::Guard<'_> { (self.0.guard(),) } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + (self.0.read_guard(),) + } } unsafe impl Lockable for (A, B) { type Guard<'g> = (A::Guard<'g>, B::Guard<'g>) where Self: 'g; + type ReadGuard<'g> = (A::ReadGuard<'g>, B::ReadGuard<'g>) where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); @@ -197,11 +283,17 @@ unsafe impl Lockable for (A, B) { unsafe fn guard(&self) -> Self::Guard<'_> { (self.0.guard(), self.1.guard()) } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + (self.0.read_guard(), self.1.read_guard()) + } } unsafe impl Lockable for (A, B, C) { type Guard<'g> = (A::Guard<'g>, B::Guard<'g>, C::Guard<'g>) where Self: 'g; + type ReadGuard<'g> = (A::ReadGuard<'g>, B::ReadGuard<'g>, C::ReadGuard<'g>) where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); @@ -211,11 +303,26 @@ unsafe impl Lockable for (A, B, C) { unsafe fn guard(&self) -> Self::Guard<'_> { (self.0.guard(), self.1.guard(), self.2.guard()) } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + ( + self.0.read_guard(), + self.1.read_guard(), + self.2.read_guard(), + ) + } } unsafe impl Lockable for (A, B, C, D) { type Guard<'g> = (A::Guard<'g>, B::Guard<'g>, C::Guard<'g>, D::Guard<'g>) where Self: 'g; + type ReadGuard<'g> = ( + A::ReadGuard<'g>, + B::ReadGuard<'g>, + C::ReadGuard<'g>, + D::ReadGuard<'g>, + ) where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); @@ -231,6 +338,15 @@ unsafe impl Lockable for (A, self.3.guard(), ) } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + ( + self.0.read_guard(), + 
self.1.read_guard(), + self.2.read_guard(), + self.3.read_guard(), + ) + } } unsafe impl Lockable @@ -244,6 +360,14 @@ unsafe impl Loc E::Guard<'g>, ) where Self: 'g; + type ReadGuard<'g> = ( + A::ReadGuard<'g>, + B::ReadGuard<'g>, + C::ReadGuard<'g>, + D::ReadGuard<'g>, + E::ReadGuard<'g>, + ) where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); @@ -261,6 +385,16 @@ unsafe impl Loc self.4.guard(), ) } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + ( + self.0.read_guard(), + self.1.read_guard(), + self.2.read_guard(), + self.3.read_guard(), + self.4.read_guard(), + ) + } } unsafe impl Lockable @@ -275,6 +409,15 @@ unsafe impl, ) where Self: 'g; + type ReadGuard<'g> = ( + A::ReadGuard<'g>, + B::ReadGuard<'g>, + C::ReadGuard<'g>, + D::ReadGuard<'g>, + E::ReadGuard<'g>, + F::ReadGuard<'g>, + ) where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); @@ -294,6 +437,17 @@ unsafe impl Self::ReadGuard<'_> { + ( + self.0.read_guard(), + self.1.read_guard(), + self.2.read_guard(), + self.3.read_guard(), + self.4.read_guard(), + self.5.read_guard(), + ) + } } unsafe impl @@ -309,6 +463,16 @@ unsafe impl, ) where Self: 'g; + type ReadGuard<'g> = ( + A::ReadGuard<'g>, + B::ReadGuard<'g>, + C::ReadGuard<'g>, + D::ReadGuard<'g>, + E::ReadGuard<'g>, + F::ReadGuard<'g>, + G::ReadGuard<'g>, + ) where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); @@ -330,6 +494,40 @@ unsafe impl Self::ReadGuard<'_> { + ( + self.0.read_guard(), + self.1.read_guard(), + self.2.read_guard(), + self.3.read_guard(), + self.4.read_guard(), + self.5.read_guard(), + self.6.read_guard(), + ) + } +} + +unsafe impl Sharable for (A,) {} +unsafe impl Sharable for (A, B) {} + +unsafe impl Sharable for (A, B, C) {} + +unsafe impl Sharable for (A, B, C, D) {} + +unsafe impl Sharable + for (A, B, C, D, 
E) +{ +} + +unsafe impl Sharable + for (A, B, C, D, E, F) +{ +} + +unsafe impl + Sharable for (A, B, C, D, E, F, G) +{ } unsafe impl OwnedLockable for (A,) {} @@ -373,6 +571,8 @@ unsafe impl< unsafe impl Lockable for [T; N] { type Guard<'g> = [T::Guard<'g>; N] where Self: 'g; + type ReadGuard<'g> = [T::ReadGuard<'g>; N] where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { for lock in self { lock.get_ptrs(ptrs); @@ -387,11 +587,22 @@ unsafe impl Lockable for [T; N] { guards.map(|g| g.assume_init()) } + + unsafe fn read_guard<'g>(&'g self) -> Self::ReadGuard<'g> { + let mut guards = MaybeUninit::<[MaybeUninit>; N]>::uninit().assume_init(); + for i in 0..N { + guards[i].write(self[i].read_guard()); + } + + guards.map(|g| g.assume_init()) + } } unsafe impl Lockable for Box<[T]> { type Guard<'g> = Box<[T::Guard<'g>]> where Self: 'g; + type ReadGuard<'g> = Box<[T::ReadGuard<'g>]> where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { for lock in self.iter() { lock.get_ptrs(ptrs); @@ -406,11 +617,22 @@ unsafe impl Lockable for Box<[T]> { guards.into_boxed_slice() } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + let mut guards = Vec::new(); + for lock in self.iter() { + guards.push(lock.read_guard()); + } + + guards.into_boxed_slice() + } } unsafe impl Lockable for Vec { type Guard<'g> = Vec> where Self: 'g; + type ReadGuard<'g> = Box<[T::ReadGuard<'g>]> where Self: 'g; + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { for lock in self { lock.get_ptrs(ptrs); @@ -425,8 +647,21 @@ unsafe impl Lockable for Vec { guards } + + unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { + let mut guards = Vec::new(); + for lock in self { + guards.push(lock.read_guard()); + } + + guards.into_boxed_slice() + } } +unsafe impl Sharable for [T; N] {} +unsafe impl Sharable for Box<[T]> {} +unsafe impl Sharable for Vec {} + unsafe impl OwnedLockable for [T; N] {} unsafe impl OwnedLockable for Box<[T]> {} unsafe impl 
OwnedLockable for Vec {} -- cgit v1.2.3 From ef191a3e8ecf4093fcd08036e35012c1af173a08 Mon Sep 17 00:00:00 2001 From: Botahamec Date: Wed, 22 May 2024 17:27:35 -0400 Subject: Documentation for types --- Cargo.toml | 2 +- examples/dining_philosophers.rs | 3 +- examples/double_mutex.rs | 3 +- src/collection.rs | 78 +++++++++++++++++++++++-- src/collection/boxed.rs | 8 +-- src/collection/owned.rs | 7 ++- src/collection/ref.rs | 9 +-- src/collection/retry.rs | 8 ++- src/lib.rs | 11 +++- src/lockable.rs | 123 +++++++++++++++++++++++++++++++++------- 10 files changed, 204 insertions(+), 48 deletions(-) (limited to 'src/lib.rs') diff --git a/Cargo.toml b/Cargo.toml index 3091f8c..69b9ef7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "happylock" -version = "0.1.1" +version = "0.2.0" authors = ["Mica White "] edition = "2021" description = "Free deadlock prevention" diff --git a/examples/dining_philosophers.rs b/examples/dining_philosophers.rs index 70826ba..1340564 100644 --- a/examples/dining_philosophers.rs +++ b/examples/dining_philosophers.rs @@ -1,6 +1,5 @@ use std::{thread, time::Duration}; -use happylock::collection::RetryingLockCollection; use happylock::{collection::RefLockCollection, Mutex, ThreadKey}; static PHILOSOPHERS: [Philosopher; 5] = [ @@ -52,7 +51,7 @@ impl Philosopher { // safety: no philosopher asks for the same fork twice let forks = [&FORKS[self.left], &FORKS[self.right]]; - let forks = unsafe { RetryingLockCollection::new_unchecked(&forks) }; + let forks = unsafe { RefLockCollection::new_unchecked(&forks) }; let forks = forks.lock(key); println!("{} is eating...", self.name); thread::sleep(Duration::from_secs(1)); diff --git a/examples/double_mutex.rs b/examples/double_mutex.rs index 882fc46..ea61f0d 100644 --- a/examples/double_mutex.rs +++ b/examples/double_mutex.rs @@ -1,6 +1,5 @@ use std::thread; -use happylock::collection::RetryingLockCollection; use happylock::{collection::RefLockCollection, Mutex, ThreadKey}; const 
N: usize = 10; @@ -12,7 +11,7 @@ fn main() { for _ in 0..N { let th = thread::spawn(move || { let key = ThreadKey::get().unwrap(); - let lock = RetryingLockCollection::new_ref(&DATA); + let lock = RefLockCollection::new(&DATA); let mut guard = lock.lock(key); *guard.1 = (100 - *guard.0).to_string(); *guard.0 += 1; diff --git a/src/collection.rs b/src/collection.rs index a84c1ce..c51e3cf 100644 --- a/src/collection.rs +++ b/src/collection.rs @@ -1,6 +1,6 @@ use std::marker::PhantomData; -use crate::{key::Keyable, lockable::Lock}; +use crate::{key::Keyable, lockable::RawLock}; mod boxed; mod guard; @@ -8,25 +8,95 @@ mod owned; mod r#ref; mod retry; +/// Locks a collection of locks, which cannot be shared immutably. +/// +/// This could be a tuple of [`Lockable`] types, an array, or a `Vec`. But it +/// can be safely locked without causing a deadlock. +/// +/// The data in this collection is guaranteed to not contain duplicates because +/// `L` must always implement [`OwnedLockable`]. The underlying data may not be +/// immutably referenced and locked. Because of this, there is no need for +/// sorting the locks in the collection, or checking for duplicates, because it +/// can be guaranteed that until the underlying collection is mutated (which +/// requires releasing all acquired locks in the collection to do), then the +/// locks will stay in the same order and be locked in that order, preventing +/// cyclic wait. +/// +/// [`Lockable`]: `crate::lockable::Lockable` +/// [`OwnedLockable`]: `crate::lockable::OwnedLockable` #[derive(Debug)] pub struct OwnedLockCollection { data: L, } -/// A type which can be locked. +/// Locks a reference to a collection of locks, by sorting them by memory +/// address. /// /// This could be a tuple of [`Lockable`] types, an array, or a `Vec`. But it /// can be safely locked without causing a deadlock. +/// +/// Upon construction, it must be confirmed that the collection contains no +/// duplicate locks. 
This can be done by either using [`OwnedLockable`] or by +/// checking. Regardless of how this is done, the locks will be sorted by their +/// memory address before locking them. The sorted order of the locks is stored +/// within this collection. +/// +/// Unlike [`BoxedLockCollection`], this type does not allocate memory for the +/// data, although it does allocate memory for the sorted list of lock +/// references. This makes it slightly faster, but lifetimes must be handled. +/// +/// [`Lockable`]: `crate::lockable::Lockable` +/// [`OwnedLockable`]: `crate::lockable::OwnedLockable` pub struct RefLockCollection<'a, L> { data: &'a L, - locks: Vec<&'a dyn Lock>, + locks: Vec<&'a dyn RawLock>, } +/// Locks a collection of locks, stored in the heap, by sorting them by memory +/// address. +/// +/// This could be a tuple of [`Lockable`] types, an array, or a `Vec`. But it +/// can be safely locked without causing a deadlock. +/// +/// Upon construction, it must be confirmed that the collection contains no +/// duplicate locks. This can be done by either using [`OwnedLockable`] or by +/// checking. Regardless of how this is done, the locks will be sorted by their +/// memory address before locking them. The sorted order of the locks is stored +/// within this collection. +/// +/// Unlike [`RefLockCollection`], this is a self-referential type which boxes +/// the data that is given to it. This means no lifetimes are necessary on the +/// type itself, but it is slightly slower because of the memory allocation. +/// +/// [`Lockable`]: `crate::lockable::Lockable` +/// [`OwnedLockable`]: `crate::lockable::OwnedLockable` pub struct BoxedLockCollection { data: Box, - locks: Vec<&'static dyn Lock>, + locks: Vec<&'static dyn RawLock>, } +/// Locks a collection of locks using a retrying algorithm. +/// +/// This could be a tuple of [`Lockable`] types, an array, or a `Vec`. But it +/// can be safely locked without causing a deadlock. 
+/// +/// The data in this collection is guaranteed to not contain duplicates, but it +/// also not be sorted. In some cases the lack of sorting can increase +/// performance. However, in most cases, this collection will be slower. Cyclic +/// wait is not guaranteed here, so the locking algorithm must release all its +/// locks if one of the lock attempts blocks. This results in wasted time and +/// potential [livelocking]. +/// +/// However, one case where this might be faster than [`RefLockCollection`] is +/// when the first lock in the collection is always the first in any +/// collection, and the other locks in the collection are always locked after +/// that first lock is acquired. This means that as soon as it is locked, there +/// will be no need to unlock it later on subsequent lock attempts, because +/// they will always succeed. +/// +/// [`Lockable`]: `crate::lockable::Lockable` +/// [`OwnedLockable`]: `crate::lockable::OwnedLockable` +/// [livelocking]: https://en.wikipedia.org/wiki/Deadlock#Livelock #[derive(Debug)] pub struct RetryingLockCollection { data: L, diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs index a62a33d..ea840ab 100644 --- a/src/collection/boxed.rs +++ b/src/collection/boxed.rs @@ -1,14 +1,14 @@ use std::fmt::Debug; use std::marker::PhantomData; -use crate::lockable::Lock; -use crate::{Keyable, Lockable, OwnedLockable, Sharable}; +use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable}; +use crate::Keyable; use super::{BoxedLockCollection, LockGuard}; /// returns `true` if the sorted list contains a duplicate #[must_use] -fn contains_duplicates(l: &[&dyn Lock]) -> bool { +fn contains_duplicates(l: &[&dyn RawLock]) -> bool { l.windows(2) .any(|window| std::ptr::eq(window[0], window[1])) } @@ -18,7 +18,7 @@ unsafe impl Lockable for BoxedLockCollection { type ReadGuard<'g> = L::ReadGuard<'g> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a 
dyn RawLock>) { self.data.get_ptrs(ptrs) } diff --git a/src/collection/owned.rs b/src/collection/owned.rs index eb5e03a..d77d568 100644 --- a/src/collection/owned.rs +++ b/src/collection/owned.rs @@ -1,10 +1,11 @@ use std::marker::PhantomData; -use crate::{lockable::Lock, Keyable, Lockable, OwnedLockable, Sharable}; +use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable}; +use crate::Keyable; use super::{LockGuard, OwnedLockCollection}; -fn get_locks(data: &L) -> Vec<&dyn Lock> { +fn get_locks(data: &L) -> Vec<&dyn RawLock> { let mut locks = Vec::new(); data.get_ptrs(&mut locks); locks @@ -15,7 +16,7 @@ unsafe impl Lockable for OwnedLockCollection { type ReadGuard<'g> = L::ReadGuard<'g> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { self.data.get_ptrs(ptrs) } diff --git a/src/collection/ref.rs b/src/collection/ref.rs index 329f0ae..2e2883a 100644 --- a/src/collection/ref.rs +++ b/src/collection/ref.rs @@ -1,12 +1,13 @@ use std::fmt::Debug; use std::marker::PhantomData; -use crate::{key::Keyable, lockable::Lock, Lockable, OwnedLockable, Sharable}; +use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable}; +use crate::Keyable; use super::{LockGuard, RefLockCollection}; #[must_use] -pub fn get_locks(data: &L) -> Vec<&dyn Lock> { +pub fn get_locks(data: &L) -> Vec<&dyn RawLock> { let mut locks = Vec::new(); data.get_ptrs(&mut locks); locks.sort_by_key(|lock| std::ptr::from_ref(*lock)); @@ -15,7 +16,7 @@ pub fn get_locks(data: &L) -> Vec<&dyn Lock> { /// returns `true` if the sorted list contains a duplicate #[must_use] -fn contains_duplicates(l: &[&dyn Lock]) -> bool { +fn contains_duplicates(l: &[&dyn RawLock]) -> bool { l.windows(2) .any(|window| std::ptr::eq(window[0], window[1])) } @@ -43,7 +44,7 @@ unsafe impl<'c, L: Lockable> Lockable for RefLockCollection<'c, L> { type ReadGuard<'g> = L::ReadGuard<'g> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: 
&mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { ptrs.extend_from_slice(&self.locks); } diff --git a/src/collection/retry.rs b/src/collection/retry.rs index 58a0642..d15d7d6 100644 --- a/src/collection/retry.rs +++ b/src/collection/retry.rs @@ -1,4 +1,6 @@ -use crate::{lockable::Lock, Keyable, Lockable, OwnedLockable, Sharable}; +use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable}; +use crate::Keyable; + use std::collections::HashSet; use std::marker::PhantomData; @@ -7,7 +9,7 @@ use super::{LockGuard, RetryingLockCollection}; fn contains_duplicates(data: L) -> bool { let mut locks = Vec::new(); data.get_ptrs(&mut locks); - let locks = locks.into_iter().map(|l| l as *const dyn Lock); + let locks = locks.into_iter().map(|l| l as *const dyn RawLock); let mut locks_set = HashSet::new(); for lock in locks { @@ -24,7 +26,7 @@ unsafe impl Lockable for RetryingLockCollection { type ReadGuard<'g> = L::ReadGuard<'g> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { self.data.get_ptrs(ptrs) } diff --git a/src/lib.rs b/src/lib.rs index 668f3db..673d279 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -107,19 +107,24 @@ //! ``` mod key; -mod lockable; pub mod collection; +pub mod lockable; pub mod mutex; pub mod rwlock; -pub use collection::BoxedLockCollection as LockCollection; pub use key::{Keyable, ThreadKey}; -pub use lockable::{Lockable, OwnedLockable, Sharable}; #[cfg(feature = "spin")] pub use mutex::SpinLock; +/// A collection of locks that can be acquired simultaneously. +/// +/// This re-exports [`BoxedLockCollection`] as a sensible default. +/// +/// [`BoxedLockCollection`]: collection::BoxedLockCollection +pub type LockCollection = collection::BoxedLockCollection; + /// A mutual exclusion primitive useful for protecting shared data, which cannot deadlock. /// /// By default, this uses `parking_lot` as a backend. 
diff --git a/src/lockable.rs b/src/lockable.rs index 23aeb4c..2f98d3a 100644 --- a/src/lockable.rs +++ b/src/lockable.rs @@ -7,14 +7,14 @@ use crate::{ use lock_api::{RawMutex, RawRwLock}; -/// A type that may be locked and unlocked +/// A raw lock type that may be locked and unlocked /// /// # Safety /// /// A deadlock must never occur. The `unlock` method must correctly unlock the /// data. The `get_ptrs` method must be implemented correctly. The `Output` /// must be unlocked when it is dropped. -pub unsafe trait Lock: Send + Sync { +pub unsafe trait RawLock: Send + Sync { /// Blocks until the lock is acquired /// /// # Safety @@ -46,32 +46,111 @@ pub unsafe trait Lock: Send + Sync { /// It is undefined behavior to use this if the lock is not acquired unsafe fn unlock(&self); + /// Blocks until the data the lock protects can be safely read. + /// + /// Some locks, but not all, will allow multiple readers at once. If + /// multiple readers are allowed for a [`Lockable`] type, then the + /// [`Sharable`] marker trait should be implemented. + /// + /// # Safety + /// + /// It is undefined behavior to use this without ownership or mutable + /// access to the [`ThreadKey`], which should last as long as the return + /// value is alive. + /// + /// [`ThreadKey`]: `crate::ThreadKey` unsafe fn read(&self); + // Attempt to read without blocking. + /// + /// Returns `true` if successful, `false` otherwise. + /// + /// Some locks, but not all, will allow multiple readers at once. If + /// multiple readers are allowed for a [`Lockable`] type, then the + /// [`Sharable`] marker trait should be implemented. + /// + /// # Safety + /// + /// It is undefined behavior to use this without ownership or mutable + /// access to the [`ThreadKey`], which should last as long as the return + /// value is alive. + /// + /// [`ThreadKey`]: `crate::ThreadKey` unsafe fn try_read(&self) -> bool; + /// Releases the lock after calling `read`. 
+ /// + /// # Safety + /// + /// It is undefined behavior to use this if the read lock is not acquired unsafe fn unlock_read(&self); } +/// A type that may be locked and unlocked. +/// +/// This trait is usually implemented on collections of [`RawLock`]s. For +/// example, a `Vec>`. +/// +/// # Safety +/// +/// Acquiring the locks returned by `get_ptrs` must allow for the values +/// returned by `guard` or `read_guard` to be safely used for exclusive or +/// shared access, respectively. +/// +/// Dropping the `Guard` and `ReadGuard` types must unlock those same locks. +/// +/// The order of the resulting list from `get_ptrs` must be deterministic. As +/// long as the value is not mutated, the references must always be in the same +/// order. pub unsafe trait Lockable { - /// The guard returned that does not hold a key + /// The exclusive guard that does not hold a key type Guard<'g> where Self: 'g; + /// The shared guard type that does not hold a key type ReadGuard<'g> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>); + /// Yields a list of references to the [`RawLock`]s contained within this + /// value. + /// + /// These reference locks which must be locked before acquiring a guard, + /// and unlocked when the guard is dropped. The order of the resulting list + /// is deterministic. As long as the value is not mutated, the references + /// will always be in the same order. + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>); + /// Returns a guard that can be used to access the underlying data mutably. + /// + /// # Safety + /// + /// All locks given by calling [`Lockable::get_ptrs`] must be locked + /// exclusively before calling this function. The locks must not be + /// unlocked until this guard is dropped. #[must_use] unsafe fn guard(&self) -> Self::Guard<'_>; + /// Returns a guard that can be used to immutably access the underlying + /// data. 
+ /// + /// # Safety + /// + /// All locks given by calling [`Lockable::get_ptrs`] must be locked using + /// [`RawLock::read`] before calling this function. The locks must not be + /// unlocked until this guard is dropped. #[must_use] unsafe fn read_guard(&self) -> Self::ReadGuard<'_>; } +/// A marker trait to indicate that multiple readers can access the lock at a +/// time. +/// +/// # Safety +/// +/// This type must only be implemented if the lock can be safely shared between +/// multiple readers. pub unsafe trait Sharable: Lockable {} /// A type that may be locked and unlocked, and is known to be the only valid @@ -83,7 +162,7 @@ pub unsafe trait Sharable: Lockable {} /// time, i.e., this must either be an owned value or a mutable reference. pub unsafe trait OwnedLockable: Lockable {} -unsafe impl Lock for Mutex { +unsafe impl RawLock for Mutex { unsafe fn lock(&self) { self.raw().lock() } @@ -109,7 +188,7 @@ unsafe impl Lock for Mutex { } } -unsafe impl Lock for RwLock { +unsafe impl RawLock for RwLock { unsafe fn lock(&self) { self.raw().lock_exclusive() } @@ -139,7 +218,7 @@ unsafe impl Lockable for Mutex { type Guard<'g> = MutexRef<'g, T, R> where Self: 'g; type ReadGuard<'g> = MutexRef<'g, T, R> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { ptrs.push(self); } @@ -157,7 +236,7 @@ unsafe impl Lockable for RwLock { type ReadGuard<'g> = RwLockReadRef<'g, T, R> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { ptrs.push(self); } @@ -181,7 +260,7 @@ unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Lockable for ReadLock<'l, T type ReadGuard<'g> = RwLockReadRef<'g, T, R> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { ptrs.push(self.as_ref()); } @@ -199,7 +278,7 @@ unsafe impl<'l, T: Send, 
R: RawRwLock + Send + Sync> Lockable for WriteLock<'l, type ReadGuard<'g> = RwLockWriteRef<'g, T, R> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { ptrs.push(self.as_ref()); } @@ -219,7 +298,7 @@ unsafe impl Lockable for &T { type ReadGuard<'g> = T::ReadGuard<'g> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { (*self).get_ptrs(ptrs); } @@ -237,7 +316,7 @@ unsafe impl Lockable for &mut T { type ReadGuard<'g> = T::ReadGuard<'g> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { (**self).get_ptrs(ptrs) } @@ -257,7 +336,7 @@ unsafe impl Lockable for (A,) { type ReadGuard<'g> = (A::ReadGuard<'g>,) where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { self.0.get_ptrs(ptrs); } @@ -275,7 +354,7 @@ unsafe impl Lockable for (A, B) { type ReadGuard<'g> = (A::ReadGuard<'g>, B::ReadGuard<'g>) where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); } @@ -294,7 +373,7 @@ unsafe impl Lockable for (A, B, C) { type ReadGuard<'g> = (A::ReadGuard<'g>, B::ReadGuard<'g>, C::ReadGuard<'g>) where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); self.2.get_ptrs(ptrs); @@ -323,7 +402,7 @@ unsafe impl Lockable for (A, D::ReadGuard<'g>, ) where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); self.2.get_ptrs(ptrs); @@ -368,7 +447,7 @@ unsafe impl Loc 
E::ReadGuard<'g>, ) where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); self.2.get_ptrs(ptrs); @@ -418,7 +497,7 @@ unsafe impl, ) where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); self.2.get_ptrs(ptrs); @@ -473,7 +552,7 @@ unsafe impl, ) where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { self.0.get_ptrs(ptrs); self.1.get_ptrs(ptrs); self.2.get_ptrs(ptrs); @@ -573,7 +652,7 @@ unsafe impl Lockable for [T; N] { type ReadGuard<'g> = [T::ReadGuard<'g>; N] where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { for lock in self { lock.get_ptrs(ptrs); } @@ -603,7 +682,7 @@ unsafe impl Lockable for Box<[T]> { type ReadGuard<'g> = Box<[T::ReadGuard<'g>]> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { for lock in self.iter() { lock.get_ptrs(ptrs); } @@ -633,7 +712,7 @@ unsafe impl Lockable for Vec { type ReadGuard<'g> = Box<[T::ReadGuard<'g>]> where Self: 'g; - fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn Lock>) { + fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) { for lock in self { lock.get_ptrs(ptrs); } -- cgit v1.2.3 From 1ed88daa00d478472181f0987112a2b0f2266694 Mon Sep 17 00:00:00 2001 From: Botahamec Date: Wed, 22 May 2024 20:47:28 -0400 Subject: top-level docs --- README.md | 98 ++++++++++++++++++++++++++++++++++++--------------- examples/fibonacci.rs | 29 +++++++++++++++ src/lib.rs | 96 +++++++++++++++++++++++++++++++++++++++---------- 3 files changed, 176 insertions(+), 47 deletions(-) create mode 100644 examples/fibonacci.rs 
(limited to 'src/lib.rs') diff --git a/README.md b/README.md index bef7ef5..01c259e 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,16 @@ prevent deadlocks, we need to prevent one of the following: 3. circular wait (The language must enforce that every thread locks mutexes in the exact same order) 4. partial allocation (The language must enforce total allocation) -This library prevents #4, by requiring that all of the resources that a thread needs be locked at once. This is an atomic operation, so either all locks will be acquired, or none will. +This library seeks to solve **partial allocation** by requiring total +allocation. All the resources a thread needs must be allocated at the same +time. In order to request new resources, the old resources must be dropped +first. Requesting multiple resources at once is atomic. You either get all the +requested resources or none at all. + +As an optimization, this library also often prevents **circular wait**. Many +collections sort the locks in order of their memory address. As long as the +locks are always acquired in that order, then time doesn't need to be wasted +on releasing locks after a failure and re-acquiring them later. 
## Example @@ -22,16 +31,16 @@ This library prevents #4, by requiring that all of the resources that a thread n let data: Mutex = Mutex::new(0); for _ in 0..N { - thread::spawn(move || { - // each thread gets one thread key - let key = ThreadKey::get().unwrap(); + thread::spawn(move || { + // each thread gets one thread key + let key = ThreadKey::get().unwrap(); - // unlocking a mutex requires a ThreadKey - let mut data = data.lock(key); - *data += 1; + // unlocking a mutex requires a ThreadKey + let mut data = data.lock(key); + *data += 1; - // the key is unlocked at the end of the scope - }); + // the key is unlocked at the end of the scope + }); } let key = ThreadKey::get().unwrap(); @@ -48,16 +57,16 @@ static DATA_1: Mutex = Mutex::new(0); static DATA_2: Mutex = Mutex::new(String::new()); for _ in 0..N { - thread::spawn(move || { - let key = ThreadKey::get().unwrap(); + thread::spawn(move || { + let key = ThreadKey::get().unwrap(); - // happylock ensures at runtime there are no duplicate locks - let collection = LockCollection::try_new((&DATA_1, &DATA_2)).unwrap(); - let mut guard = collection.lock(key); + // happylock ensures at runtime there are no duplicate locks + let collection = LockCollection::try_new((&DATA_1, &DATA_2)).unwrap(); + let mut guard = collection.lock(key); - *guard.1 = (100 - *guard.0).to_string(); - *guard.0 += 1; - }); + *guard.1 = (100 - *guard.0).to_string(); + *guard.0 += 1; + }); } let key = ThreadKey::get().unwrap(); @@ -67,32 +76,65 @@ println!("{}", *data.0); println!("{}", *data.1); ``` +In many cases, the [`LockCollection::new`] or [`LockCollection::new_ref`] method can be used, improving performance. 
+ +```rust +use std::thread; +use happylock::{LockCollection, Mutex, ThreadKey}; + +const N: usize = 100; + +static DATA: [Mutex; 2] = [Mutex::new(0), Mutex::new(1)]; + +for _ in 0..N { + thread::spawn(move || { + let key = ThreadKey::get().unwrap(); + // a reference to a type that implements `OwnedLockable` will never + // contain duplicates, so no duplicate checking is needed. + let collection = LockCollection::new_ref(&DATA); + let mut guard = collection.lock(key); + let x = *guard[1]; + *guard[1] += *guard[0]; + *guard[0] = x; + }); +} + +let key = ThreadKey::get().unwrap(); +let data = LockCollection::new_ref(&DATA); +let data = data.lock(key); + +println!("{}", data[0]); +println!("{}", data[1]); +``` + ## Performance **The `ThreadKey` is a mostly-zero cost abstraction.** It doesn't use any memory, and it doesn't really exist at run-time. The only cost comes from calling `ThreadKey::get()`, because the function has to ensure at runtime that the key hasn't already been taken. Dropping the key will also have a small cost. -**Avoid `LockCollection::try_new`.** This constructor will check to make sure that the collection contains no duplicate locks. This is an O(nlogn) operation, where n is the number of locks in the collection. `LockCollection::new` and `LockCollection::new_ref` don't need these checks because they use `OwnedLockable`, which is guaranteed to be unique as long as it is accessible. As a last resort, `LockCollection::new_unchecked` doesn't do this check, but is unsafe to call. +**Consider [`OwnedLockCollection`].** This will almost always be the fastest lock collection. It doesn't expose the underlying collection immutably, which means that it will always be locked in the same order, and doesn't need any sorting. + +**Avoid [`LockCollection::try_new`].** This constructor will check to make sure that the collection contains no duplicate locks. 
In most cases, this is O(nlogn), where n is the number of locks in the collections, but in the case of [`RetryingLockCollection`], it's close to O(n). [`LockCollection::new`] and [`LockCollection::new_ref`] don't need these checks because they use [`OwnedLockable`], which is guaranteed to be unique as long as it is accessible. As a last resort, [`LockCollection::new_unchecked`] doesn't do this check, but is unsafe to call. -**Avoid using distinct lock orders for `RetryingLockCollection`.** The problem is that this collection must iterate through the list of locks, and not complete until every single one of them is unlocked. This also means that attempting to lock multiple mutexes gives you a lower chance of ever running. Only one needs to be locked for the operation to need a reset. This problem can be prevented by not doing that in your code. Resources should be obtained in the same order on every thread. +**Know how to use [`RetryingLockCollection`].** This collection doesn't do any sorting, but uses a wasteful lock algorithm. It can't rely on the order of the locks to be the same across threads, so if it finds a lock that it can't acquire without blocking, it'll first release all of the locks it already acquired to avoid blocking other threads. This is wasteful because this algorithm may end up re-acquiring the same lock multiple times. To avoid this, ensure that (1) the first lock in the collection is always the first lock in any collection it appears in, and (2) the other locks in the collection are always preceded by that first lock. This will prevent any wasted time from re-acquiring locks. If you're unsure, [`LockCollection`] is a sensible default. ## Future Work It might be possible to break the `ThreadKey` system by having two crates import this crate and call `ThreadKey::get`. I'm not quite sure how this works, but Rust could decide to give each crate their own key, ergo one thread would get two keys.
I don't think the standard library would have this issue. At a certain point, I have to recognize that someone could also just import the standard library mutex and get a deadlock that way. -We should add `Condvar` at some point. I didn't because I've never used it before, and I'm probably not the right person to solve this problem. I think all the synchronization problems could be solved by having `Condvar::wait` take a `ThreadKey` instead of a `MutexGuard`. Something similar can probably be done for `Barrier`. But again, I'm no expert. +Are the ergonomics here any good? This is completely uncharted territory. Maybe there are some useful helper methods we don't have here yet. Maybe `try_lock` should return a `Result`. Maybe `lock_api` or `spin` implements some useful methods that I kept out for this proof of concept. Maybe there are some lock-specific methods that could be added to `LockCollection`. More types might be lockable using a lock collection. -Do `OnceLock` or `LazyLock` ever deadlock? We might not need to add those here. - -It'd be nice to be able to use the mutexes built into the operating system, saving on binary size. Using `std::sync::Mutex` sounds promising, but it doesn't implement `RawMutex`, and implementing that is very difficult, if not impossible. +It'd be nice to be able to use the mutexes built into the operating system, saving on binary size. Using `std::sync::Mutex` sounds promising, but it doesn't implement `RawMutex`, and implementing that is very difficult, if not impossible. Maybe I could implement my own abstraction over the OS mutexes. I could also simply implement `Lockable` for the standard library mutex. Personally, I don't like mutex poisoning, but maybe it can be worked into the library if you're into that sort of thing. -Are the ergonomics here any good? This is completely uncharted territory. Maybe there are some useful helper methods we don't have here yet. Maybe `try_lock` should return a `Result`. 
Maybe `lock_api` or `spin` implements some useful methods that I kept out for this proof of concept. Maybe there are some lock-specific methods that could be added to `LockCollection`. More types might be lockable using a `LockGuard`. +It'd be interesting to add some methods such as `lock_clone` or `lock_swap`. This would still require a thread key, in case the mutex is already locked. The only way this could be done without a thread key is with a `&mut Mutex`, but we already have `as_mut`. A `try_lock_clone` or `try_lock_swap` might not need a `ThreadKey` though. A special lock that looks like `Cell` but implements `Sync` could be shared without a thread key, because the lock would be dropped immediately (preventing non-preemptive allocation). It might make some common operations easier. -I want to try to get this working without the standard library. There are a few problems with this though. For instance, this crate uses `thread_local` to allow other threads to have their own keys. Also, the only practical type of mutex that would work is a spinlock. Although, more could be implemented using the `RawMutex` trait. The `LockCollection` requires memory allocation at this time in order to check for duplicate locks. +Now that we have the `Sharable` trait, indicating that all of the locks in a collection can be shared, we could implement a `Readonly` wrapper around the collections that don't allow access to `lock` and `try_lock`. The idea would be that if you're not exclusively locking the collection, then you don't need to check for duplicates in the collection. Calling `.read()` on the same `RwLock` twice does not cause a deadlock. -It'd be interesting to add some methods such as `lock_clone` or `lock_swap`. This would still require a thread key, in case the mutex is already locked. The only way this could be done without a thread key is with a `&mut Mutex`, but we already have `get_mut`. A `try_lock_clone` or `try_lock_swap` might not need a `ThreadKey` though.
A special lock that looks like `Cell` but implements `Sync` could be shared without a thread key, because the lock would be dropped immediately (preventing non-preemptive allocation). It might make some common operations easier. +I want to try to get this working without the standard library. There are a few problems with this though. For instance, this crate uses `thread_local` to allow other threads to have their own keys. Also, the only practical type of mutex that would work is a spinlock. Although, more could be implemented using the `RawMutex` trait. The `Lockable` trait requires memory allocation at this time in order to check for duplicate locks. -There might be some use in trying to prevent circular wait. There could be a special type that only allows the locking mutexes in a specific order. This would still require a thread key so that nobody tries to unlock multiple lock sequences at the same time. The biggest problem is that `LockSequence::lock_next` would need to return the same value each time, which is not very flexible. Most use cases for this are solved already by using `LockCollection`. +I've been thinking about adding `Condvar` and `Barrier`, but I've been stopped by two things. I don't use either of those very often, so I'm probably not the right person to try to implement either of them. They're also weird, and harder to prevent deadlocking for. They're sort of the opposite of a mutex, since a mutex guarantees that at least one thread can always access each resource. + +Do `OnceLock` or `LazyLock` ever deadlock? We might not need to add those here. -Some sort of `DynamicLock` type might be useful so that, for example, a `Mutex` and an `RwLock` could be unlocked at the same time inside of a `Vec>`. Although, this wouldn't solve the problem of needing a `Mutex` and a `Mutex` at the same time. This would be better solved using the existing tuple system.
+We could implement special methods for something like a `LockCollection>` where we only lock the first three items. diff --git a/examples/fibonacci.rs b/examples/fibonacci.rs new file mode 100644 index 0000000..d43b01c --- /dev/null +++ b/examples/fibonacci.rs @@ -0,0 +1,29 @@ +use happylock::{LockCollection, Mutex, ThreadKey}; +use std::thread; + +const N: usize = 36; + +static DATA: [Mutex; 2] = [Mutex::new(0), Mutex::new(1)]; + +fn main() { + for _ in 0..N { + thread::spawn(move || { + let key = ThreadKey::get().unwrap(); + + // a reference to a type that implements `OwnedLockable` will never + // contain duplicates, so no duplicate checking is needed. + let collection = LockCollection::new_ref(&DATA); + let mut guard = collection.lock(key); + + let x = *guard[1]; + *guard[1] += *guard[0]; + *guard[0] = x; + }); + } + + let key = ThreadKey::get().unwrap(); + let data = LockCollection::new_ref(&DATA); + let data = data.lock(key); + println!("{}", data[0]); + println!("{}", data[1]); +} diff --git a/src/lib.rs b/src/lib.rs index 673d279..9c39c6d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,7 +14,7 @@ //! # Theory //! //! There are four conditions necessary for a deadlock to occur. In order to -//! prevent deadlocks, we need to prevent one of the following: +//! prevent deadlocks, we just need to prevent one of the following: //! //! 1. mutual exclusion //! 2. non-preemptive allocation @@ -27,24 +27,10 @@ //! first. Requesting multiple resources at once is atomic. You either get all //! the requested resources or none at all. //! -//! # Performance -//! -//! **Avoid [`LockCollection::try_new`].** This constructor will check to make -//! sure that the collection contains no duplicate locks. This is an O(n^2) -//! operation, where n is the number of locks in the collection. -//! [`LockCollection::new`] and [`LockCollection::new_ref`] don't need these -//! checks because they use [`OwnedLockable`], which is guaranteed to be unique -//! as long as it is accessible. 
As a last resort, -//! [`LockCollection::new_unchecked`] doesn't do this check, but is unsafe to -//! call. -//! -//! **Avoid using distinct lock orders for [`LockCollection`].** The problem is -//! that this library must iterate through the list of locks, and not complete -//! until every single one of them is unlocked. This also means that attempting -//! to lock multiple mutexes gives you a lower chance of ever running. Only one -//! needs to be locked for the operation to need a reset. This problem can be -//! prevented by not doing that in your code. Resources should be obtained in -//! the same order on every thread. +//! As an optimization, this library also often prevents **circular wait**. +//! Many collections sort the locks in order of their memory address. As long +//! as the locks are always acquired in that order, then time doesn't need to +//! be wasted on releasing locks after a failure and re-acquiring them later. //! //! # Examples //! @@ -105,6 +91,78 @@ //! println!("{}", *data.0); //! println!("{}", *data.1); //! ``` +//! +//! In many cases, the [`LockCollection::new`] or [`LockCollection::new_ref`] +//! method can be used, improving performance. +//! +//! ```rust +//! use std::thread; +//! use happylock::{LockCollection, Mutex, ThreadKey}; +//! +//! const N: usize = 100; +//! +//! static DATA: [Mutex; 2] = [Mutex::new(0), Mutex::new(1)]; +//! +//! for _ in 0..N { +//! thread::spawn(move || { +//! let key = ThreadKey::get().unwrap(); +//! +//! // a reference to a type that implements `OwnedLockable` will never +//! // contain duplicates, so no duplicate checking is needed. +//! let collection = LockCollection::new_ref(&DATA); +//! let mut guard = collection.lock(key); +//! +//! let x = *guard[1]; +//! *guard[1] += *guard[0]; +//! *guard[0] = x; +//! }); +//! } +//! +//! let key = ThreadKey::get().unwrap(); +//! let data = LockCollection::new_ref(&DATA); +//! let data = data.lock(key); +//! println!("{}", data[0]); +//! 
println!("{}", data[1]); +//! ``` +//! +//! # Performance +//! +//! **The `ThreadKey` is a mostly-zero cost abstraction.** It doesn't use any +//! memory, and it doesn't really exist at run-time. The only cost comes from +//! calling `ThreadKey::get()`, because the function has to ensure at runtime +//! that the key hasn't already been taken. Dropping the key will also have a +//! small cost. +//! +//! **Consider [`OwnedLockCollection`].** This will almost always be the +//! fastest lock collection. It doesn't expose the underlying collection +//! immutably, which means that it will always be locked in the same order, and +//! doesn't need any sorting. +//! +//! **Avoid [`LockCollection::try_new`].** This constructor will check to make +//! sure that the collection contains no duplicate locks. In most cases, this +//! is O(nlogn), where n is the number of locks in the collections but in the +//! case of [`RetryingLockCollection`], it's close to O(n). +//! [`LockCollection::new`] and [`LockCollection::new_ref`] don't need these +//! checks because they use [`OwnedLockable`], which is guaranteed to be unique +//! as long as it is accessible. As a last resort, +//! [`LockCollection::new_unchecked`] doesn't do this check, but is unsafe to +//! call. +//! +//! **Know how to use [`RetryingLockCollection`].** This collection doesn't do +//! any sorting, but uses a wasteful lock algorithm. It can't rely on the order +//! of the locks to be the same across threads, so if it finds a lock that it +//! can't acquire without blocking, it'll first release all of the locks it +//! already acquired to avoid blocking other threads. This is wasteful because +//! this algorithm may end up re-acquiring the same lock multiple times. To +//! avoid this, ensure that (1) the first lock in the collection is always the +//! first lock in any collection it appears in, and (2) the other locks in the +//! collection are always preceded by that first lock. This will prevent any +//! 
wasted time from re-acquiring locks. If you're unsure, [`LockCollection`] +//! is a sensible default. +//! +//! [`OwnedLockable`]: `lockable::OwnedLockable` +//! [`OwnedLockCollection`]: `collection::OwnedLockCollection` +//! [`RetryingLockCollection`]: `collection::RetryingLockCollection` mod key; -- cgit v1.2.3 From f81d4b40a007fecf6502a36b4c24a1e31807a731 Mon Sep 17 00:00:00 2001 From: Botahamec Date: Thu, 23 May 2024 19:50:32 -0400 Subject: Comments --- src/collection.rs | 20 +++++++++++++++++++- src/collection/boxed.rs | 30 +++++++----------------------- src/collection/owned.rs | 28 +++++----------------------- src/collection/ref.rs | 28 +++++----------------------- src/collection/utils.rs | 44 ++++++++++++++++++++++++++++++++++++++++++++ src/key.rs | 7 +++++++ src/lib.rs | 3 +++ src/lockable.rs | 28 ++++++++++++++++++++++++++-- src/mutex.rs | 5 ++++- src/mutex/guard.rs | 10 ++++++++++ src/mutex/mutex.rs | 4 ++++ src/rwlock/write_lock.rs | 11 +++++------ 12 files changed, 139 insertions(+), 79 deletions(-) create mode 100644 src/collection/utils.rs (limited to 'src/lib.rs') diff --git a/src/collection.rs b/src/collection.rs index 5dc6946..27ec1c4 100644 --- a/src/collection.rs +++ b/src/collection.rs @@ -7,6 +7,7 @@ mod guard; mod owned; mod r#ref; mod retry; +mod utils; /// Locks a collection of locks, which cannot be shared immutably. /// @@ -24,6 +25,9 @@ mod retry; /// /// [`Lockable`]: `crate::lockable::Lockable` /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` + +// this type caches the idea that no immutable references to the underlying +// collection exist #[derive(Debug)] pub struct OwnedLockCollection { data: L, @@ -47,6 +51,13 @@ pub struct OwnedLockCollection { /// /// [`Lockable`]: `crate::lockable::Lockable` /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` + +// This type was born when I eventually realized that I needed a self +// referential structure. 
That used boxing, so I elected to make a more +// efficient implementation (polonius please save us) + +// This type caches the sorting order of the locks and the fact that it doesn't +// contain any duplicates. pub struct RefLockCollection<'a, L> { data: &'a L, locks: Vec<&'a dyn RawLock>, @@ -70,9 +81,14 @@ pub struct RefLockCollection<'a, L> { /// /// [`Lockable`]: `crate::lockable::Lockable` /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` + +// This type caches the sorting order of the locks and the fact that it doesn't +// contain any duplicates. pub struct BoxedLockCollection { data: Box, - locks: Vec<&'static dyn RawLock>, + locks: Vec<&'static dyn RawLock>, // As far as you know, it's static. + // Believe it or not, saying the lifetime + // is static when it's not isn't UB } /// Locks a collection of locks using a retrying algorithm. @@ -97,6 +113,8 @@ pub struct BoxedLockCollection { /// [`Lockable`]: `crate::lockable::Lockable` /// [`OwnedLockable`]: `crate::lockable::OwnedLockable` /// [livelocking]: https://en.wikipedia.org/wiki/Deadlock#Livelock + +// This type caches the fact that there are no duplicates #[derive(Debug)] pub struct RetryingLockCollection { data: L, diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs index 224eedb..5ced6d1 100644 --- a/src/collection/boxed.rs +++ b/src/collection/boxed.rs @@ -4,7 +4,7 @@ use std::marker::PhantomData; use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable}; use crate::Keyable; -use super::{BoxedLockCollection, LockGuard}; +use super::{utils, BoxedLockCollection, LockGuard}; /// returns `true` if the sorted list contains a duplicate #[must_use] @@ -185,6 +185,8 @@ impl BoxedLockCollection { let data = Box::new(data); let mut locks = Vec::new(); data.get_ptrs(&mut locks); + + // cast to *const () because fat pointers can't be converted to usize locks.sort_by_key(|lock| std::ptr::from_ref(*lock).cast::<()>() as usize); // safety: the box will be dropped after the lock 
references, so it's @@ -310,17 +312,8 @@ impl BoxedLockCollection { key: Key, ) -> Option, Key>> { let guard = unsafe { - for (i, lock) in self.locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_lock(); - - if !success { - for lock in &self.locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } + if !utils::ordered_try_lock(&self.locks) { + return None; } // safety: we've acquired the locks @@ -424,17 +417,8 @@ impl BoxedLockCollection { key: Key, ) -> Option, Key>> { let guard = unsafe { - for (i, lock) in self.locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_read(); - - if !success { - for lock in &self.locks[0..i] { - // safety: this lock was already acquired - lock.unlock_read(); - } - return None; - } + if !utils::ordered_try_read(&self.locks) { + return None; } // safety: we've acquired the locks diff --git a/src/collection/owned.rs b/src/collection/owned.rs index e1549b2..919c403 100644 --- a/src/collection/owned.rs +++ b/src/collection/owned.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable}; use crate::Keyable; -use super::{LockGuard, OwnedLockCollection}; +use super::{utils, LockGuard, OwnedLockCollection}; fn get_locks(data: &L) -> Vec<&dyn RawLock> { let mut locks = Vec::new(); @@ -191,17 +191,8 @@ impl OwnedLockCollection { ) -> Option, Key>> { let locks = get_locks(&self.data); let guard = unsafe { - for (i, lock) in locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_lock(); - - if !success { - for lock in &locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } + if !utils::ordered_try_lock(&locks) { + return None; } // safety: we've acquired the locks @@ -315,17 +306,8 @@ impl OwnedLockCollection { ) -> Option, Key>> { let locks = get_locks(&self.data); let guard = unsafe { - for (i, lock) in 
locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_read(); - - if !success { - for lock in &locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } + if !utils::ordered_try_read(&locks) { + return None; } // safety: we've acquired the locks diff --git a/src/collection/ref.rs b/src/collection/ref.rs index e5c548f..d8c7f2e 100644 --- a/src/collection/ref.rs +++ b/src/collection/ref.rs @@ -4,7 +4,7 @@ use std::marker::PhantomData; use crate::lockable::{Lockable, OwnedLockable, RawLock, Sharable}; use crate::Keyable; -use super::{LockGuard, RefLockCollection}; +use super::{utils, LockGuard, RefLockCollection}; #[must_use] pub fn get_locks(data: &L) -> Vec<&dyn RawLock> { @@ -221,17 +221,8 @@ impl<'a, L: Lockable> RefLockCollection<'a, L> { key: Key, ) -> Option, Key>> { let guard = unsafe { - for (i, lock) in self.locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_lock(); - - if !success { - for lock in &self.locks[0..i] { - // safety: this lock was already acquired - lock.unlock(); - } - return None; - } + if !utils::ordered_try_lock(&self.locks) { + return None; } // safety: we've acquired the locks @@ -339,17 +330,8 @@ impl<'a, L: Sharable> RefLockCollection<'a, L> { key: Key, ) -> Option, Key>> { let guard = unsafe { - for (i, lock) in self.locks.iter().enumerate() { - // safety: we have the thread key - let success = lock.try_read(); - - if !success { - for lock in &self.locks[0..i] { - // safety: this lock was already acquired - lock.unlock_read(); - } - return None; - } + if !utils::ordered_try_read(&self.locks) { + return None; } // safety: we've acquired the locks diff --git a/src/collection/utils.rs b/src/collection/utils.rs new file mode 100644 index 0000000..dc58399 --- /dev/null +++ b/src/collection/utils.rs @@ -0,0 +1,44 @@ +use crate::lockable::RawLock; + +/// Locks the locks in the order they are given. 
This causes deadlock if the +/// locks contain duplicates, or if this is called by multiple threads with the +/// locks in different orders. +pub unsafe fn ordered_try_lock(locks: &[&dyn RawLock]) -> bool { + unsafe { + for (i, lock) in locks.iter().enumerate() { + // safety: we have the thread key + let success = lock.try_lock(); + + if !success { + for lock in &locks[0..i] { + // safety: this lock was already acquired + lock.unlock(); + } + return false; + } + } + + true + } +} + +/// Locks the locks in the order they are given. This causes deadlock if this is +/// called by multiple threads with the locks in different orders. +pub unsafe fn ordered_try_read(locks: &[&dyn RawLock]) -> bool { + unsafe { + for (i, lock) in locks.iter().enumerate() { + // safety: we have the thread key + let success = lock.try_read(); + + if !success { + for lock in &locks[0..i] { + // safety: this lock was already acquired + lock.unlock_read(); + } + return false; + } + } + + true + } +} diff --git a/src/key.rs b/src/key.rs index 875f4be..4d6504f 100644 --- a/src/key.rs +++ b/src/key.rs @@ -7,6 +7,8 @@ use thread_local::ThreadLocal; use sealed::Sealed; +// Sealed to prevent other key types from being implemented. Otherwise, this +// would be almost instant undefined behavior. mod sealed { use super::ThreadKey; @@ -15,6 +17,9 @@ mod sealed { impl Sealed for &mut ThreadKey {} } +// I am concerned that having multiple crates linked together with different +// static variables could break my key system. Library code probably shouldn't +// be creating keys at all. static KEY: Lazy> = Lazy::new(ThreadLocal::new); /// The key for the current thread. @@ -34,6 +39,7 @@ pub struct ThreadKey { /// values invalid.
pub unsafe trait Keyable: Sealed {} unsafe impl Keyable for ThreadKey {} +// the ThreadKey can't be moved while a mutable reference to it exists unsafe impl Keyable for &mut ThreadKey {} impl Debug for ThreadKey { @@ -42,6 +48,7 @@ impl Debug for ThreadKey { } } +// If you lose the thread key, you can get it back by calling ThreadKey::get impl Drop for ThreadKey { fn drop(&mut self) { unsafe { KEY.get().unwrap().force_unlock() } diff --git a/src/lib.rs b/src/lib.rs index 9c39c6d..643c3e7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -176,6 +176,9 @@ pub use key::{Keyable, ThreadKey}; #[cfg(feature = "spin")] pub use mutex::SpinLock; +// Personally, I think re-exports look ugly in the rust documentation, so I +// went with type aliases instead. + /// A collection of locks that can be acquired simultaneously. /// /// This re-exports [`BoxedLockCollection`] as a sensible default. diff --git a/src/lockable.rs b/src/lockable.rs index 6b9c7c6..9f44981 100644 --- a/src/lockable.rs +++ b/src/lockable.rs @@ -14,6 +14,13 @@ use lock_api::{RawMutex, RawRwLock}; /// A deadlock must never occur. The `unlock` method must correctly unlock the /// data. The `get_ptrs` method must be implemented correctly. The `Output` /// must be unlocked when it is dropped. + +// Why not use a RawRwLock? Because that would be semantically incorrect, and I +// don't want an INIT or GuardMarker associated item. +// Originally, RawLock had a sister trait: RawSharableLock. I removed it +// because it'd be difficult to implement a separate type that takes a +// different kind of RawLock. But now the Sharable marker trait is needed to +// indicate if reads can be used. 
pub unsafe trait RawLock: Send + Sync { /// Blocks until the lock is acquired /// @@ -175,6 +182,8 @@ unsafe impl RawLock for Mutex { self.raw().unlock() } + // this is the closest thing to a read we can get, but Sharable isn't + // implemented for this unsafe fn read(&self) { self.raw().lock() } @@ -291,8 +300,13 @@ unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Lockable for WriteLock<'l, } } +// Technically, the exclusive locks can also be shared, but there's currently +// no way to express that. I don't think I want to ever express that. unsafe impl<'l, T: Send, R: RawRwLock + Send + Sync> Sharable for ReadLock<'l, T, R> {} +// Because both ReadLock and WriteLock hold references to RwLocks, they can't +// implement OwnedLockable + unsafe impl Lockable for &T { type Guard<'g> = T::Guard<'g> where Self: 'g; @@ -335,6 +349,8 @@ unsafe impl Sharable for &mut T {} unsafe impl OwnedLockable for &mut T {} +/// Implements `Lockable`, `Sharable`, and `OwnedLockable` for tuples +/// ex: `tuple_impls!(A B C, 0 1 2);` macro_rules! tuple_impls { ($($generic:ident)*, $($value:tt)*) => { unsafe impl<$($generic: Lockable,)*> Lockable for ($($generic,)*) { @@ -347,6 +363,8 @@ macro_rules! 
tuple_impls { } unsafe fn guard(&self) -> Self::Guard<'_> { + // It's weird that this works + // I don't think any other way of doing it compiles ($(self.$value.guard(),)*) } @@ -381,6 +399,8 @@ unsafe impl Lockable for [T; N] { } unsafe fn guard<'g>(&'g self) -> Self::Guard<'g> { + // The MaybeInit helper functions for arrays aren't stable yet, so + // we'll just have to implement it ourselves let mut guards = MaybeUninit::<[MaybeUninit>; N]>::uninit().assume_init(); for i in 0..N { guards[i].write(self[i].guard()); @@ -430,7 +450,8 @@ unsafe impl Lockable for Box<[T]> { } unsafe impl Lockable for Vec { - type Guard<'g> = Vec> where Self: 'g; + // There's no reason why I'd ever want to extend a list of lock guards + type Guard<'g> = Box<[T::Guard<'g>]> where Self: 'g; type ReadGuard<'g> = Box<[T::ReadGuard<'g>]> where Self: 'g; @@ -446,7 +467,7 @@ unsafe impl Lockable for Vec { guards.push(lock.guard()); } - guards + guards.into_boxed_slice() } unsafe fn read_guard(&self) -> Self::ReadGuard<'_> { @@ -459,6 +480,9 @@ unsafe impl Lockable for Vec { } } +// I'd make a generic impl> Lockable for I +// but I think that'd require sealing up this trait + unsafe impl Sharable for [T; N] {} unsafe impl Sharable for Box<[T]> {} unsafe impl Sharable for Vec {} diff --git a/src/mutex.rs b/src/mutex.rs index a3baa00..b30c2b1 100644 --- a/src/mutex.rs +++ b/src/mutex.rs @@ -49,8 +49,11 @@ pub struct MutexRef<'a, T: ?Sized + 'a, R: RawMutex>( /// /// [`lock`]: `Mutex::lock` /// [`try_lock`]: `Mutex::try_lock` + +// This is the most lifetime-intensive thing I've ever written. Can I graduate +// from borrow checker university now? 
pub struct MutexGuard<'a, 'key: 'a, T: ?Sized + 'a, Key: Keyable + 'key, R: RawMutex> { - mutex: MutexRef<'a, T, R>, + mutex: MutexRef<'a, T, R>, // this way we don't need to re-implement Drop thread_key: Key, _phantom: PhantomData<&'key ()>, } diff --git a/src/mutex/guard.rs b/src/mutex/guard.rs index 9e8e2e6..f9324ad 100644 --- a/src/mutex/guard.rs +++ b/src/mutex/guard.rs @@ -8,6 +8,9 @@ use crate::key::Keyable; use super::{Mutex, MutexGuard, MutexRef}; +// This makes things slightly easier because now you can use +// `println!("{guard}")` instead of `println!("{}", *guard)`. I wonder if I +// should implement some other standard library traits like this too? impl<'a, T: Debug + ?Sized + 'a, R: RawMutex> Debug for MutexRef<'a, T, R> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { Debug::fmt(&**self, f) @@ -63,11 +66,18 @@ impl<'a, T: ?Sized + 'a, R: RawMutex> AsMut for MutexRef<'a, T, R> { impl<'a, T: ?Sized + 'a, R: RawMutex> MutexRef<'a, T, R> { /// Creates a reference to the underlying data of a mutex without /// attempting to lock it or take ownership of the key. + + // This might be useful to export, because it makes it easier to express + // the concept of: "Get the data out the mutex but don't lock it or take + // the key". But it's also quite dangerous to drop. 
pub(crate) unsafe fn new(mutex: &'a Mutex) -> Self { Self(mutex, PhantomData) } } +// it's kinda annoying to re-implement some of this stuff on guards +// there's nothing i can do about that + impl<'a, 'key, T: Debug + ?Sized + 'a, Key: Keyable + 'key, R: RawMutex> Debug for MutexGuard<'a, 'key, T, Key, R> { diff --git a/src/mutex/mutex.rs b/src/mutex/mutex.rs index 52b6081..89dfef9 100644 --- a/src/mutex/mutex.rs +++ b/src/mutex/mutex.rs @@ -48,6 +48,7 @@ impl Debug for Mutex { // safety: this is just a try lock, and the value is dropped // immediately after, so there's no risk of blocking ourselves // or any other threads + // when i implement try_clone this code will become less unsafe if let Some(value) = unsafe { self.try_lock_no_key() } { f.debug_struct("Mutex").field("data", &&*value).finish() } else { @@ -77,6 +78,9 @@ impl From for Mutex { } } +// We don't need a `get_mut` because we don't have mutex poisoning. Hurray! +// This is safe because you can't have a mutable reference to the lock if it's +// locked. Being locked requires an immutable reference because of the guard. impl AsMut for Mutex { fn as_mut(&mut self) -> &mut T { self.get_mut() diff --git a/src/rwlock/write_lock.rs b/src/rwlock/write_lock.rs index 8501cd8..2cf73cd 100644 --- a/src/rwlock/write_lock.rs +++ b/src/rwlock/write_lock.rs @@ -11,7 +11,9 @@ impl<'l, T: ?Sized + Debug, R: RawRwLock> Debug for WriteLock<'l, T, R> { // safety: this is just a try lock, and the value is dropped // immediately after, so there's no risk of blocking ourselves // or any other threads - if let Some(value) = unsafe { self.try_lock_no_key() } { + // It makes zero sense to try using an exclusive lock for this, so this + // is the only time when WriteLock does a read. 
+ if let Some(value) = unsafe { self.0.try_read_no_key() } { f.debug_struct("WriteLock").field("data", &&*value).finish() } else { struct LockedPlaceholder; @@ -75,11 +77,8 @@ impl<'l, T: ?Sized, R: RawRwLock> WriteLock<'l, T, R> { self.0.try_write(key) } - /// Attempts to create an exclusive lock without a key. Locking this - /// without exclusive access to the key is undefined behavior. - pub(crate) unsafe fn try_lock_no_key(&self) -> Option> { - self.0.try_write_no_key() - } + // There's no `try_lock_no_key`. Instead, `try_read_no_key` is called on + // the referenced `RwLock`. /// Immediately drops the guard, and consequently releases the exclusive /// lock. -- cgit v1.2.3