summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Cargo.toml2
-rw-r--r--happylock.md205
-rw-r--r--package-lock.json6
-rw-r--r--package.json1
-rw-r--r--src/collection/boxed.rs1
-rw-r--r--src/collection/guard.rs6
-rw-r--r--src/collection/owned.rs2
-rw-r--r--src/collection/ref.rs1
-rw-r--r--src/key.rs1
-rw-r--r--src/mutex.rs17
-rw-r--r--src/mutex/guard.rs8
-rw-r--r--src/mutex/mutex.rs1
-rw-r--r--src/poisonable.rs26
-rw-r--r--src/poisonable/error.rs2
-rw-r--r--src/poisonable/flag.rs1
-rw-r--r--src/poisonable/guard.rs8
-rw-r--r--src/poisonable/poisonable.rs8
-rw-r--r--src/rwlock.rs31
-rw-r--r--src/rwlock/read_guard.rs8
-rw-r--r--src/rwlock/read_lock.rs1
-rw-r--r--src/rwlock/rwlock.rs1
-rw-r--r--src/rwlock/write_guard.rs8
-rw-r--r--src/rwlock/write_lock.rs1
23 files changed, 286 insertions, 60 deletions
diff --git a/Cargo.toml b/Cargo.toml
index 90a8e4f..59302c7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "happylock"
-version = "0.4.1"
+version = "0.4.2"
authors = ["Mica White <botahamec@outlook.com>"]
edition = "2021"
rust-version = "1.82"
diff --git a/happylock.md b/happylock.md
index 0e250fe..973b819 100644
--- a/happylock.md
+++ b/happylock.md
@@ -2,6 +2,7 @@
marp: true
theme: gaia
class: invert
+author: Mica White
---
<!-- _class: lead invert -->
@@ -112,7 +113,7 @@ use happylock::{ThreadKey, Mutex};
fn main() {
// each thread can only have one thread key (that's why we unwrap)
- // ThreadKey is not Send, Sync, Copy, or Clone
+ // ThreadKey is not Send, Copy, or Clone
let key = ThreadKey::get().unwrap();
let mutex = Mutex::new(10);
@@ -153,6 +154,8 @@ fn main() {
}
```
+This `LockCollection` can be implemented simply by releasing the currently acquired locks and retrying on failure
+
---
## The Lockable API
@@ -266,7 +269,17 @@ Time Complexity: O(nlogn)
## Problem: Live-locking
-Although this library is able to successfully prevent deadlocks, livelocks may still be an issue. Imagine thread 1 gets resource 1, thread 2 gets resource 2, thread 1 realizes it can't get resource 2, thread 2 realizes it can't get resource 1, thread 1 drops resource 1, thread 2 drops resource 2, and then repeat forever. In practice, this situation probably wouldn't last forever. But it would be nice if this could be prevented somehow.
+Although this library is able to successfully prevent deadlocks, livelocks may still be an issue.
+
+1. Thread 1 locks mutex 1
+2. Thread 2 locks mutex 2
+3. Thread 1 tries to lock mutex 2 and fails
+4. Thread 2 tries to lock mutex 1 and fails
+5. Thread 1 releases mutex 1
+6. Thread 2 releases mutex 2
+7. Repeat
+
+This pattern will probably end eventually, but we should really avoid it, for performance reasons.
---
@@ -384,8 +397,8 @@ This is what we were trying to avoid earlier
This is what I used in HappyLock 0.1:
```rust
-struct ReadLock<'a, T(&'a RwLock<T>);
-struct WriteLock<'a, T(&'a RwLock<T>);
+struct ReadLock<'a, T>(&'a RwLock<T>);
+struct WriteLock<'a, T>(&'a RwLock<T>);
```
**Problem:** This can't be used inside of an `OwnedLockCollection`
@@ -413,7 +426,7 @@ unsafe trait Lockable {
---
-## Not every lock can be read doe
+## Not every lock can be read tho
```rust
// This trait is used to indicate that reading is actually useful
@@ -432,25 +445,6 @@ impl<L: Sharable> OwnedLockable<L> {
---
-## Missing Features
-
-- `Condvar`/`Barrier`
-- We probably don't need `OnceLock` or `LazyLock`
-- Standard Library Backend
-- Mutex poisoning
-- Support for `no_std`
-- Convenience methods: `lock_swap`, `lock_set`?
-- `try_lock_swap` doesn't need a `ThreadKey`
-- Going further: `LockCell` API (preemptive allocation)
-
----
-
-<!--_class: invert lead -->
-
-## What's next?
-
----
-
## Poisoning
```rust
@@ -473,50 +467,55 @@ Allows: `Poisonable<LockCollection>` and `LockCollection<Poisonable>`
---
-## OS Locks
+# `LockableGetMut` and `LockableIntoInner`
-- Using `parking_lot` makes the binary size much larger
-- Unfortunately, it's impossible to implement `RawLock` on the standard library lock primitives
-- Creating a new crate based on a fork of the standard library is hard
-- Solution: create a new library (`sys_locks`), which exposes raw locks from the operating system
-- This is more complicated than you might think
-
----
+```rust
+fn Mutex::<T>::get_mut(&mut self) -> &mut T // already exists in std
+// this is safe because a mutable reference means nobody else can access the lock
-## Expanding Cyclic Wait
+trait LockableGetMut: Lockable {
+ type Inner<'a>;
-> ... sometimes you need to lock an object to read its value and determine what should be locked next... is there a way to address it?
+ fn get_mut(&mut self) -> Self::Inner<'_>
+}
-```rust
-let guard = m1.lock(key);
-if *guard == true {
- let key = Mutex::unlock(m);
- let data = [&m1, &m2];
- let collection = LockCollection::try_new(data).unwrap();
- let guard = collection.lock(key);
+impl<A: LockableGetMut, B: LockableGetMut> LockableGetMut for (A, B) {
+ type Inner = (A::Inner<'a>, B::Inner<'b>);
- // m1 might no longer be true here...
+ fn get_mut(&mut self) -> Self::Inner<'_> {
+ (self.0.get_mut(), self.1.get_mut())
+ }
}
```
---
-## What I Really Want
+## Missing Features
-```txt
-ordered locks: m1, m2, m3
+- `Condvar`/`Barrier`
+- `OnceLock` or `LazyLock`
+- Standard Library Backend
+- Support for `no_std`
+- Convenience methods: `lock_swap`, `lock_set`?
+- `try_lock_swap` doesn't need a `ThreadKey`
+- Going further: `LockCell` API (preemptive allocation)
-if m1 is true
- lock m2 and keep m1 locked
-else
- skip m2 and lock m3
-```
+---
-We can specify lock orders using `OwnedLockCollection`
+<!--_class: invert lead -->
+
+## What's next?
+
+---
-Then we need an iterator over the collection to keep that ordering
-This will be hard to do with tuples (but might not be impossible)
+## OS Locks
+
+- Using `parking_lot` makes the binary size much larger
+- Unfortunately, it's impossible to implement `RawLock` on the standard library lock primitives
+- Creating a new crate based on a fork of the standard library is hard
+- Solution: create a new library (`sys_locks`), which exposes raw locks from the operating system
+- This is more complicated than you might think
---
@@ -618,6 +617,106 @@ A `Readonly` collection cannot be exclusively locked.
- can these even deadlock?
---
+## Expanding Cyclic Wait
+
+> ... sometimes you need to lock an object to read its value and determine what should be locked next... is there a way to address it?
+
+```rust
+let guard = m1.lock(key);
+if *guard == true {
+ let key = Mutex::unlock(m);
+ let data = [&m1, &m2];
+ let collection = LockCollection::try_new(data).unwrap();
+ let guard = collection.lock(key);
+
+ // m1 might no longer be true here...
+}
+```
+
+---
+
+## What I Really Want
+
+```txt
+ordered locks: m1, m2, m3
+
+if m1 is true
+ lock m2 and keep m1 locked
+else
+ skip m2 and lock m3
+```
+
+We can specify lock orders using `OwnedLockCollection`
+
+Then we need an iterator over the collection to keep that ordering
+
+This will be hard to do with tuples (but is not impossible)
+
+---
+
+## Something like this
+
+```rust
+let key = ThreadKey::get().unwrap();
+let collection: OwnedLockCollection<(Vec<i32>, Vec<String>)>;
+let iterator: LockIterator<(Vec<i32>, Vec<String>)> = collection.locking_iter(key);
+let (guard, next: LockIterator<Vec<String>>) = collection.next();
+
+unsafe trait IntoLockIterator: Lockable {
+ type Next: Lockable;
+ type Rest;
+
+ unsafe fn next(&self) -> Self::Next; // must be called before `rest`
+ fn rest(&self) -> Self::Rest;
+}
+
+unsafe impl<A: Lockable, B: Lockable> IntoLockIterator for (A, B) {
+ type Next = A;
+ type Rest = B;
+
+ unsafe fn next(&self) -> Self::Next { self.0 }
+
+ unsafe fn rest(&self) -> Self::Rest { self.1 }
+}
+```
+
+---
+
+## Here are the helper functions we'll need
+
+```rust
+struct LockIterator<Current: IntoLockIterator, Rest: IntoLockIterator = ()>;
+
+impl<Current, Rest> LockIterator<Current, Rest> {
+ // locks the next item and moves on
+ fn next(self) -> (Current::Next::Guard, LockIterator<Current::Rest>);
+
+ // moves on without locking anything
+ fn skip(self) -> LockIterator<Current::Rest>;
+
+ // steps into the next item, allowing parts of it to be locked
+ // For example, if i have LockIterator<(Vec<String>, Vec<i32>)>, but only
+ // want to lock parts of the first Vec, then I can step into it,
+ // locking what i need to, and then exit.
+ // This is the first use of LockIterator's second generic parameter
+ fn step_into(self) -> LockIterator<Current::Next, Rest=Current::Rest>;
+
+ // Once I'm done with my step_into, I can leave and move on
+ fn exit(self) -> LockIterator<Rest>;
+}
+```
+
+---
+
+## A Quick Problem with this Approach
+
+We're going to be returning a lot of guards.
+
+The `ThreadKey` is held by the `LockIterator`.
+
+**How do we ensure that the `ThreadKey` is not used again until all of the guards are dropped?**
+
+---
<!--_class: invert lead -->
diff --git a/package-lock.json b/package-lock.json
new file mode 100644
index 0000000..0dc16d2
--- /dev/null
+++ b/package-lock.json
@@ -0,0 +1,6 @@
+{
+ "name": "happylock",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {}
+}
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/package.json
@@ -0,0 +1 @@
+{}
diff --git a/src/collection/boxed.rs b/src/collection/boxed.rs
index 7a84b2a..98d7632 100644
--- a/src/collection/boxed.rs
+++ b/src/collection/boxed.rs
@@ -154,6 +154,7 @@ impl<T, L: AsRef<T>> AsRef<T> for BoxedLockCollection<L> {
}
}
+#[mutants::skip]
impl<L: Debug> Debug for BoxedLockCollection<L> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct(stringify!(BoxedLockCollection))
diff --git a/src/collection/guard.rs b/src/collection/guard.rs
index fc8df30..9412343 100644
--- a/src/collection/guard.rs
+++ b/src/collection/guard.rs
@@ -6,32 +6,38 @@ use crate::key::Keyable;
use super::LockGuard;
+#[mutants::skip] // it's hard to get two guards safely
impl<Guard: PartialEq, Key: Keyable> PartialEq for LockGuard<'_, Guard, Key> {
fn eq(&self, other: &Self) -> bool {
self.guard.eq(&other.guard)
}
}
+#[mutants::skip] // it's hard to get two guards safely
impl<Guard: PartialOrd, Key: Keyable> PartialOrd for LockGuard<'_, Guard, Key> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.guard.partial_cmp(&other.guard)
}
}
+#[mutants::skip] // it's hard to get two guards safely
impl<Guard: Eq, Key: Keyable> Eq for LockGuard<'_, Guard, Key> {}
+#[mutants::skip] // it's hard to get two guards safely
impl<Guard: Ord, Key: Keyable> Ord for LockGuard<'_, Guard, Key> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.guard.cmp(&other.guard)
}
}
+#[mutants::skip] // hashing involves RNG and is hard to test
impl<Guard: Hash, Key: Keyable> Hash for LockGuard<'_, Guard, Key> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.guard.hash(state)
}
}
+#[mutants::skip]
impl<Guard: Debug, Key: Keyable> Debug for LockGuard<'_, Guard, Key> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&**self, f)
diff --git a/src/collection/owned.rs b/src/collection/owned.rs
index 8e8afce..a96300d 100644
--- a/src/collection/owned.rs
+++ b/src/collection/owned.rs
@@ -7,6 +7,7 @@ use crate::Keyable;
use super::{utils, LockGuard, OwnedLockCollection};
+#[mutants::skip] // it's hard to test individual locks in an OwnedLockCollection
fn get_locks<L: Lockable>(data: &L) -> Vec<&dyn RawLock> {
let mut locks = Vec::new();
data.get_ptrs(&mut locks);
@@ -61,6 +62,7 @@ unsafe impl<L: Lockable> Lockable for OwnedLockCollection<L> {
where
Self: 'g;
+	#[mutants::skip] // It's hard to test locks in an OwnedLockCollection, because they're owned
fn get_ptrs<'a>(&'a self, ptrs: &mut Vec<&'a dyn RawLock>) {
self.data.get_ptrs(ptrs)
}
diff --git a/src/collection/ref.rs b/src/collection/ref.rs
index 2f4db20..512bdec 100644
--- a/src/collection/ref.rs
+++ b/src/collection/ref.rs
@@ -108,6 +108,7 @@ impl<T, L: AsRef<T>> AsRef<T> for RefLockCollection<'_, L> {
}
}
+#[mutants::skip]
impl<L: Debug> Debug for RefLockCollection<'_, L> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct(stringify!(RefLockCollection))
diff --git a/src/key.rs b/src/key.rs
index 768f09e..c7369be 100644
--- a/src/key.rs
+++ b/src/key.rs
@@ -43,6 +43,7 @@ unsafe impl Keyable for &mut ThreadKey {}
// Safety: a &ThreadKey is useless by design.
unsafe impl Sync for ThreadKey {}
+#[mutants::skip]
impl Debug for ThreadKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "ThreadKey")
diff --git a/src/mutex.rs b/src/mutex.rs
index 004e5d4..99d0981 100644
--- a/src/mutex.rs
+++ b/src/mutex.rs
@@ -161,7 +161,7 @@ pub struct MutexGuard<'a, 'key: 'a, T: ?Sized + 'a, Key: Keyable + 'key, R: RawM
#[cfg(test)]
mod tests {
- use crate::ThreadKey;
+ use crate::{LockCollection, ThreadKey};
use super::*;
@@ -199,6 +199,21 @@ mod tests {
}
#[test]
+ fn ord_works() {
+ let key = ThreadKey::get().unwrap();
+ let mutex1: crate::Mutex<_> = Mutex::new(1);
+ let mutex2: crate::Mutex<_> = Mutex::new(2);
+ let mutex3: crate::Mutex<_> = Mutex::new(2);
+ let collection = LockCollection::try_new((&mutex1, &mutex2, &mutex3)).unwrap();
+
+ let guard = collection.lock(key);
+ assert!(guard.0 < guard.1);
+ assert!(guard.1 > guard.0);
+ assert!(guard.1 == guard.2);
+ assert!(guard.0 != guard.2)
+ }
+
+ #[test]
fn dropping_guard_releases_mutex() {
let mut key = ThreadKey::get().unwrap();
let mutex: crate::Mutex<_> = Mutex::new("Hello, world!");
diff --git a/src/mutex/guard.rs b/src/mutex/guard.rs
index 0d35cf4..f7a01a4 100644
--- a/src/mutex/guard.rs
+++ b/src/mutex/guard.rs
@@ -33,12 +33,14 @@ impl<T: Ord + ?Sized, R: RawMutex> Ord for MutexRef<'_, T, R> {
}
}
+#[mutants::skip] // hashing involves RNG and is hard to test
impl<T: Hash + ?Sized, R: RawMutex> Hash for MutexRef<'_, T, R> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.deref().hash(state)
}
}
+#[mutants::skip]
impl<T: Debug + ?Sized, R: RawMutex> Debug for MutexRef<'_, T, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&**self, f)
@@ -103,14 +105,17 @@ impl<'a, T: ?Sized, R: RawMutex> MutexRef<'a, T, R> {
// it's kinda annoying to re-implement some of this stuff on guards
// there's nothing i can do about that
+#[mutants::skip] // it's hard to get two guards safely
impl<T: PartialEq + ?Sized, R: RawMutex, Key: Keyable> PartialEq for MutexGuard<'_, '_, T, Key, R> {
fn eq(&self, other: &Self) -> bool {
self.deref().eq(&**other)
}
}
+#[mutants::skip] // it's hard to get two guards safely
impl<T: Eq + ?Sized, R: RawMutex, Key: Keyable> Eq for MutexGuard<'_, '_, T, Key, R> {}
+#[mutants::skip] // it's hard to get two guards safely
impl<T: PartialOrd + ?Sized, R: RawMutex, Key: Keyable> PartialOrd
for MutexGuard<'_, '_, T, Key, R>
{
@@ -119,18 +124,21 @@ impl<T: PartialOrd + ?Sized, R: RawMutex, Key: Keyable> PartialOrd
}
}
+#[mutants::skip] // it's hard to get two guards safely
impl<T: Ord + ?Sized, R: RawMutex, Key: Keyable> Ord for MutexGuard<'_, '_, T, Key, R> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.deref().cmp(&**other)
}
}
+#[mutants::skip] // hashing involves RNG and is hard to test
impl<T: Hash + ?Sized, R: RawMutex, Key: Keyable> Hash for MutexGuard<'_, '_, T, Key, R> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.deref().hash(state)
}
}
+#[mutants::skip]
impl<T: Debug + ?Sized, Key: Keyable, R: RawMutex> Debug for MutexGuard<'_, '_, T, Key, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&**self, f)
diff --git a/src/mutex/mutex.rs b/src/mutex/mutex.rs
index 7a3f75c..5b838a2 100644
--- a/src/mutex/mutex.rs
+++ b/src/mutex/mutex.rs
@@ -129,6 +129,7 @@ impl<T, R: RawMutex> Mutex<T, R> {
}
}
+#[mutants::skip]
impl<T: ?Sized + Debug, R: RawMutex> Debug for Mutex<T, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// safety: this is just a try lock, and the value is dropped
diff --git a/src/poisonable.rs b/src/poisonable.rs
index da7758f..e577ce9 100644
--- a/src/poisonable.rs
+++ b/src/poisonable.rs
@@ -96,7 +96,7 @@ pub type TryLockPoisonableResult<'flag, 'key, G, Key> =
mod tests {
use super::*;
use crate::lockable::Lockable;
- use crate::{Mutex, ThreadKey};
+ use crate::{LockCollection, Mutex, ThreadKey};
#[test]
fn display_works() {
@@ -109,6 +109,24 @@ mod tests {
}
#[test]
+ fn ord_works() {
+ let key = ThreadKey::get().unwrap();
+ let lock1 = Poisonable::new(Mutex::new(1));
+ let lock2 = Poisonable::new(Mutex::new(3));
+ let lock3 = Poisonable::new(Mutex::new(3));
+ let collection = LockCollection::try_new((&lock1, &lock2, &lock3)).unwrap();
+
+ let guard = collection.lock(key);
+ let guard1 = guard.0.as_ref().unwrap();
+ let guard2 = guard.1.as_ref().unwrap();
+ let guard3 = guard.2.as_ref().unwrap();
+ assert!(guard1 < guard2);
+ assert!(guard2 > guard1);
+ assert!(guard2 == guard3);
+ assert!(guard1 != guard3);
+ }
+
+ #[test]
fn get_ptrs() {
let mutex = Mutex::new(5);
let poisonable = Poisonable::new(mutex);
@@ -118,4 +136,10 @@ mod tests {
assert_eq!(lock_ptrs.len(), 1);
assert!(std::ptr::addr_eq(lock_ptrs[0], &poisonable.inner));
}
+
+ #[test]
+ fn new_poisonable_is_not_poisoned() {
+ let mutex = Poisonable::new(Mutex::new(42));
+ assert!(!mutex.is_poisoned());
+ }
}
diff --git a/src/poisonable/error.rs b/src/poisonable/error.rs
index d543294..9721ce4 100644
--- a/src/poisonable/error.rs
+++ b/src/poisonable/error.rs
@@ -3,6 +3,7 @@ use std::error::Error;
use super::{PoisonError, PoisonGuard, TryLockPoisonableError};
+#[mutants::skip]
impl<Guard> fmt::Debug for PoisonError<Guard> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PoisonError").finish_non_exhaustive()
@@ -149,6 +150,7 @@ impl<Guard> PoisonError<Guard> {
}
}
+#[mutants::skip]
impl<G, Key> fmt::Debug for TryLockPoisonableError<'_, '_, G, Key> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
diff --git a/src/poisonable/flag.rs b/src/poisonable/flag.rs
index 7714f5f..6b567c8 100644
--- a/src/poisonable/flag.rs
+++ b/src/poisonable/flag.rs
@@ -28,6 +28,7 @@ impl PoisonFlag {
Self()
}
+ #[mutants::skip] // None of the tests have panic = "abort", so this can't be tested
pub fn is_poisoned(&self) -> bool {
false
}
diff --git a/src/poisonable/guard.rs b/src/poisonable/guard.rs
index 97d1c60..36566f5 100644
--- a/src/poisonable/guard.rs
+++ b/src/poisonable/guard.rs
@@ -48,12 +48,14 @@ impl<Guard: Ord> Ord for PoisonRef<'_, Guard> {
}
}
+#[mutants::skip] // hashing involves RNG and is hard to test
impl<Guard: Hash> Hash for PoisonRef<'_, Guard> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.guard.hash(state)
}
}
+#[mutants::skip]
impl<Guard: Debug> Debug for PoisonRef<'_, Guard> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&**self, f)
@@ -92,32 +94,38 @@ impl<Guard> AsMut<Guard> for PoisonRef<'_, Guard> {
}
}
+#[mutants::skip] // it's hard to get two guards safely
impl<Guard: PartialEq, Key: Keyable> PartialEq for PoisonGuard<'_, '_, Guard, Key> {
fn eq(&self, other: &Self) -> bool {
self.guard.eq(&other.guard)
}
}
+#[mutants::skip] // it's hard to get two guards safely
impl<Guard: PartialOrd, Key: Keyable> PartialOrd for PoisonGuard<'_, '_, Guard, Key> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.guard.partial_cmp(&other.guard)
}
}
+#[mutants::skip] // it's hard to get two guards safely
impl<Guard: Eq, Key: Keyable> Eq for PoisonGuard<'_, '_, Guard, Key> {}
+#[mutants::skip] // it's hard to get two guards safely
impl<Guard: Ord, Key: Keyable> Ord for PoisonGuard<'_, '_, Guard, Key> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.guard.cmp(&other.guard)
}
}
+#[mutants::skip] // hashing involves RNG and is hard to test
impl<Guard: Hash, Key: Keyable> Hash for PoisonGuard<'_, '_, Guard, Key> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.guard.hash(state)
}
}
+#[mutants::skip]
impl<Guard: Debug, Key: Keyable> Debug for PoisonGuard<'_, '_, Guard, Key> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&self.guard, f)
diff --git a/src/poisonable/poisonable.rs b/src/poisonable/poisonable.rs
index c6da89d..0bc2b03 100644
--- a/src/poisonable/poisonable.rs
+++ b/src/poisonable/poisonable.rs
@@ -56,9 +56,9 @@ unsafe impl<L: Lockable> Lockable for Poisonable<L> {
let ref_guard = PoisonRef::new(&self.poisoned, self.inner.guard());
if self.is_poisoned() {
- Ok(ref_guard)
- } else {
Err(PoisonError::new(ref_guard))
+ } else {
+ Ok(ref_guard)
}
}
}
@@ -73,9 +73,9 @@ unsafe impl<L: Sharable> Sharable for Poisonable<L> {
let ref_guard = PoisonRef::new(&self.poisoned, self.inner.read_guard());
if self.is_poisoned() {
- Ok(ref_guard)
- } else {
Err(PoisonError::new(ref_guard))
+ } else {
+ Ok(ref_guard)
}
}
}
diff --git a/src/rwlock.rs b/src/rwlock.rs
index ea7d3f0..f78e648 100644
--- a/src/rwlock.rs
+++ b/src/rwlock.rs
@@ -117,6 +117,7 @@ pub struct RwLockWriteGuard<'a, 'key: 'a, T: ?Sized, Key: Keyable + 'key, R: Raw
#[cfg(test)]
mod tests {
use crate::lockable::Lockable;
+ use crate::LockCollection;
use crate::RwLock;
use crate::ThreadKey;
@@ -234,6 +235,21 @@ mod tests {
}
#[test]
+ fn write_ord() {
+ let key = ThreadKey::get().unwrap();
+ let lock1: crate::RwLock<_> = RwLock::new(1);
+ let lock2: crate::RwLock<_> = RwLock::new(5);
+ let lock3: crate::RwLock<_> = RwLock::new(5);
+ let collection = LockCollection::try_new((&lock1, &lock2, &lock3)).unwrap();
+ let guard = collection.lock(key);
+
+ assert!(guard.0 < guard.1);
+ assert!(guard.1 > guard.0);
+ assert!(guard.1 == guard.2);
+ assert!(guard.0 != guard.2);
+ }
+
+ #[test]
fn read_ref_display_works() {
let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
let guard = unsafe { lock.try_read_no_key().unwrap() };
@@ -248,6 +264,21 @@ mod tests {
}
#[test]
+ fn read_ord() {
+ let key = ThreadKey::get().unwrap();
+ let lock1: crate::RwLock<_> = RwLock::new(1);
+ let lock2: crate::RwLock<_> = RwLock::new(5);
+ let lock3: crate::RwLock<_> = RwLock::new(5);
+ let collection = LockCollection::try_new((&lock1, &lock2, &lock3)).unwrap();
+ let guard = collection.read(key);
+
+ assert!(guard.0 < guard.1);
+ assert!(guard.1 > guard.0);
+ assert!(guard.1 == guard.2);
+ assert!(guard.0 != guard.2);
+ }
+
+ #[test]
fn dropping_read_ref_releases_rwlock() {
let lock: crate::RwLock<_> = RwLock::new("Hello, world!");
diff --git a/src/rwlock/read_guard.rs b/src/rwlock/read_guard.rs
index 8678a8e..2195e44 100644
--- a/src/rwlock/read_guard.rs
+++ b/src/rwlock/read_guard.rs
@@ -33,12 +33,14 @@ impl<T: Ord + ?Sized, R: RawRwLock> Ord for RwLockReadRef<'_, T, R> {
}
}
+#[mutants::skip] // hashing involves PRNG and is hard to test
impl<T: Hash + ?Sized, R: RawRwLock> Hash for RwLockReadRef<'_, T, R> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.deref().hash(state)
}
}
+#[mutants::skip]
impl<T: Debug + ?Sized, R: RawRwLock> Debug for RwLockReadRef<'_, T, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&**self, f)
@@ -85,6 +87,7 @@ impl<'a, T: ?Sized, R: RawRwLock> RwLockReadRef<'a, T, R> {
}
}
+#[mutants::skip] // it's hard to get two read guards safely
impl<T: PartialEq + ?Sized, R: RawRwLock, Key: Keyable> PartialEq
for RwLockReadGuard<'_, '_, T, Key, R>
{
@@ -93,8 +96,10 @@ impl<T: PartialEq + ?Sized, R: RawRwLock, Key: Keyable> PartialEq
}
}
+#[mutants::skip] // it's hard to get two read guards safely
impl<T: Eq + ?Sized, R: RawRwLock, Key: Keyable> Eq for RwLockReadGuard<'_, '_, T, Key, R> {}
+#[mutants::skip] // it's hard to get two read guards safely
impl<T: PartialOrd + ?Sized, R: RawRwLock, Key: Keyable> PartialOrd
for RwLockReadGuard<'_, '_, T, Key, R>
{
@@ -103,18 +108,21 @@ impl<T: PartialOrd + ?Sized, R: RawRwLock, Key: Keyable> PartialOrd
}
}
+#[mutants::skip] // it's hard to get two read guards safely
impl<T: Ord + ?Sized, R: RawRwLock, Key: Keyable> Ord for RwLockReadGuard<'_, '_, T, Key, R> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.deref().cmp(&**other)
}
}
+#[mutants::skip] // hashing involves PRNG and is hard to test
impl<T: Hash + ?Sized, R: RawRwLock, Key: Keyable> Hash for RwLockReadGuard<'_, '_, T, Key, R> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.deref().hash(state)
}
}
+#[mutants::skip]
impl<T: Debug + ?Sized, Key: Keyable, R: RawRwLock> Debug for RwLockReadGuard<'_, '_, T, Key, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&**self, f)
diff --git a/src/rwlock/read_lock.rs b/src/rwlock/read_lock.rs
index ae593e2..5ac0bbb 100644
--- a/src/rwlock/read_lock.rs
+++ b/src/rwlock/read_lock.rs
@@ -33,6 +33,7 @@ unsafe impl<T: Send, R: RawRwLock + Send + Sync> Sharable for ReadLock<'_, T, R>
}
}
+#[mutants::skip]
impl<T: ?Sized + Debug, R: RawRwLock> Debug for ReadLock<'_, T, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// safety: this is just a try lock, and the value is dropped
diff --git a/src/rwlock/rwlock.rs b/src/rwlock/rwlock.rs
index a249675..7a105d7 100644
--- a/src/rwlock/rwlock.rs
+++ b/src/rwlock/rwlock.rs
@@ -140,6 +140,7 @@ impl<T, R: RawRwLock> RwLock<T, R> {
}
}
+#[mutants::skip]
impl<T: ?Sized + Debug, R: RawRwLock> Debug for RwLock<T, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// safety: this is just a try lock, and the value is dropped
diff --git a/src/rwlock/write_guard.rs b/src/rwlock/write_guard.rs
index aec3d3d..ff559b8 100644
--- a/src/rwlock/write_guard.rs
+++ b/src/rwlock/write_guard.rs
@@ -33,12 +33,14 @@ impl<T: Ord + ?Sized, R: RawRwLock> Ord for RwLockWriteRef<'_, T, R> {
}
}
+#[mutants::skip] // hashing involves PRNG and is difficult to test
impl<T: Hash + ?Sized, R: RawRwLock> Hash for RwLockWriteRef<'_, T, R> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.deref().hash(state)
}
}
+#[mutants::skip]
impl<T: Debug + ?Sized, R: RawRwLock> Debug for RwLockWriteRef<'_, T, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&**self, f)
@@ -100,6 +102,7 @@ impl<'a, T: ?Sized + 'a, R: RawRwLock> RwLockWriteRef<'a, T, R> {
}
}
+#[mutants::skip] // it's hard to get two read guards safely
impl<T: PartialEq + ?Sized, R: RawRwLock, Key: Keyable> PartialEq
for RwLockWriteGuard<'_, '_, T, Key, R>
{
@@ -108,8 +111,10 @@ impl<T: PartialEq + ?Sized, R: RawRwLock, Key: Keyable> PartialEq
}
}
+#[mutants::skip] // it's hard to get two read guards safely
impl<T: Eq + ?Sized, R: RawRwLock, Key: Keyable> Eq for RwLockWriteGuard<'_, '_, T, Key, R> {}
+#[mutants::skip] // it's hard to get two read guards safely
impl<T: PartialOrd + ?Sized, R: RawRwLock, Key: Keyable> PartialOrd
for RwLockWriteGuard<'_, '_, T, Key, R>
{
@@ -118,18 +123,21 @@ impl<T: PartialOrd + ?Sized, R: RawRwLock, Key: Keyable> PartialOrd
}
}
+#[mutants::skip] // it's hard to get two read guards safely
impl<T: Ord + ?Sized, R: RawRwLock, Key: Keyable> Ord for RwLockWriteGuard<'_, '_, T, Key, R> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.deref().cmp(&**other)
}
}
+#[mutants::skip] // hashing involves PRNG and is difficult to test
impl<T: Hash + ?Sized, R: RawRwLock, Key: Keyable> Hash for RwLockWriteGuard<'_, '_, T, Key, R> {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.deref().hash(state)
}
}
+#[mutants::skip]
impl<T: Debug + ?Sized, Key: Keyable, R: RawRwLock> Debug for RwLockWriteGuard<'_, '_, T, Key, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
Debug::fmt(&**self, f)
diff --git a/src/rwlock/write_lock.rs b/src/rwlock/write_lock.rs
index ff00c06..443fbcd 100644
--- a/src/rwlock/write_lock.rs
+++ b/src/rwlock/write_lock.rs
@@ -25,6 +25,7 @@ unsafe impl<T: Send, R: RawRwLock + Send + Sync> Lockable for WriteLock<'_, T, R
// Technically, the exclusive locks can also be shared, but there's currently
// no way to express that. I don't think I want to ever express that.
+#[mutants::skip]
impl<T: ?Sized + Debug, R: RawRwLock> Debug for WriteLock<'_, T, R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// safety: this is just a try lock, and the value is dropped