spin/rwlock.rs
//! A lock that provides data access to either one writer or many readers.

use crate::{
    atomic::{AtomicUsize, Ordering},
    RelaxStrategy, Spin,
};
use core::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    mem,
    mem::ManuallyDrop,
    ops::{Deref, DerefMut},
};
15
16/// A lock that provides data access to either one writer or many readers.
17///
18/// This lock behaves in a similar manner to its namesake `std::sync::RwLock` but uses
19/// spinning for synchronisation instead. Unlike its namespace, this lock does not
20/// track lock poisoning.
21///
22/// This type of lock allows a number of readers or at most one writer at any
23/// point in time. The write portion of this lock typically allows modification
24/// of the underlying data (exclusive access) and the read portion of this lock
25/// typically allows for read-only access (shared access).
26///
27/// The type parameter `T` represents the data that this lock protects. It is
28/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
29/// allow concurrent access through readers. The RAII guards returned from the
30/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
31/// to allow access to the contained of the lock.
32///
33/// An [`RwLockUpgradableGuard`](RwLockUpgradableGuard) can be upgraded to a
34/// writable guard through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade)
35/// [`RwLockUpgradableGuard::try_upgrade`](RwLockUpgradableGuard::try_upgrade) functions.
36/// Writable or upgradeable guards can be downgraded through their respective `downgrade`
37/// functions.
38///
39/// Based on Facebook's
40/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
41/// This implementation is unfair to writers - if the lock always has readers, then no writers will
42/// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no
43/// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken
44/// when there are existing readers. However if the lock is that highly contended and writes are
45/// crucial then this implementation may be a poor choice.
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T: ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    lock: AtomicUsize,
    data: UnsafeCell<T>,
}

// The lock state is packed into a single `usize`: bit 0 flags a writer, bit 1 flags an
// upgradeable reader, and the remaining bits count read guards (each read adds `READER`).
const READER: usize = 1 << 2;
const UPGRADED: usize = 1 << 1;
const WRITER: usize = 1;

/// A guard that provides immutable data access.
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: *const T,
}

/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    inner: &'a RwLock<T, R>,
    data: *mut T,
}

/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`].
///
/// No writers or other upgradeable guards can exist while this is in scope. New reader
/// creation is prevented (to alleviate writer starvation) but there may be existing readers
/// when the lock is acquired.
///
/// When the guard falls out of scope it will release the lock.
pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    inner: &'a RwLock<T, R>,
    data: *const T,
}

// Same unsafe impls as `std::sync::RwLock`
unsafe impl<T: ?Sized + Send, R> Send for RwLock<T, R> {}
unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLock<T, R> {}

unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockWriteGuard<'_, T, R> {}
unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockWriteGuard<'_, T, R> {}

unsafe impl<T: ?Sized + Sync> Send for RwLockReadGuard<'_, T> {}
unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}

unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockUpgradableGuard<'_, T, R> {}
unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockUpgradableGuard<'_, T, R> {}

impl<T, R> RwLock<T, R> {
    /// Creates a new spinlock wrapping the supplied data.
    ///
    /// May be used statically:
    ///
    /// ```
    /// use spin;
    ///
    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
    ///
    /// fn demo() {
    ///     let lock = RW_LOCK.read();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline]
    pub const fn new(data: T) -> Self {
        RwLock {
            phantom: PhantomData,
            lock: AtomicUsize::new(0),
            data: UnsafeCell::new(data),
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
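    ///
    /// # Example
    ///
    /// A short example of moving the value back out of the lock:
    ///
    /// ```
    /// let lock = spin::RwLock::new(42);
    /// assert_eq!(lock.into_inner(), 42);
    /// ```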
    #[inline]
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let RwLock { data, .. } = self;
        data.into_inner()
    }

    /// Returns a mutable pointer to the underlying data.
    ///
    /// This is mostly meant to be used for applications which require manual unlocking, but where
    /// storing both the lock and the pointer to the inner data gets inefficient.
    ///
    /// While this is safe, writing to the data is undefined behavior unless the current thread has
    /// acquired a write lock, and reading requires either a read or write lock.
    ///
    /// # Example
    /// ```
    /// let lock = spin::RwLock::new(42);
    ///
    /// unsafe {
    ///     core::mem::forget(lock.write());
    ///
    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
    ///     lock.as_mut_ptr().write(58);
    ///
    ///     lock.force_write_unlock();
    /// }
    ///
    /// assert_eq!(*lock.read(), 58);
    ///
    /// ```
    #[inline(always)]
    pub fn as_mut_ptr(&self) -> *mut T {
        self.data.get()
    }
}

impl<T: ?Sized, R: RelaxStrategy> RwLock<T, R> {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to the ordering of whether contentious readers or writers will
    /// acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.read();
    ///     // The lock is now locked and the data can be read
    ///     println!("{}", *data);
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<T> {
        loop {
            match self.try_read() {
                Some(guard) => return guard,
                None => R::relax(),
            }
        }
    }

    /// Lock this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.write();
    ///     // The lock is now locked and the data can be written
    ///     *data += 1;
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<T, R> {
        loop {
            match self.try_write_internal(false) {
                Some(guard) => return guard,
                None => R::relax(),
            }
        }
    }

    /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
    /// Upgrades can be done through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) method.
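    ///
    /// A short example of the read-then-upgrade flow:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.upgradeable_read();
    ///
    /// // While an upgradeable guard is held, new readers and writers are kept out.
    /// assert!(mylock.try_read().is_none());
    ///
    /// let mut writable = upgradeable.upgrade();
    /// *writable += 1;
    /// assert_eq!(*writable, 1);
    /// ```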
    #[inline]
    pub fn upgradeable_read(&self) -> RwLockUpgradableGuard<T, R> {
        loop {
            match self.try_upgradeable_read() {
                Some(guard) => return guard,
                None => R::relax(),
            }
        }
    }
}

impl<T: ?Sized, R> RwLock<T, R> {
    // Acquire a read lock, returning the lock value from just before the reader was added.
    fn acquire_reader(&self) -> usize {
        // An arbitrary cap that allows us to catch overflows long before they happen
        const MAX_READERS: usize = core::usize::MAX / READER / 2;

        let value = self.lock.fetch_add(READER, Ordering::Acquire);

        if value > MAX_READERS * READER {
            self.lock.fetch_sub(READER, Ordering::Relaxed);
            panic!("Too many lock readers, cannot safely proceed");
        } else {
            value
        }
    }

    /// Attempt to acquire this lock with shared read access.
    ///
    /// This function will never block and will return immediately if `read`
    /// would otherwise succeed. Returns `Some` of an RAII guard which will
    /// release the shared access of this thread when dropped, or `None` if the
    /// access could not be granted. This method does not provide any
    /// guarantees with respect to the ordering of whether contentious readers
    /// or writers will acquire the lock first.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_read() {
    ///         Some(data) => {
    ///             // The lock is now locked and the data can be read
    ///             println!("{}", *data);
    ///             // The lock is dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
        let value = self.acquire_reader();

        // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
        // This helps reduce writer starvation.
        if value & (WRITER | UPGRADED) != 0 {
            // Lock is taken, undo.
            self.lock.fetch_sub(READER, Ordering::Release);
            None
        } else {
            Some(RwLockReadGuard {
                lock: &self.lock,
                data: unsafe { &*self.data.get() },
            })
        }
    }

    /// Return the number of readers that currently hold the lock (including upgradable readers).
    ///
    /// # Safety
    ///
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
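    ///
    /// # Example
    ///
    /// A small illustration; the count is only a momentary snapshot:
    ///
    /// ```
    /// let lock = spin::RwLock::new(());
    ///
    /// let r1 = lock.read();
    /// let r2 = lock.read();
    /// assert_eq!(lock.reader_count(), 2);
    ///
    /// drop((r1, r2));
    /// assert_eq!(lock.reader_count(), 0);
    /// ```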
    pub fn reader_count(&self) -> usize {
        let state = self.lock.load(Ordering::Relaxed);
        state / READER + (state & UPGRADED) / UPGRADED
    }

    /// Return the number of writers that currently hold the lock.
    ///
    /// Because [`RwLock`] guarantees exclusive mutable access, this function may only return either `0` or `1`.
    ///
    /// # Safety
    ///
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
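    ///
    /// # Example
    ///
    /// A small illustration; as above, the count is only a momentary snapshot:
    ///
    /// ```
    /// let lock = spin::RwLock::new(());
    /// assert_eq!(lock.writer_count(), 0);
    ///
    /// let guard = lock.write();
    /// assert_eq!(lock.writer_count(), 1);
    ///
    /// drop(guard);
    /// assert_eq!(lock.writer_count(), 0);
    /// ```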
    pub fn writer_count(&self) -> usize {
        (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER
    }

    /// Force decrement the reader count.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
    /// live, or if called more times than `read` has been called, but can be
    /// useful in FFI contexts where the caller doesn't know how to deal with
    /// RAII. The underlying atomic operation uses `Ordering::Release`.
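    ///
    /// # Example
    ///
    /// A sketch of the intended FFI-style usage, where the guard is deliberately leaked:
    ///
    /// ```
    /// let lock = spin::RwLock::new(());
    ///
    /// // The read guard is leaked, e.g. because it was handed across an FFI boundary...
    /// core::mem::forget(lock.read());
    /// assert!(lock.try_write().is_none());
    ///
    /// // ...so the shared access has to be released manually later on.
    /// unsafe { lock.force_read_decrement(); }
    /// assert!(lock.try_write().is_some());
    /// ```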
    #[inline]
    pub unsafe fn force_read_decrement(&self) {
        debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }

    /// Force unlock exclusive write access.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
    /// live, or if called when there are current readers, but can be useful in
    /// FFI contexts where the caller doesn't know how to deal with RAII. The
    /// underlying atomic operation uses `Ordering::Release`.
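    ///
    /// # Example
    ///
    /// A sketch mirroring the `as_mut_ptr` example above, with the guard deliberately leaked:
    ///
    /// ```
    /// let lock = spin::RwLock::new(());
    ///
    /// // The write guard is leaked...
    /// core::mem::forget(lock.write());
    /// assert!(lock.try_read().is_none());
    ///
    /// // ...so the exclusive access has to be released manually later on.
    /// unsafe { lock.force_write_unlock(); }
    /// assert!(lock.try_read().is_some());
    /// ```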
    #[inline]
    pub unsafe fn force_write_unlock(&self) {
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }

    #[inline(always)]
    fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<T, R>> {
        if compare_exchange(
            &self.lock,
            0,
            WRITER,
            Ordering::Acquire,
            Ordering::Relaxed,
            strong,
        )
        .is_ok()
        {
            Some(RwLockWriteGuard {
                phantom: PhantomData,
                inner: self,
                data: unsafe { &mut *self.data.get() },
            })
        } else {
            None
        }
    }

    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// This function does not ever block, and it will return `None` if a call
    /// to `write` would otherwise block. If successful, an RAII guard is
    /// returned.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_write() {
    ///         Some(mut data) => {
    ///             // The lock is now locked and the data can be written
    ///             *data += 1;
    ///             // The lock is implicitly dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<T, R>> {
        self.try_write_internal(true)
    }

    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// Unlike [`RwLock::try_write`], this function is allowed to spuriously fail even when acquiring exclusive write access
    /// would otherwise succeed, which can result in more efficient code on some platforms.
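    ///
    /// Because of the possible spurious failure, this is typically used in a retry loop, for example:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let mut guard = loop {
    ///     match mylock.try_write_weak() {
    ///         Some(guard) => break guard,
    ///         // A weak attempt may fail spuriously even when the lock is free; just retry.
    ///         None => core::hint::spin_loop(),
    ///     }
    /// };
    /// *guard += 1;
    /// assert_eq!(*guard, 1);
    /// ```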
    #[inline]
    pub fn try_write_weak(&self) -> Option<RwLockWriteGuard<T, R>> {
        self.try_write_internal(false)
    }

    /// Tries to obtain an upgradeable lock guard.
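    ///
    /// A short example:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// // Succeeds because no writer or other upgradeable guard currently holds the lock.
    /// let upgradeable = mylock.try_upgradeable_read().unwrap();
    /// assert_eq!(*upgradeable, 0);
    ///
    /// // Only one upgradeable guard can be held at a time.
    /// assert!(mylock.try_upgradeable_read().is_none());
    /// ```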
    #[inline]
    pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradableGuard<T, R>> {
        if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
            Some(RwLockUpgradableGuard {
                phantom: PhantomData,
                inner: self,
                data: unsafe { &*self.data.get() },
            })
        } else {
            // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock.
            // When they unlock, they will clear the bit.
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place -- the mutable borrow statically guarantees no locks exist.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut lock = spin::RwLock::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.read(), 10);
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        // We know statically that there are no other references to `self`, so
        // there's no need to lock the inner lock.
        unsafe { &mut *self.data.get() }
    }
}

impl<T: ?Sized + fmt::Debug, R> fmt::Debug for RwLock<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_read() {
            Some(guard) => write!(f, "RwLock {{ data: ")
                .and_then(|()| (&*guard).fmt(f))
                .and_then(|()| write!(f, " }}")),
            None => write!(f, "RwLock {{ <locked> }}"),
        }
    }
}

impl<T: ?Sized + Default, R> Default for RwLock<T, R> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T, R> From<T> for RwLock<T, R> {
    fn from(data: T) -> Self {
        Self::new(data)
    }
}

impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
    /// Leak the lock guard, yielding a reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original lock for all but reading locks.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let data: &i32 = spin::RwLockReadGuard::leak(mylock.read());
    ///
    /// assert_eq!(*data, 0);
    /// ```
    #[inline]
    pub fn leak(this: Self) -> &'rwlock T {
        let this = ManuallyDrop::new(this);
        // Safety: We know statically that only we are referencing data
        unsafe { &*this.data }
    }
}

impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'rwlock, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'rwlock, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized, R: RelaxStrategy> RwLockUpgradableGuard<'rwlock, T, R> {
    /// Upgrades an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    /// let writable = upgradeable.upgrade();
    /// ```
    #[inline]
    pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T, R> {
        loop {
            self = match self.try_upgrade_internal(false) {
                Ok(guard) => return guard,
                Err(e) => e,
            };

            R::relax();
        }
    }
}

impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> {
    #[inline(always)]
    fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
        if compare_exchange(
            &self.inner.lock,
            UPGRADED,
            WRITER,
            Ordering::Acquire,
            Ordering::Relaxed,
            strong,
        )
        .is_ok()
        {
            let inner = self.inner;

            // Forget the old guard so its destructor doesn't run (before mutably aliasing data below)
            mem::forget(self);

            // Upgrade successful
            Ok(RwLockWriteGuard {
                phantom: PhantomData,
                inner,
                data: unsafe { &mut *inner.data.get() },
            })
        } else {
            Err(self)
        }
    }

    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    ///
    /// match upgradeable.try_upgrade() {
    ///     Ok(writable) => /* upgrade successful - use writable lock guard */ (),
    ///     Err(upgradeable) => /* upgrade unsuccessful */ (),
    /// };
    /// ```
    #[inline]
    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
        self.try_upgrade_internal(true)
    }

    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
    ///
    /// Unlike [`RwLockUpgradableGuard::try_upgrade`], this function is allowed to spuriously fail even when upgrading
    /// would otherwise succeed, which can result in more efficient code on some platforms.
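    ///
    /// A short example; the upgradeable guard is handed back on failure so the caller can retry:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// let upgradeable = mylock.upgradeable_read();
    ///
    /// match upgradeable.try_upgrade_weak() {
    ///     Ok(mut writable) => *writable += 1,
    ///     // May fail spuriously even though no readers are present.
    ///     Err(upgradeable) => assert_eq!(*upgradeable, 0),
    /// };
    /// ```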
    #[inline]
    pub fn try_upgrade_weak(self) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
        self.try_upgrade_internal(false)
    }

    /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(1);
    ///
    /// let upgradeable = mylock.upgradeable_read();
    /// assert!(mylock.try_read().is_none());
    /// assert_eq!(*upgradeable, 1);
    ///
    /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
    /// assert!(mylock.try_read().is_some());
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.inner.acquire_reader();

        let inner = self.inner;

        // Dropping self removes the UPGRADED bit
        mem::drop(self);

        RwLockReadGuard {
            lock: &inner.lock,
            data: unsafe { &*inner.data.get() },
        }
    }

    /// Leak the lock guard, yielding a reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original lock.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let data: &i32 = spin::RwLockUpgradableGuard::leak(mylock.upgradeable_read());
    ///
    /// assert_eq!(*data, 0);
    /// ```
    #[inline]
    pub fn leak(this: Self) -> &'rwlock T {
        let this = ManuallyDrop::new(this);
        // Safety: We know statically that only we are referencing data
        unsafe { &*this.data }
    }
}

impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockUpgradableGuard<'rwlock, T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockUpgradableGuard<'rwlock, T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> {
    /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let mut writable = mylock.write();
    /// *writable = 1;
    ///
    /// let readable = writable.downgrade(); // This is guaranteed not to spin
    /// # let readable_2 = mylock.try_read().unwrap();
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.inner.acquire_reader();

        let inner = self.inner;

        // Dropping self clears the WRITER (and any UPGRADED) bit
        mem::drop(self);

        RwLockReadGuard {
            lock: &inner.lock,
            data: unsafe { &*inner.data.get() },
        }
    }

    /// Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let mut writable = mylock.write();
    /// *writable = 1;
    ///
    /// let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T, R> {
        debug_assert_eq!(
            self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED),
            WRITER
        );

        // Reserve the upgradeable guard for ourselves
        self.inner.lock.store(UPGRADED, Ordering::Release);

        let inner = self.inner;

        // Forget self so its destructor doesn't run and clear the bits we just set
        mem::forget(self);

        RwLockUpgradableGuard {
            phantom: PhantomData,
            inner,
            data: unsafe { &*inner.data.get() },
        }
    }

    /// Leak the lock guard, yielding a mutable reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original lock.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let data: &mut i32 = spin::RwLockWriteGuard::leak(mylock.write());
    ///
    /// *data = 1;
    /// assert_eq!(*data, 1);
    /// ```
    #[inline]
    pub fn leak(this: Self) -> &'rwlock mut T {
        let mut this = ManuallyDrop::new(this);
        // Safety: We know statically that only we are referencing data
        unsafe { &mut *this.data }
    }
}

impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockWriteGuard<'rwlock, T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockWriteGuard<'rwlock, T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T {
        // Safety: We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}

impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R> {
    type Target = T;

    fn deref(&self) -> &T {
        // Safety: We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}

impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R> {
    type Target = T;

    fn deref(&self) -> &T {
        // Safety: We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}

impl<'rwlock, T: ?Sized, R> DerefMut for RwLockWriteGuard<'rwlock, T, R> {
    fn deref_mut(&mut self) -> &mut T {
        // Safety: We know statically that only we are referencing data
        unsafe { &mut *self.data }
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
    fn drop(&mut self) {
        debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }
}

impl<'rwlock, T: ?Sized, R> Drop for RwLockUpgradableGuard<'rwlock, T, R> {
    fn drop(&mut self) {
        debug_assert_eq!(
            self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
            UPGRADED
        );
        self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
    }
}

impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R> {
    fn drop(&mut self) {
        debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER);

        // Writer is responsible for clearing both WRITER and UPGRADED bits.
        // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
        self.inner
            .lock
            .fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }
}

#[inline(always)]
fn compare_exchange(
    atomic: &AtomicUsize,
    current: usize,
    new: usize,
    success: Ordering,
    failure: Ordering,
    strong: bool,
) -> Result<usize, usize> {
    if strong {
        atomic.compare_exchange(current, new, success, failure)
    } else {
        atomic.compare_exchange_weak(current, new, success, failure)
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLock for RwLock<(), R> {
    type GuardMarker = lock_api_crate::GuardSend;

    const INIT: Self = Self::new(());

    #[inline(always)]
    fn lock_exclusive(&self) {
        // Prevent guard destructor running
        core::mem::forget(self.write());
    }

    #[inline(always)]
    fn try_lock_exclusive(&self) -> bool {
        // Prevent guard destructor running
        self.try_write().map(|g| core::mem::forget(g)).is_some()
    }

    #[inline(always)]
    unsafe fn unlock_exclusive(&self) {
        drop(RwLockWriteGuard {
            inner: self,
            data: &mut (),
            phantom: PhantomData,
        });
    }

    #[inline(always)]
    fn lock_shared(&self) {
        // Prevent guard destructor running
        core::mem::forget(self.read());
    }

    #[inline(always)]
    fn try_lock_shared(&self) -> bool {
        // Prevent guard destructor running
        self.try_read().map(|g| core::mem::forget(g)).is_some()
    }

    #[inline(always)]
    unsafe fn unlock_shared(&self) {
        drop(RwLockReadGuard {
            lock: &self.lock,
            data: &(),
        });
    }

    #[inline(always)]
    fn is_locked(&self) -> bool {
        self.lock.load(Ordering::Relaxed) != 0
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgrade for RwLock<(), R> {
    #[inline(always)]
    fn lock_upgradable(&self) {
        // Prevent guard destructor running
        core::mem::forget(self.upgradeable_read());
    }

    #[inline(always)]
    fn try_lock_upgradable(&self) -> bool {
        // Prevent guard destructor running
        self.try_upgradeable_read()
            .map(|g| core::mem::forget(g))
            .is_some()
    }

    #[inline(always)]
    unsafe fn unlock_upgradable(&self) {
        drop(RwLockUpgradableGuard {
            inner: self,
            data: &(),
            phantom: PhantomData,
        });
    }

    #[inline(always)]
    unsafe fn upgrade(&self) {
        let tmp_guard = RwLockUpgradableGuard {
            inner: self,
            data: &(),
            phantom: PhantomData,
        };
        core::mem::forget(tmp_guard.upgrade());
    }

    #[inline(always)]
    unsafe fn try_upgrade(&self) -> bool {
        let tmp_guard = RwLockUpgradableGuard {
            inner: self,
            data: &(),
            phantom: PhantomData,
        };
        tmp_guard
            .try_upgrade()
            .map(|g| core::mem::forget(g))
            .is_ok()
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockDowngrade for RwLock<(), R> {
    unsafe fn downgrade(&self) {
        let tmp_guard = RwLockWriteGuard {
            inner: self,
            data: &mut (),
            phantom: PhantomData,
        };
        core::mem::forget(tmp_guard.downgrade());
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgradeDowngrade for RwLock<(), R> {
    unsafe fn downgrade_upgradable(&self) {
        let tmp_guard = RwLockUpgradableGuard {
            inner: self,
            data: &(),
            phantom: PhantomData,
        };
        core::mem::forget(tmp_guard.downgrade());
    }

    unsafe fn downgrade_to_upgradable(&self) {
        let tmp_guard = RwLockWriteGuard {
            inner: self,
            data: &mut (),
            phantom: PhantomData,
        };
        core::mem::forget(tmp_guard.downgrade_to_upgradeable());
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    type RwLock<T> = super::RwLock<T>;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop((l.read(), l.read()));
        drop(l.write());
    }

    // TODO: needs RNG
    //#[test]
    //fn frob() {
    //    static R: RwLock = RwLock::new();
    //    const N: usize = 10;
    //    const M: usize = 1000;
    //
    //    let (tx, rx) = channel::<()>();
    //    for _ in 0..N {
    //        let tx = tx.clone();
    //        thread::spawn(move|| {
    //            let mut rng = rand::thread_rng();
    //            for _ in 0..M {
    //                if rng.gen_weighted_bool(N) {
    //                    drop(R.write());
    //                } else {
    //                    drop(R.read());
    //                }
    //            }
    //            drop(tx);
    //        });
    //    }
    //    drop(tx);
    //    let _ = rx.recv();
    //    unsafe { R.destroy(); }
    //}

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        let t = thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);

        assert!(t.join().is_ok());
    }

    #[test]
    fn test_rw_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    #[test]
    fn test_rwlock_try_write() {
        use std::mem::drop;

        let lock = RwLock::new(0isize);
        let read_guard = lock.read();

        let write_result = lock.try_write();
        match write_result {
            None => (),
            Some(_) => assert!(
                false,
                "try_write should not succeed while read_guard is in scope"
            ),
        }

        drop(read_guard);
    }

    #[test]
    fn test_rw_try_read() {
        let m = RwLock::new(0);
        ::std::mem::forget(m.write());
        assert!(m.try_read().is_none());
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_force_read_decrement() {
        let m = RwLock::new(());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
            m.force_read_decrement();
        }
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
        }
        assert!(m.try_write().is_some());
    }

    #[test]
    fn test_force_write_unlock() {
        let m = RwLock::new(());
        ::std::mem::forget(m.write());
        assert!(m.try_read().is_none());
        unsafe {
            m.force_write_unlock();
        }
        assert!(m.try_read().is_some());
    }

    #[test]
    fn test_upgrade_downgrade() {
        let m = RwLock::new(());
        {
            let _r = m.read();
            let upg = m.try_upgradeable_read().unwrap();
            assert!(m.try_read().is_none());
            assert!(m.try_write().is_none());
            assert!(upg.try_upgrade().is_err());
        }
        {
            let w = m.write();
            assert!(m.try_upgradeable_read().is_none());
            let _r = w.downgrade();
            assert!(m.try_upgradeable_read().is_some());
            assert!(m.try_read().is_some());
            assert!(m.try_write().is_none());
        }
        {
            let _u = m.upgradeable_read();
            assert!(m.try_upgradeable_read().is_none());
        }

        assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
    }
}