1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
//! Lock that panics when used in an IRQ context
//!
//! See the [sync] module documentation.
//!
//! [sync]: crate::sync

use core::fmt;

pub use spin::MutexGuard as SpinLockGuard;

/// This type provides mutual exclusion based on spinning.
/// It will panic if used in the context of an interrupt.
///
/// # Description
///
/// The behaviour of these locks is similar to that of `std::sync::Mutex`. They
/// differ in the following:
///
/// - The lock will not be poisoned in case of failure;
///
/// # Simple examples
///
/// ```
/// use crate::sync::SpinLock;
/// let spin_lock = SpinLock::new(0);
///
/// // Modify the data
/// {
///     let mut data = spin_lock.lock();
///     *data = 2;
/// }
///
/// // Read the data
/// let answer =
/// {
///     let data = spin_lock.lock();
///     *data
/// };
///
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread-safety example
///
/// ```
/// use crate::sync::SpinLock;
/// use std::sync::{Arc, Barrier};
///
/// let numthreads = 1000;
/// let spin_lock = Arc::new(SpinLock::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(numthreads + 1));
///
/// for _ in (0..numthreads)
/// {
///     let my_barrier = barrier.clone();
///     let my_lock = spin_lock.clone();
///     std::thread::spawn(move||
///     {
///         let mut guard = my_lock.lock();
///         *guard += 1;
///
///         // Release the lock to prevent a deadlock
///         drop(guard);
///         my_barrier.wait();
///     });
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_lock.lock() };
/// assert_eq!(answer, numthreads);
/// ```
// Newtype over `spin::Mutex`; `repr(transparent)` guarantees the memory layout
// is identical to the wrapped mutex, so the panic-on-interrupt checks in
// `lock`/`try_lock` are the only behavioral additions.
#[repr(transparent)]
pub struct SpinLock<T: ?Sized>(spin::Mutex<T>);

impl<T> SpinLock<T> {
    /// Creates a new spinlock wrapping the supplied data.
    ///
    /// May be used statically:
    ///
    /// ```
    /// use crate::sync::SpinLock;
    ///
    /// static SPINLOCK: SpinLock<()> = SpinLock::new(());
    ///
    /// fn demo() {
    ///     let lock = SPINLOCK.lock();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    pub const fn new(data: T) -> SpinLock<T> {
        SpinLock(spin::Mutex::new(data))
    }

    /// Consumes this spinlock, returning the underlying data.
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }
}

impl<T: ?Sized> SpinLock<T> {
    /// Panics if called while the CPU is servicing an interrupt.
    ///
    /// The check is skipped when:
    ///
    /// - `INTERRUPT_DISARM` is set (the escape hatch used e.g. while
    ///   panicking), or
    /// - CPU locals are not yet initialized (early boot), in which case the
    ///   interrupt counter cannot be trusted yet.
    ///
    /// Shared by [`SpinLock::lock`] and [`SpinLock::try_lock`] so that both
    /// entry points enforce exactly the same policy. (Previously `try_lock`
    /// ignored `INTERRUPT_DISARM` and `ARE_CPU_LOCALS_INITIALIZED_YET`.)
    fn check_not_in_interrupt_context() {
        use core::sync::atomic::Ordering;
        use crate::cpu_locals::ARE_CPU_LOCALS_INITIALIZED_YET;
        use crate::i386::interrupt_service_routines::INSIDE_INTERRUPT_COUNT;
        use super::INTERRUPT_DISARM;
        if !INTERRUPT_DISARM.load(Ordering::SeqCst)
            && ARE_CPU_LOCALS_INITIALIZED_YET.load(Ordering::SeqCst)
            && INSIDE_INTERRUPT_COUNT.load(Ordering::SeqCst) != 0
        {
            panic!("\
                You have attempted to lock a spinlock in interrupt context. \
                This is most likely a design flaw. \
                See documentation of the sync module.");
        }
    }

    /// Locks the spinlock and returns a guard.
    ///
    /// The returned value may be dereferenced for data access
    /// and the lock will be dropped when the guard falls out of scope.
    ///
    /// Panics if called in an interrupt context.
    ///
    /// ```
    /// let mylock = crate::sync::SpinLock::new(0);
    /// {
    ///     let mut data = mylock.lock();
    ///     // The lock is now locked and the data can be accessed
    ///     *data += 1;
    ///     // The lock is implicitly dropped
    /// }
    ///
    /// ```
    pub fn lock(&self) -> SpinLockGuard<T> {
        Self::check_not_in_interrupt_context();
        self.0.lock()
    }

    /// Force unlock the spinlock. If the lock isn't held, this is a no-op.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the lock is not held by the current
    /// thread. However, this can be useful in some instances for exposing the
    /// lock to FFI that doesn't know how to deal with RAII.
    pub unsafe fn force_unlock(&self) {
        self.0.force_unlock()
    }

    /// Tries to lock the spinlock. If it is already locked, it will return
    /// None. Otherwise it returns a guard within Some.
    ///
    /// Panics if called in an interrupt context, under the same conditions
    /// as [`SpinLock::lock`].
    pub fn try_lock(&self) -> Option<SpinLockGuard<T>> {
        Self::check_not_in_interrupt_context();
        self.0.try_lock()
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for SpinLock<T> {
    // Delegate entirely to the inner mutex's Debug implementation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.0, f)
    }
}

impl<T: ?Sized + Default> Default for SpinLock<T> {
    fn default() -> SpinLock<T> {
        Self::new(Default::default())
    }
}