//! Basic functionality for dealing with memory.
//!
//! Contains the definitions of VirtualAddress and PhysicalAddress,
//! as well as UserSpacePtr and UserSpacePtrMut.

use core::ops::{Deref, DerefMut};
use core::mem;
use core::fmt::{Formatter, Error, Display, Debug, LowerHex};
use crate::error::KernelError;
use failure::Backtrace;
use core::iter::Step;

use crate::paging::PAGE_SIZE;
use crate::utils::{align_down, align_up, div_ceil};

/// Rounds an address down to the start of the page that contains it.
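///
/// # Example
///
/// Illustrative sketch, assuming a 4KiB `PAGE_SIZE` (`0x1000`):
///
/// ```ignore
/// assert_eq!(round_to_page(0x1234), 0x1000);
/// assert_eq!(round_to_page(0x1000), 0x1000);
/// ```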
#[inline] pub fn round_to_page(addr: usize) -> usize { align_down(addr, PAGE_SIZE) }

/// Rounds an address up to the next page boundary, unless it is already page-aligned.
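///
/// # Example
///
/// Illustrative sketch, assuming a 4KiB `PAGE_SIZE` (`0x1000`):
///
/// ```ignore
/// assert_eq!(round_to_page_upper(0x1001), 0x2000);
/// assert_eq!(round_to_page_upper(0x1000), 0x1000);
/// ```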
#[inline] pub fn round_to_page_upper(addr: usize) -> usize { align_up(addr, PAGE_SIZE) }

/// Counts the number of pages needed to hold `size` bytes.
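///
/// # Example
///
/// Illustrative sketch, assuming a 4KiB `PAGE_SIZE` (`0x1000`):
///
/// ```ignore
/// assert_eq!(count_pages(1), 1);
/// assert_eq!(count_pages(0x1001), 2);
/// ```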
#[inline] pub fn count_pages(size: usize) -> usize { div_ceil(size, PAGE_SIZE) }

/// Represents a physical address.
#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct PhysicalAddress(pub usize);

/// Represents a virtual address.
#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct VirtualAddress(pub usize);

impl VirtualAddress {
    /// Gets the address as a `usize`.
    pub const fn addr(self) -> usize { self.0 }
}

impl PhysicalAddress {
    /// Gets the address as a `usize`.
    pub const fn addr(self) -> usize { self.0 }
}

impl ::core::ops::Add<usize> for VirtualAddress {
    type Output = VirtualAddress;
    /// Adding a length to an address gives another address
    fn add(self, other: usize) -> VirtualAddress { VirtualAddress(self.0 + other) }
}

impl ::core::ops::Add<usize> for PhysicalAddress {
    type Output = PhysicalAddress;
    /// Adding a length to an address gives another address
    fn add(self, other: usize) -> PhysicalAddress { PhysicalAddress(self.0 + other) }
}

impl ::core::ops::Add<VirtualAddress> for usize {
    type Output = VirtualAddress;
    /// Adding a length to an address gives another address
    fn add(self, other: VirtualAddress) -> VirtualAddress { VirtualAddress(self + other.0) }
}

impl ::core::ops::Add<PhysicalAddress> for usize {
    type Output = PhysicalAddress;
    /// Adding a length to an address gives another address
    fn add(self, other: PhysicalAddress) -> PhysicalAddress { PhysicalAddress(self + other.0) }
}

impl ::core::ops::Sub<usize> for VirtualAddress {
    type Output = VirtualAddress;
    /// Subtracting a length from an address gives another address
    fn sub(self, other: usize) -> VirtualAddress { VirtualAddress(self.0 - other) }
}

impl ::core::ops::Sub<usize> for PhysicalAddress {
    type Output = PhysicalAddress;
    /// Subtracting a length from an address gives another address
    fn sub(self, other: usize) -> PhysicalAddress { PhysicalAddress(self.0 - other) }
}

impl ::core::ops::AddAssign<usize> for VirtualAddress {
    /// Adding a length to an address gives another address
    fn add_assign(&mut self, rhs: usize) { self.0 += rhs }
}

impl ::core::ops::AddAssign<usize> for PhysicalAddress {
    /// Adding a length to an address gives another address
    fn add_assign(&mut self, rhs: usize) { self.0 += rhs }
}

impl ::core::ops::SubAssign<usize> for VirtualAddress {
    /// Subtracting a length from an address gives another address
    fn sub_assign(&mut self, rhs: usize) { self.0 -= rhs }
}

impl ::core::ops::SubAssign<usize> for PhysicalAddress {
    /// Subtracting a length from an address gives another address
    fn sub_assign(&mut self, rhs: usize) { self.0 -= rhs }
}

impl ::core::ops::Sub<VirtualAddress> for VirtualAddress {
    type Output = usize;
    /// Subtracting two addresses gives their distance
    fn sub(self, rhs: VirtualAddress) -> usize { self.0 - rhs.0 }
}

impl ::core::ops::Sub<PhysicalAddress> for PhysicalAddress {
    type Output = usize;
    /// Subtracting two addresses gives their distance
    fn sub(self, rhs: PhysicalAddress) -> usize { self.0 - rhs.0 }
}

impl Debug for PhysicalAddress {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        write!(f, "P {:#010x}", self.0)
    }
}

impl Display for PhysicalAddress {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        write!(f, "P {:#010x}", self.0)
    }
}

impl LowerHex for PhysicalAddress {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        write!(f, "P {:#010x}", self.0)
    }
}

impl Debug for VirtualAddress {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        write!(f, "V {:#010x}", self.0)
    }
}

impl Display for VirtualAddress {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        write!(f, "V {:#010x}", self.0)
    }
}

impl LowerHex for VirtualAddress {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        write!(f, "V {:#010x}", self.0)
    }
}

impl PhysicalAddress {
    /// Tries to add an offset to a PhysicalAddress, returning None if this would cause an overflow.
    ///
    /// This function does not return a KernelError, as it does not know whether the address or the size
    /// is the cause of the error.
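    ///
    /// # Example
    ///
    /// Illustrative sketch:
    ///
    /// ```ignore
    /// assert_eq!(PhysicalAddress(0x1000).checked_add(0x20), Some(PhysicalAddress(0x1020)));
    /// assert!(PhysicalAddress(usize::max_value()).checked_add(1).is_none());
    /// ```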
    pub fn checked_add(self, rhs: usize) -> Option<PhysicalAddress> {
        self.0.checked_add(rhs).map(PhysicalAddress)
    }

    /// Checks that this address meets the given alignment.
    ///
    /// # Errors
    ///
    /// * `InvalidAddress`: `self` is not aligned to `alignment`.
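    ///
    /// # Example
    ///
    /// Illustrative sketch:
    ///
    /// ```ignore
    /// assert!(PhysicalAddress(0x3000).check_aligned_to(0x1000).is_ok());
    /// assert!(PhysicalAddress(0x3004).check_aligned_to(0x1000).is_err());
    /// ```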
    pub fn check_aligned_to(self, alignment: usize) -> Result<(), KernelError> {
        match self.0 % alignment {
            0 => Ok(()),
            _ => Err(KernelError::InvalidAddress { address: self.0, backtrace: Backtrace::new() })
        }
    }

    /// Rounds the address down to a multiple of PAGE_SIZE.
    pub fn floor(self) -> PhysicalAddress { PhysicalAddress(round_to_page(self.0)) }

    /// Rounds the address up to a multiple of PAGE_SIZE.
    pub fn ceil(self) -> PhysicalAddress { PhysicalAddress(round_to_page_upper(self.0)) }
}

impl VirtualAddress {
    /// Tries to add an offset to a VirtualAddress, returning None if this would cause an overflow.
    ///
    /// This function does not return a KernelError, as it does not know whether the address or the size
    /// is the cause of the error.
    pub fn checked_add(self, rhs: usize) -> Option<VirtualAddress> {
        self.0.checked_add(rhs).map(VirtualAddress)
    }

    /// Checks that this address meets the given alignment.
    ///
    /// # Errors
    ///
    /// * `InvalidAddress`: `self` is not aligned to `alignment`.
    pub fn check_aligned_to(self, alignment: usize) -> Result<(), KernelError> {
        match self.0 % alignment {
            0 => Ok(()),
            _ => Err(KernelError::InvalidAddress { address: self.0, backtrace: Backtrace::new() })
        }
    }

    /// Rounds the address down to a multiple of PAGE_SIZE.
    pub fn floor(self) -> VirtualAddress { VirtualAddress(round_to_page(self.0)) }

    /// Rounds the address up to a multiple of PAGE_SIZE.
    pub fn ceil(self) -> VirtualAddress { VirtualAddress(round_to_page_upper(self.0)) }
}

unsafe impl core::iter::Step for PhysicalAddress {
    fn steps_between(start: &Self, end: &Self) -> Option<usize> { Step::steps_between(&start.0, &end.0) }
    fn forward_checked(start: Self, count: usize) -> Option<Self> { Step::forward_checked(start.0, count).map(PhysicalAddress) }
    fn backward_checked(start: Self, count: usize) -> Option<Self> { Step::backward_checked(start.0, count).map(PhysicalAddress) }
}
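
/// The `Step` impl lets ranges of addresses be iterated directly, e.g. to walk a
/// region page by page. Illustrative sketch only, assuming `PAGE_SIZE == 0x1000`:
///
/// ```ignore
/// let start = VirtualAddress(0x4000_0000);
/// let end = start + 3 * PAGE_SIZE;
/// for page in (start..end).step_by(PAGE_SIZE) {
///     // Yields VirtualAddress 0x4000_0000, 0x4000_1000 and 0x4000_2000.
/// }
/// ```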

unsafe impl core::iter::Step for VirtualAddress {
    fn steps_between(start: &Self, end: &Self) -> Option<usize> { Step::steps_between(&start.0, &end.0) }
    fn forward_checked(start: Self, count: usize) -> Option<Self> { Step::forward_checked(start.0, count).map(VirtualAddress) }
    fn backward_checked(start: Self, count: usize) -> Option<Self> { Step::backward_checked(start.0, count).map(VirtualAddress) }
}

// TODO: Properly implement UserSpacePtr
// BODY: UserSpacePtr right now is just a glorified, horribly unsafe reference.
// BODY: We should change its interface to provide a lot more safety. It should
// BODY: have a get function returning a Result<T, UserspaceError> that copies
// BODY: the underlying type, returning an error if the underlying pointer is
// BODY: invalid (at least if it points to kernel memory. Maybe also if it is
// BODY: not mapped?).
// BODY:
// BODY: We also have to handle unsized types. Maybe have a get_ref() which
// BODY: returns a &T? Maybe don't allow unsized types? I should check how
// BODY: Horizon/NX deals with it in sendsyncrequest (that's the only place that
// BODY: allows huge data afaik)
/// A pointer to read-only userspace memory. Prevents userspace from trying to
/// use a syscall on kernel memory.
#[repr(transparent)]
#[derive(Debug)]
pub struct UserSpacePtr<T: ?Sized>(pub *const T);

impl<T: ?Sized> Clone for UserSpacePtr<T> {
    fn clone(&self) -> UserSpacePtr<T> {
        UserSpacePtr(self.0)
    }
}
impl<T: ?Sized> Copy for UserSpacePtr<T> {}

impl<I> UserSpacePtr<[I]> {
    /// Forms a UserSpacePtr slice from a pointer and a length. The `len`
    /// argument is the number of **elements**, not the number of bytes.
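    ///
    /// # Example
    ///
    /// Illustrative sketch only; in the kernel, `data` and `len` normally come
    /// from syscall arguments pointing into userspace:
    ///
    /// ```ignore
    /// let buf = [1u8, 2, 3, 4];
    /// let user_slice = UserSpacePtr::from_raw_parts(buf.as_ptr(), buf.len());
    /// // Deref gives access to the underlying [u8] slice.
    /// assert_eq!(user_slice.len(), 4);
    /// assert_eq!(user_slice[0], 1);
    /// ```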
    pub fn from_raw_parts(data: *const I, len: usize) -> UserSpacePtr<[I]> {
        unsafe {
            UserSpacePtr(mem::transmute(FatPtr {
                data: data as usize,
                len: len
            }))
        }
    }
}

impl<T: ?Sized> Deref for UserSpacePtr<T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe {
            &*self.0
        }
    }
}

/// A pointer to read-write userspace memory. Prevents userspace from trying to
/// use a syscall on kernel memory.
#[repr(transparent)]
#[derive(Debug)]
pub struct UserSpacePtrMut<T: ?Sized>(pub *mut T);

impl<I> UserSpacePtrMut<[I]> {
    /// Forms a UserSpacePtrMut slice from a pointer and a length. The `len`
    /// argument is the number of **elements**, not the number of bytes.
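    ///
    /// # Example
    ///
    /// Illustrative sketch only; in the kernel, `data` and `len` normally come
    /// from syscall arguments pointing into userspace:
    ///
    /// ```ignore
    /// let mut buf = [0u8; 4];
    /// let mut user_slice = UserSpacePtrMut::from_raw_parts_mut(buf.as_mut_ptr(), buf.len());
    /// // DerefMut gives mutable access to the underlying [u8] slice.
    /// user_slice[0] = 0xFF;
    /// assert_eq!(user_slice[0], 0xFF);
    /// ```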
    pub fn from_raw_parts_mut(data: *mut I, len: usize) -> UserSpacePtrMut<[I]> {
        unsafe {
            UserSpacePtrMut(mem::transmute(FatPtr {
                data: data as usize,
                len: len
            }))
        }
    }
}

impl<T: ?Sized> Clone for UserSpacePtrMut<T> {
    fn clone(&self) -> UserSpacePtrMut<T> {
        UserSpacePtrMut(self.0)
    }
}
impl<T: ?Sized> Copy for UserSpacePtrMut<T> {}

impl<T: ?Sized> Deref for UserSpacePtrMut<T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe {
            &*self.0
        }
    }
}

impl<T: ?Sized> DerefMut for UserSpacePtrMut<T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe {
            &mut *self.0
        }
    }
}

impl<T> From<UserSpacePtrMut<T>> for UserSpacePtr<T> {
    fn from(ptr: UserSpacePtrMut<T>) -> UserSpacePtr<T> {
        UserSpacePtr(ptr.0)
    }
}

// TODO: Replace FatPtr with a libcore type when one lands.
// BODY: Currently, libcore (well, the rust standard library in general) has no
// BODY: type to represent a DST, or a fat pointer, or whatever. So we have to
// BODY: define it ourselves. Now, having talked to the rust compiler devs, the
// BODY: FatPtr definition we're using is fine and is very very unlikely to
// BODY: change. However, we should still switch to a standard type when one is
// BODY: defined.
// BODY:
// BODY: Blocking on https://github.com/rust-lang/rfcs/pull/2580
/// Internal Rust representation of a DST (fat) pointer.
///
/// Note that this is necessary due to the lack of a stable fat-pointer type in
/// the Rust standard library. See https://github.com/rust-lang/rfcs/pull/2580
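///
/// # Example
///
/// A sanity check of the layout assumption (a `FatPtr` is the size of a fat
/// slice pointer):
///
/// ```ignore
/// assert_eq!(core::mem::size_of::<FatPtr>(), core::mem::size_of::<*const [u8]>());
/// ```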
#[repr(C)]
#[derive(Debug)]
pub struct FatPtr {
    /// A pointer to the underlying slice.
    pub data: usize,
    /// The length of the slice, in number of elements.
    pub len: usize,
}