//! The management of kernel memory
//!
//! ```text
//! j-------------------------------j j---------------------j
//! |        Process Memory         | |    Kernel Memory    |
//! j-------------------------------j j---------------------j
//!                 |                            |
//!     j-----------------------j                |
//!     | Userspace Bookkeeping |                |
//!     j-----------------------j                |
//!                 |                            |
//! j--------------------------------+----------------~-----j
//! |           User Land            |   Kernel Land  | RTL |
//! j--------------------------------+----------------~-----j
//!                         Page tables
//! ```
//!
//! We choose to manage UserLand memory separately from KernelLand + RecursiveTablesLand memory.
//! This way we can allow concurrent access to different lands, and modifying kernel memory
//! directly accesses the active page tables, without needing to lock the ProcessStruct.
//!
//! This also solves the problem of accessing the page tables early in the boot process, when
//! there is no current process yet.
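//!
//! A typical usage sketch (illustrative only; it assumes paging and the frame
//! allocator are already initialized, and omits module paths):
//!
//! ```ignore
//! // Lock the kernel memory manager, then allocate and map one page of RAM.
//! let va = get_kernel_memory().get_page();
//!
//! // ... use the page ...
//!
//! // Unmap it; the backing frame is freed as well.
//! get_kernel_memory().unmap(va, PAGE_SIZE);
//! ```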

use super::lands::{KernelLand, RecursiveTablesLand, VirtualSpaceLand};
use super::arch::{PAGE_SIZE, ActiveHierarchy};
use super::hierarchical_table::{TableHierarchy, PageState};
use super::MappingAccessRights;
use crate::mem::{VirtualAddress, PhysicalAddress};
use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator, FrameAllocatorTrait,
                      mark_frame_bootstrap_allocated};
use crate::sync::{SpinLockIRQ, SpinLockIRQGuard};
use crate::error::KernelError;
use failure::Backtrace;

/// A struct that acts on KernelLand and RecursiveTablesLand.
///
/// It always modifies the ACTIVE_PAGE_TABLES.
/// When a process switch installs a new set of page tables, the modifications are copied
/// to that set just before switching to it.
///
/// Because of this mechanism, modifying KernelLand is only permitted in the currently
/// active page tables.
#[derive(Debug)]
pub struct KernelMemory {
    /// The currently active page tables.
    tables: ActiveHierarchy
}

/// A mutex protecting the KernelMemory manager.
///
/// This mutex is independent of the one protecting
/// UserLand memory, and both lands can be modified concurrently since neither manager
/// touches the other's lands.
pub static KERNEL_MEMORY: SpinLockIRQ<KernelMemory> = SpinLockIRQ::new(KernelMemory { tables: ActiveHierarchy });

/// Locks the KERNEL_MEMORY mutex and returns the guard.
pub fn get_kernel_memory() -> SpinLockIRQGuard<'static, KernelMemory> { KERNEL_MEMORY.lock() }

impl KernelMemory {

    /// Finds a hole in the virtual space that is at least `length` bytes long and respects the given `alignment`.
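    ///
    /// A sketch of reserving aligned virtual space and then backing it
    /// (illustrative only; the alignment is arbitrary, and this assumes a
    /// caller returning `Result<_, KernelError>`):
    ///
    /// ```ignore
    /// let mut kmem = get_kernel_memory();
    /// // Reserve 4 pages of virtual space, aligned to 16 * PAGE_SIZE.
    /// let va = kmem.find_virtual_space_aligned(4 * PAGE_SIZE, 16 * PAGE_SIZE)?;
    /// // Back it with freshly allocated frames. Holding the same lock guard
    /// // across both calls ensures nobody claims the hole in between.
    /// kmem.map_allocate_to(va, 4 * PAGE_SIZE, MappingAccessRights::k_rw());
    /// ```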
    pub fn find_virtual_space_aligned(&mut self, length: usize, alignment: usize) -> Result<VirtualAddress, KernelError> {
        match self.tables.find_available_virtual_space_aligned(length, KernelLand::start_addr(), KernelLand::end_addr(), alignment) {
            Some(addr) => Ok(addr),
            None => Err(KernelError::VirtualMemoryExhaustion { backtrace: Backtrace::new() })
        }
    }

    /// Finds a hole in the virtual space that is at least `length` bytes long.
    pub fn find_virtual_space(&mut self, length: usize) -> Result<VirtualAddress, KernelError> {
        self.find_virtual_space_aligned(length, PAGE_SIZE)
    }

    /// Maps a single physical region to a given virtual address.
    ///
    /// # Panics
    ///
    /// Panics if virtual region is not in KernelLand.
    // todo check va alignment
    pub fn map_phys_region_to(&mut self, phys: PhysicalMemRegion, address: VirtualAddress, flags: MappingAccessRights) {
        assert!(KernelLand::contains_region(address, phys.size()));
        self.tables.map_to_from_iterator(phys.into_iter(), address, flags);
        // physical region must not be deallocated while it is mapped
        ::core::mem::forget(phys);
    }

    /// Maps a single physical region anywhere in KernelLand, choosing the virtual address.
    ///
    /// # Panics
    ///
    /// Panics if it encounters virtual space exhaustion.
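    ///
    /// A sketch of the usual allocate-then-map pattern (illustrative only,
    /// assuming the frame allocator is initialized):
    ///
    /// ```ignore
    /// // Allocate one physical frame and map it read-write in KernelLand.
    /// let frame = FrameAllocator::allocate_frame().unwrap();
    /// let va = get_kernel_memory().map_phys_region(frame, MappingAccessRights::k_rw());
    /// // Ownership of the frame is transferred to the page tables: it will only
    /// // be freed by a later `unmap` of this address.
    /// ```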
    pub fn map_phys_region(&mut self, phys: PhysicalMemRegion, flags: MappingAccessRights) -> VirtualAddress {
        let va = self.find_virtual_space(phys.size()).unwrap();
        self.map_phys_region_to(phys, va, flags);
        va
    }

    /// Maps a list of physical regions anywhere in KernelLand, choosing the virtual address.
    ///
    /// # Unsafe
    ///
    /// This function cannot ensure that the frames won't be dropped while still mapped.
    ///
    /// # Panics
    ///
    /// Panics if it encounters virtual space exhaustion.
    pub(super) unsafe fn map_phys_regions(&mut self, phys: &[PhysicalMemRegion], flags: MappingAccessRights) -> VirtualAddress {
        let length = phys.iter().flatten().count() * PAGE_SIZE;
        let va = self.find_virtual_space(length).unwrap();
        self.tables.map_to_from_iterator(phys.iter().flatten(), va, flags);
        va
    }

    /// Maps the physical frames yielded by an iterator to a given virtual address.
    ///
    /// # Unsafe
    ///
    /// This function cannot ensure that the frames won't be dropped while still mapped.
    ///
    /// # Panics
    ///
    /// Panics if the virtual region is not in KernelLand.
    /// Panics if it encounters virtual space exhaustion.
    // todo check va alignment
    pub(super) unsafe fn map_frame_iterator_to<I>(&mut self, iterator: I, address: VirtualAddress, flags: MappingAccessRights)
    where I: Iterator<Item=PhysicalAddress> + Clone
    {
        assert!(KernelLand::contains_region(address,
                                            iterator.clone().count() * PAGE_SIZE));
        self.tables.map_to_from_iterator(iterator, address, flags);
    }

    /// Maps the physical frames yielded by an iterator, choosing the virtual
    /// address itself.
    ///
    /// # Unsafe
    ///
    /// This function cannot ensure that the frames won't be dropped while still mapped.
    ///
    /// # Panics
    ///
    /// Panics if it encounters virtual space exhaustion.
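    ///
    /// A sketch of mapping device memory this way (illustrative only; the MMIO
    /// base address is hypothetical, and this method is only reachable from
    /// within the paging module):
    ///
    /// ```ignore
    /// // Map 4 frames of a device's MMIO window into KernelLand.
    /// let mmio_base = PhysicalAddress(0xfec0_0000);
    /// let frames = (0..4).map(|i| mmio_base + i * PAGE_SIZE);
    /// let va = unsafe {
    ///     // The MMIO frames are never handed to the frame allocator,
    ///     // so they cannot be freed while still mapped.
    ///     get_kernel_memory().map_frame_iterator(frames, MappingAccessRights::k_rw())
    /// };
    /// ```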
    pub(super) unsafe fn map_frame_iterator<I>(&mut self, iterator: I, flags: MappingAccessRights) -> VirtualAddress
    where I: Iterator<Item=PhysicalAddress> + Clone
    {
        let length = iterator.clone().count() * PAGE_SIZE;
        // TODO: Don't unwrap on OOM in map_frame_iterator.
        // BODY: map_frame_iterator should return an error on OOM instead of
        // BODY: making the whole kernel panic...
        let va = self.find_virtual_space(length).unwrap();
        self.tables.map_to_from_iterator(iterator, va, flags);
        va
    }

    /// Allocates and maps a single page, choosing a spot in VMEM for it.
    ///
    /// # Panics
    ///
    /// Panics if it encounters physical memory exhaustion.
    /// Panics if it encounters virtual space exhaustion.
    pub fn get_page(&mut self) -> VirtualAddress {
        let pr = FrameAllocator::allocate_frame().unwrap();
        self.map_phys_region(pr, MappingAccessRights::k_rw())
    }

    /// Allocates (possibly non-contiguous) frames, and maps them at the given address.
    ///
    /// # Panics
    ///
    /// Panics if it encounters physical memory exhaustion.
    /// Panics if it encounters virtual space exhaustion.
    /// Panics if the destination was already mapped.
    /// Panics if `length` is not a multiple of PAGE_SIZE.
    // todo check va alignment
    pub fn map_allocate_to(&mut self, va: VirtualAddress, length: usize, flags: MappingAccessRights) {
        assert!(KernelLand::contains_region(va, length));
        assert!(length % PAGE_SIZE == 0, "length must be a multiple of PAGE_SIZE");
        let mut prs = FrameAllocator::allocate_frames_fragmented(length).unwrap();
        self.tables.map_to_from_iterator(prs.iter().flatten(), va, flags);

        // do not drop the frames, they are mapped in the page tables !
        while let Some(region) = prs.pop() {
            ::core::mem::forget(region);
        }
    }

    /// Allocates and maps `length` bytes, choosing a spot in VMEM for it.
    ///
    /// # Panics
    ///
    /// Panics if it encounters physical memory exhaustion.
    /// Panics if it encounters virtual space exhaustion.
    /// Panics if `length` is not a multiple of PAGE_SIZE.
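    ///
    /// A sketch of a multi-page allocation (illustrative only):
    ///
    /// ```ignore
    /// // Allocate and map three pages of kernel memory.
    /// let va = get_kernel_memory().get_pages(3 * PAGE_SIZE);
    /// // ... use the range [va, va + 3 * PAGE_SIZE) ...
    /// // Unmapping frees the backing frames.
    /// get_kernel_memory().unmap(va, 3 * PAGE_SIZE);
    /// ```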
    pub fn get_pages(&mut self, length: usize) -> VirtualAddress {
        assert!(length % PAGE_SIZE == 0, "length must be a multiple of PAGE_SIZE");
        let va = self.find_virtual_space(length).unwrap();
        self.map_allocate_to(va, length, MappingAccessRights::k_rw());
        va
    }

    /// Guards a range of addresses, reserving it in the page tables without backing it with memory.
    ///
    /// # Panics
    ///
    /// Panics if the destination was already mapped.
    /// Panics if `length` is not a multiple of PAGE_SIZE.
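    ///
    /// A sketch of placing a guard page below a mapped region, e.g. a kernel
    /// stack (illustrative only):
    ///
    /// ```ignore
    /// let mut kmem = get_kernel_memory();
    /// // Reserve one guard page followed by three backed pages.
    /// let va = kmem.find_virtual_space(4 * PAGE_SIZE).unwrap();
    /// kmem.guard(va, PAGE_SIZE);
    /// kmem.map_allocate_to(va + PAGE_SIZE, 3 * PAGE_SIZE, MappingAccessRights::k_rw());
    /// ```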
    // todo check va alignment
    pub fn guard(&mut self, address: VirtualAddress, length: usize) {
        assert!(length % PAGE_SIZE == 0, "length must be a multiple of PAGE_SIZE");
        self.get_hierarchy().guard(address, length);
    }

    /// Reads the state of the mapping at a given address.
    ///
    /// # Panics
    ///
    /// Panics if `address` is not in KernelLand.
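    ///
    /// A sketch of inspecting a mapping (illustrative only):
    ///
    /// ```ignore
    /// let va = get_kernel_memory().get_page();
    /// match get_kernel_memory().mapping_state(va) {
    ///     PageState::Present(paddr) => info!("{:#010x} maps {:#010x}", va, paddr),
    ///     PageState::Guarded        => info!("{:#010x} is a guard page", va),
    ///     PageState::Available      => info!("{:#010x} is not mapped", va),
    /// }
    /// ```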
    pub fn mapping_state(&mut self, addr: VirtualAddress) -> PageState<PhysicalAddress> {
        assert!(KernelLand::contains_address(addr));
        let mut mapping = None;
        let addr_aligned = VirtualAddress(crate::utils::align_down(addr.addr(), PAGE_SIZE));
        // use for_every_entry with a length of just one page
        self.tables.for_every_entry(addr_aligned, PAGE_SIZE,
            |state, _| mapping = Some(state));
        mapping.unwrap()
    }

    /// Deletes a mapping in the page tables.
    /// This function assumes the frames were not tracked anywhere else, and drops them.
    ///
    /// # Panics
    ///
    /// Panics if it encounters any entry that was not mapped.
    /// Panics if the virtual region is not in KernelLand.
    /// Panics if `length` is not a multiple of PAGE_SIZE.
    // todo check va alignment
    pub fn unmap(&mut self, address: VirtualAddress, length: usize) {
        assert!(KernelLand::contains_region(address, length));
        assert!(length % PAGE_SIZE == 0, "length must be a multiple of PAGE_SIZE");
        self.tables.unmap(address, length, |paddr| {
            let pr = unsafe {
                // safe, they were only tracked by the page tables
                PhysicalMemRegion::reconstruct(paddr, PAGE_SIZE)
            };
            drop(pr)
        });
    }

    /// Deletes a mapping in the page tables, but does not free the underlying physical memory.
    ///
    /// # Panics
    ///
    /// Panics if it encounters any entry that was not mapped.
    /// Panics if the virtual region is not in KernelLand.
    /// Panics if `length` is not a multiple of PAGE_SIZE.
    // todo check va alignment
    pub fn unmap_no_dealloc(&mut self, address: VirtualAddress, length: usize) {
        assert!(KernelLand::contains_region(address, length));
        assert!(length % PAGE_SIZE == 0, "length must be a multiple of PAGE_SIZE");
        self.tables.unmap(address, length, |_paddr| { /* leak the frame */ });
    }

    /// Marks all frames mapped in KernelLand as reserved.
    /// This is used at startup to reserve the frames mapped by the bootstrap.
    ///
    /// # Panics
    ///
    /// Panics if it tries to overwrite an existing reservation.
    pub fn reserve_kernel_land_frames(&mut self) {
        self.tables.for_every_entry(KernelLand::start_addr(),
                                    KernelLand::length() + RecursiveTablesLand::length(),
        |entry_state, length| {
            if let PageState::Present(mapped_frame) = entry_state {
                for offset in (0..length).step_by(PAGE_SIZE) {
                    mark_frame_bootstrap_allocated(mapped_frame + offset)
                }
            }
        });
    }

    /// Safe access to the active page tables.
    pub(super) fn get_hierarchy(&mut self) -> &mut ActiveHierarchy {
        &mut self.tables
    }

    /// Prints the state of the KernelLand by parsing the page tables. Used for debugging purposes.
    #[allow(clippy::missing_docs_in_private_items)]
    pub fn dump_kernelland_state(&mut self) {
        #[derive(Debug, Clone, Copy)]
        enum State { Present(VirtualAddress, PhysicalAddress), Guarded(VirtualAddress), Available(VirtualAddress) }
        impl State {
            fn get_vaddr(&self) -> VirtualAddress {
                match *self {
                    State::Present(addr, _) => addr,
                    State::Guarded(addr) => addr,
                    State::Available(addr) => addr,
                }
            }

            fn update(&mut self, newstate: State) {
                //let old_self = ::core::mem::replace(self, State::Present(VirtualAddress(0), PhysicalAddress(0)));
                let old_self = *self;
                let real_newstate = match (old_self, newstate) {
                    // fuse guarded states
                    (State::Guarded(addr), State::Guarded(_)) => State::Guarded(addr),
                    // fuse available states
                    (State::Available(addr), State::Available(_)) => State::Available(addr),
                    // fuse present states only if physical frames are contiguous
                    (State::Present(addr, phys), State::Present(newaddr, newphys))
                        if newphys.addr().wrapping_sub(phys.addr()) == newaddr - addr
                            => State::Present(addr, phys),
                    // otherwise print the old mapping, and start a new one
                    (old, new) => {
                        old.print(new.get_vaddr() - 1);
                        new
                    }
                };
                *self = real_newstate;
            }

            fn from(state: PageState<PhysicalAddress>, addr: VirtualAddress) -> State {
                match state {
                    PageState::Present(table) => State::Present(addr, table),
                    PageState::Guarded => State::Guarded(addr),
                    PageState::Available => State::Available(addr)
                }
            }

            fn print(&self, end_addr: VirtualAddress) {
                match *self {
                    State::Guarded(addr) => info!("{:#010x} - {:#010x} - GUARDED", addr, end_addr),
                    State::Available(addr) => info!("{:#010x} - {:#010x} - AVAILABLE", addr, end_addr),
                    State::Present(addr, phys) => info!("{:#010x} - {:#010x} - MAPS {:#010x} - {:#010x} ({} frames)",
                                                        addr, end_addr, phys, (phys + (end_addr - addr)), ((end_addr + 1) - addr) / PAGE_SIZE),
                };
            }
        }

        let mut address: VirtualAddress = KernelLand::start_addr();
        let mut state = None;
        self.tables.for_every_entry(KernelLand::start_addr(), KernelLand::length(), |entry, length| {
            match state {
                // the first run
                None => { state = Some(State::from(entry, address)) },
                // all others
                Some(ref mut state) => state.update(State::from(entry, address))
            }
            address += length;
        });

        // print the last state
        match state {
            Some(state) => state.print(RecursiveTablesLand::start_addr() - 1),
            None => info!("Tables are empty")
        }
    }
}