//! Kernel memory management.
//!
//! This module provides the tools to manage the kernel's own memory (KernelLand):
//! finding free virtual space, and mapping and unmapping physical frames in it.

use super::lands::{KernelLand, RecursiveTablesLand, VirtualSpaceLand};
use super::arch::{PAGE_SIZE, ActiveHierarchy};
use super::hierarchical_table::{TableHierarchy, PageState};
use super::MappingAccessRights;
use crate::mem::{VirtualAddress, PhysicalAddress};
use crate::frame_allocator::{PhysicalMemRegion, FrameAllocator, FrameAllocatorTrait,
mark_frame_bootstrap_allocated};
use crate::sync::{SpinLockIRQ, SpinLockIRQGuard};
use crate::error::KernelError;
use failure::Backtrace;
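
/// A struct that acts on KernelLand.
///
/// Provides methods to map and unmap pages in KernelLand, and access to the
/// active page-table hierarchy.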
#[derive(Debug)]
pub struct KernelMemory {
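    /// The currently active page tables.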
tables: ActiveHierarchy
}
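
/// The global instance of [KernelMemory], protected by a SpinLockIRQ.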
pub static KERNEL_MEMORY: SpinLockIRQ<KernelMemory> = SpinLockIRQ::new(KernelMemory { tables: ActiveHierarchy });
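
/// Locks [KERNEL_MEMORY] and returns a guard on the global [KernelMemory] instance.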
pub fn get_kernel_memory() -> SpinLockIRQGuard<'static, KernelMemory> { KERNEL_MEMORY.lock() }
impl KernelMemory {
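    /// Finds a hole in KernelLand's virtual space that is at least `length` bytes
    /// long and `alignment`-aligned, or returns `VirtualMemoryExhaustion` if there
    /// is none.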
pub fn find_virtual_space_aligned(&mut self, length: usize, alignment: usize) -> Result<VirtualAddress, KernelError> {
match self.tables.find_available_virtual_space_aligned(length, KernelLand::start_addr(), KernelLand::end_addr(), alignment) {
Some(addr) => Ok(addr),
None => Err(KernelError::VirtualMemoryExhaustion { backtrace: Backtrace::new() })
}
}
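
    /// Finds a page-aligned hole in KernelLand's virtual space that is at least
    /// `length` bytes long.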
pub fn find_virtual_space(&mut self, length: usize) -> Result<VirtualAddress, KernelError> {
self.find_virtual_space_aligned(length, PAGE_SIZE)
}
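
    /// Maps a physical region at the given virtual address, with the given rights.
    ///
    /// The region is consumed: its frames are now owned by the mapping, and will
    /// only be freed when it is unmapped.
    ///
    /// # Panics
    ///
    /// Panics if the destination is not in KernelLand.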
pub fn map_phys_region_to(&mut self, phys: PhysicalMemRegion, address: VirtualAddress, flags: MappingAccessRights) {
assert!(KernelLand::contains_region(address, phys.size()));
self.tables.map_to_from_iterator(phys.into_iter(), address, flags);
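        // the frames are now tracked by the page tables: don't free them on drop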
::core::mem::forget(phys);
}
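
    /// Maps a physical region in a free area of KernelLand, consuming it.
    /// Returns the virtual address of the new mapping.
    ///
    /// # Panics
    ///
    /// Panics if virtual space is exhausted.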
pub fn map_phys_region(&mut self, phys: PhysicalMemRegion, flags: MappingAccessRights) -> VirtualAddress {
let va = self.find_virtual_space(phys.size()).unwrap();
self.map_phys_region_to(phys, va, flags);
va
}
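
    /// Maps a slice of physical regions to a single contiguous area of KernelLand.
    /// Returns the virtual address of the new mapping.
    ///
    /// # Safety
    ///
    /// The regions are only borrowed: the caller must guarantee they are not
    /// deallocated while they remain mapped.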
pub(super) unsafe fn map_phys_regions(&mut self, phys: &[PhysicalMemRegion], flags: MappingAccessRights) -> VirtualAddress {
let length = phys.iter().flatten().count() * PAGE_SIZE;
let va = self.find_virtual_space(length).unwrap();
self.tables.map_to_from_iterator(phys.iter().flatten(), va, flags);
va
}
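
    /// Maps an iterator of physical addresses to consecutive pages starting at
    /// `address`.
    ///
    /// # Safety
    ///
    /// The caller must guarantee the iterated frames are not deallocated while
    /// they remain mapped.
    ///
    /// # Panics
    ///
    /// Panics if the destination is not in KernelLand.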
pub(super) unsafe fn map_frame_iterator_to<I>(&mut self, iterator: I, address: VirtualAddress, flags: MappingAccessRights)
where I: Iterator<Item=PhysicalAddress> + Clone
{
assert!(KernelLand::contains_region(address,
iterator.clone().count() * PAGE_SIZE));
self.tables.map_to_from_iterator(iterator, address, flags);
}
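
    /// Maps an iterator of physical addresses to consecutive pages in a free area
    /// of KernelLand. Returns the virtual address of the new mapping.
    ///
    /// # Safety
    ///
    /// The caller must guarantee the iterated frames are not deallocated while
    /// they remain mapped.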
pub(super) unsafe fn map_frame_iterator<I>(&mut self, iterator: I, flags: MappingAccessRights) -> VirtualAddress
where I: Iterator<Item=PhysicalAddress> + Clone
{
let length = iterator.clone().count() * PAGE_SIZE;
let va = self.find_virtual_space(length).unwrap();
self.tables.map_to_from_iterator(iterator, va, flags);
va
}
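
    /// Allocates a single frame and maps it read-write in a free area of
    /// KernelLand. Returns the virtual address of the page.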
pub fn get_page(&mut self) -> VirtualAddress {
let pr = FrameAllocator::allocate_frame().unwrap();
self.map_phys_region(pr, MappingAccessRights::k_rw())
}
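
    /// Allocates `length` bytes of (possibly fragmented) physical memory and maps
    /// them at the given virtual address.
    ///
    /// # Panics
    ///
    /// Panics if the destination is not in KernelLand, or if `length` is not a
    /// multiple of PAGE_SIZE.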
pub fn map_allocate_to(&mut self, va: VirtualAddress, length: usize, flags: MappingAccessRights) {
assert!(KernelLand::contains_region(va, length));
assert!(length % PAGE_SIZE == 0, "length must be a multiple of PAGE_SIZE");
let mut prs = FrameAllocator::allocate_frames_fragmented(length).unwrap();
self.tables.map_to_from_iterator(prs.iter().flatten(), va, flags);
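        // the frames are now tracked by the page tables: don't free them on drop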
while let Some(region) = prs.pop() {
::core::mem::forget(region);
}
}
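
    /// Allocates `length` bytes of physical memory and maps them read-write in a
    /// free area of KernelLand. Returns the virtual address of the new mapping.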
pub fn get_pages(&mut self, length: usize) -> VirtualAddress {
assert!(length % PAGE_SIZE == 0, "length must be a multiple of PAGE_SIZE");
let va = self.find_virtual_space(length).unwrap();
self.map_allocate_to(va, length, MappingAccessRights::k_rw());
va
}
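
    /// Marks `length` bytes starting at `address` as guarded: the range is
    /// reserved in the page tables but backed by no frame, so accessing it faults.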
pub fn guard(&mut self, address: VirtualAddress, length: usize) {
assert!(length % PAGE_SIZE == 0, "length must be a multiple of PAGE_SIZE");
self.get_hierarchy().guard(address, length);
}
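
    /// Reads the state of the page containing `addr`: mapped to a frame, guarded,
    /// or available.
    ///
    /// # Panics
    ///
    /// Panics if `addr` is not in KernelLand.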
pub fn mapping_state(&mut self, addr: VirtualAddress) -> PageState<PhysicalAddress> {
        let mut mapping = None;
let addr_aligned = VirtualAddress(crate::utils::align_down(addr.addr(), PAGE_SIZE));
assert!(KernelLand::contains_address(addr));
self.tables.for_every_entry(addr_aligned, PAGE_SIZE,
            |state, _| mapping = Some(state));
mapping.unwrap()
}
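
    /// Unmaps `length` bytes starting at `address` and deallocates the backing
    /// frames.
    ///
    /// # Panics
    ///
    /// Panics if the region is not in KernelLand, or if `length` is not a
    /// multiple of PAGE_SIZE.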
pub fn unmap(&mut self, address: VirtualAddress, length: usize) {
assert!(KernelLand::contains_region(address, length));
assert!(length % PAGE_SIZE == 0, "length must be a multiple of PAGE_SIZE");
        self.tables.unmap(address, length, |paddr| {
            let pr = unsafe {
                // safe: this frame was only tracked by the page tables
                PhysicalMemRegion::reconstruct(paddr, PAGE_SIZE)
            };
            drop(pr);
        });
}
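
    /// Unmaps `length` bytes starting at `address` without deallocating the
    /// backing frames.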
pub fn unmap_no_dealloc(&mut self, address: VirtualAddress, length: usize) {
assert!(KernelLand::contains_region(address, length));
assert!(length % PAGE_SIZE == 0, "length must be a multiple of PAGE_SIZE");
self.tables.unmap(address, length, |_paddr| { });
}
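
    /// Marks every frame currently mapped in KernelLand and RecursiveTablesLand
    /// as allocated in the frame allocator, so the frames mapped by the bootstrap
    /// are accounted for.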
pub fn reserve_kernel_land_frames(&mut self) {
self.tables.for_every_entry(KernelLand::start_addr(),
KernelLand::length() + RecursiveTablesLand::length(),
|entry_state, length| {
if let PageState::Present(mapped_frame) = entry_state {
for offset in (0..length).step_by(PAGE_SIZE) {
mark_frame_bootstrap_allocated(mapped_frame + offset)
}
}
});
}
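
    /// Provides mutable access to the active page-table hierarchy.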
pub(super) fn get_hierarchy(&mut self) -> &mut ActiveHierarchy {
&mut self.tables
}
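
    /// Prints the state of KernelLand by walking the page tables, coalescing
    /// contiguous ranges that share the same state. For debugging purposes.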
#[allow(clippy::missing_docs_in_private_items)]
pub fn dump_kernelland_state(&mut self) {
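        /// A run of contiguous pages sharing the same state, tagged with its
        /// start address.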
#[derive(Debug, Clone, Copy)]
enum State { Present(VirtualAddress, PhysicalAddress), Guarded(VirtualAddress), Available(VirtualAddress) }
impl State {
fn get_vaddr(&self) -> VirtualAddress {
match *self {
State::Present(addr, _) => addr,
State::Guarded(addr) => addr,
State::Available(addr) => addr,
}
}
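
            /// Folds the next page into the current run: if it extends the run
            /// (same kind, and physically contiguous for present mappings) the
            /// run simply grows; otherwise the finished run is printed and the
            /// new page starts a fresh one.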
fn update(&mut self, newstate: State) {
let old_self = *self;
let real_newstate = match (old_self, newstate) {
(State::Guarded(addr), State::Guarded(_)) => State::Guarded(addr),
(State::Available(addr), State::Available(_)) => State::Available(addr),
(State::Present(addr, phys), State::Present(newaddr, newphys))
if newphys.addr().wrapping_sub(phys.addr()) == newaddr - addr
=> State::Present(addr, phys),
(old, new) => {
old.print(new.get_vaddr() - 1);
new
}
};
*self = real_newstate;
}
fn from(state: PageState<PhysicalAddress>, addr: VirtualAddress) -> State {
match state {
PageState::Present(table) => State::Present(addr, table),
PageState::Guarded => State::Guarded(addr),
PageState::Available => State::Available(addr)
}
}
fn print(&self, end_addr: VirtualAddress) {
match *self {
State::Guarded(addr) => info!("{:#010x} - {:#010x} - GUARDED", addr, end_addr),
State::Available(addr) => info!("{:#010x} - {:#010x} - AVAILABLE", addr, end_addr),
State::Present(addr, phys) => info!("{:#010x} - {:#010x} - MAPS {:#010x} - {:#010x} ({} frames)",
addr, end_addr, phys, (phys + (end_addr - addr)), ((end_addr + 1) - addr) / PAGE_SIZE),
};
}
}
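
        // Walk KernelLand, coalescing pages into runs and printing each finished run.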
let mut address: VirtualAddress = KernelLand::start_addr();
let mut state = None;
self.tables.for_every_entry(KernelLand::start_addr(), KernelLand::length(), |entry, length| {
match state {
None => { state = Some(State::from(entry, address)) },
Some(ref mut state) => state.update(State::from(entry, address))
}
address += length;
});
match state {
Some(state) => state.print(RecursiveTablesLand::start_addr() - 1),
None => info!("Tables are empty")
}
}
}