//! Process memory management.
//!
//! Couples the arch-independent bookkeeping of a process' virtual address
//! space with the arch-specific page-table hierarchy that backs it.

pub use super::bookkeeping::QueryMemory;
use super::hierarchical_table::*;
use super::arch::{PAGE_SIZE, InactiveHierarchy, ActiveHierarchy};
use super::lands::{UserLand, VirtualSpaceLand};
use super::bookkeeping::UserspaceBookkeeping;
use super::mapping::{Mapping, MappingFrames};
use sunrise_libkern::{MemoryType, MemoryState, MemoryAttributes, MemoryPermissions};
use super::cross_process::CrossProcessMapping;
use super::MappingAccessRights;
use crate::mem::{VirtualAddress, PhysicalAddress};
use crate::frame_allocator::{FrameAllocator, FrameAllocatorTrait, PhysicalMemRegion};
use crate::paging::arch::Entry;
use crate::error::KernelError;
use crate::utils::{check_size_aligned, check_nonzero_length};
use crate::sync::SpinRwLock;
use alloc::{vec, vec::Vec, sync::Arc};
use failure::Backtrace;
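/// The memory of a process.
///
/// Ties together the bookkeeping of a process' address space and the page-table
/// hierarchy that backs it, keeping the two views consistent.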
#[derive(Debug)]
pub struct ProcessMemory {
/// The list of mappings in this address space.
userspace_bookkeping: UserspaceBookkeeping,
/// The architecture-specific paging hierarchy.
table_hierarchy: InactiveHierarchy,
/// The address at which this process' heap starts.
heap_base_address: VirtualAddress,
}
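/// A process' table hierarchy, which is manipulated differently depending on
/// whether it is the currently active one or not.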
enum DynamicHierarchy<'a> {
Active(ActiveHierarchy),
Inactive(&'a mut InactiveHierarchy)
}
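/// Dummy implementation of HierarchicalTable for the unit type, so that
/// DynamicHierarchy can name a top-level table type. None of these methods is
/// ever called: DynamicHierarchy reimplements every TableHierarchy method.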
impl HierarchicalTable for () {
type EntryType = Entry;
type CacheFlusherType = NoFlush;
type ChildTableType = ();
fn entries(&mut self) -> &mut [Entry] {
unimplemented!()
}
fn table_level() -> usize {
unimplemented!()
}
fn get_child_table(&mut self, _index: usize) -> PageState<SmartHierarchicalTable<<Self as HierarchicalTable>::ChildTableType>> {
unimplemented!()
}
fn create_child_table(&mut self, _index: usize) -> SmartHierarchicalTable<<Self as HierarchicalTable>::ChildTableType> {
unimplemented!()
}
}
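/// Dispatches every TableHierarchy operation to the active or inactive hierarchy.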
impl<'b> TableHierarchy for DynamicHierarchy<'b> {
type TopLevelTableType = ();
fn get_top_level_table(&mut self) -> SmartHierarchicalTable<()> {
panic!("Dynamic DynamicHierarchy reimplements everything");
}
fn map_to_from_iterator<I>(&mut self,
frames_iterator: I,
start_address: VirtualAddress,
flags: MappingAccessRights)
where I: Iterator<Item=PhysicalAddress>
{
match *self {
DynamicHierarchy::Active(ref mut hierarchy) => hierarchy.map_to_from_iterator(frames_iterator, start_address, flags),
DynamicHierarchy::Inactive(ref mut hierarchy) => hierarchy.map_to_from_iterator(frames_iterator, start_address, flags),
}
}
fn guard(&mut self, address: VirtualAddress, length: usize) {
match *self {
DynamicHierarchy::Active(ref mut hierarchy) => hierarchy.guard(address, length),
DynamicHierarchy::Inactive(ref mut hierarchy) => hierarchy.guard(address, length),
}
}
fn unmap<C>(&mut self, address: VirtualAddress, length: usize, callback: C) where C: FnMut(PhysicalAddress) {
match *self {
DynamicHierarchy::Active(ref mut hierarchy) => hierarchy.unmap(address, length, callback),
DynamicHierarchy::Inactive(ref mut hierarchy) => hierarchy.unmap(address, length, callback),
}
}
fn for_every_entry<C>(&mut self, address: VirtualAddress, length: usize, callback: C) where C: FnMut(PageState<PhysicalAddress>, usize) {
match *self {
DynamicHierarchy::Active(ref mut hierarchy) => hierarchy.for_every_entry(address, length, callback),
DynamicHierarchy::Inactive(ref mut hierarchy) => hierarchy.for_every_entry(address, length, callback),
}
}
fn find_available_virtual_space_aligned(&mut self, length: usize, start_addr: VirtualAddress, end_addr: VirtualAddress, alignment: usize) -> Option<VirtualAddress> {
match *self {
DynamicHierarchy::Active(ref mut hierarchy) => hierarchy.find_available_virtual_space_aligned(length, start_addr, end_addr, alignment),
DynamicHierarchy::Inactive(ref mut hierarchy) => hierarchy.find_available_virtual_space_aligned(length, start_addr, end_addr, alignment),
}
}
}
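/// Creates an empty ProcessMemory, with a fresh table hierarchy and the heap
/// base placed at 0x80000000.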
impl Default for ProcessMemory {
fn default() -> Self {
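// Virtual address at which this process' heap will start.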
let heap_base_address = VirtualAddress(0x80000000);
ProcessMemory {
userspace_bookkeping: UserspaceBookkeeping::new(),
table_hierarchy: InactiveHierarchy::new(),
heap_base_address,
}
}
}
impl ProcessMemory {
fn get_hierarchy(&mut self) -> DynamicHierarchy<'_> {
if self.table_hierarchy.is_currently_active() {
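// Safe: we just checked that this hierarchy is the currently active one.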
unsafe {
DynamicHierarchy::Active(ActiveHierarchy)
}
} else {
DynamicHierarchy::Inactive(&mut self.table_hierarchy)
}
}
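/// Maps the given physical memory region at `address`, taking ownership of it
/// in the resulting mapping.
///
/// # Errors
///
/// * Errors if `address` is not page-aligned.
/// * Errors if the destination range is not vacant or not entirely in UserLand.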
pub fn map_phys_region_to(&mut self,
phys: PhysicalMemRegion,
address: VirtualAddress,
ty: MemoryType,
flags: MappingAccessRights)
-> Result<(), KernelError> {
address.check_aligned_to(PAGE_SIZE)?;
let length = phys.size();
UserLand::check_contains_region(address, length)?;
self.userspace_bookkeping.check_vacant(address, length)?;
self.get_hierarchy().map_to_from_iterator(phys.into_iter(), address, flags);
let mapping = Mapping::new(address, MappingFrames::Owned(vec![phys]), 0, length, ty, flags)
.expect("We checked everything, but bookkeeping refuses to create the mapping");
self.userspace_bookkeping.add_mapping(mapping)
.expect("We checked everything, but bookkeeping refuses to add the mapping");
Ok(())
}
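/// Allocates physical frames and maps them at `address`, creating a mapping of
/// type `ty` and length `length`. The frames of reference-counted memory types
/// are stored behind an Arc so they can later be shared.
///
/// # Errors
///
/// * Errors if `address` or `length` is not page-aligned, or `length` is zero.
/// * Errors if the destination range is not vacant or not entirely in UserLand.
/// * Errors if the physical frame allocation fails.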
pub fn create_regular_mapping(&mut self, address: VirtualAddress, length: usize, ty: MemoryType, flags: MappingAccessRights) -> Result<(), KernelError> {
address.check_aligned_to(PAGE_SIZE)?;
check_size_aligned(length, PAGE_SIZE)?;
check_nonzero_length(length)?;
UserLand::check_contains_region(address, length)?;
self.userspace_bookkeping.check_vacant(address, length)?;
let frames = FrameAllocator::allocate_frames_fragmented(length)?;
self.get_hierarchy().map_to_from_iterator(frames.iter().flatten(), address, flags);
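// Frames of reference-counted memory types must live in a shareable container.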
let frames = if ty.get_memory_state().contains(MemoryState::IS_REFERENCE_COUNTED) {
MappingFrames::Shared(Arc::new(SpinRwLock::new(frames)))
} else {
MappingFrames::Owned(frames)
};
let mapping = Mapping::new(address, frames, 0, length, ty, flags)
.expect("We checked everything, but bookkeeping refuses to create the mapping");
self.userspace_bookkeping.add_mapping(mapping)
.expect("We checked everything, but bookkeeping refuses to add the mapping");
Ok(())
}
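/// Maps a view of an existing set of shared frames at `address`, starting
/// `phys_offset` bytes into the frame list and spanning `length` bytes.
///
/// # Errors
///
/// * Errors if `address` or `length` is not page-aligned, or `length` is zero.
/// * Errors if the view overflows the shared frame list.
/// * Errors if the destination range is not vacant or not entirely in UserLand.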
pub fn map_partial_shared_mapping(&mut self,
shared_mapping: Arc<SpinRwLock<Vec<PhysicalMemRegion>>>,
address: VirtualAddress,
phys_offset: usize,
length: usize,
ty: MemoryType,
flags: MappingAccessRights)
-> Result<(), KernelError> {
address.check_aligned_to(PAGE_SIZE)?;
check_nonzero_length(length)?;
check_size_aligned(length, PAGE_SIZE)?;
// Check that the requested view fits in the shared frames. checked_sub makes an
// out-of-range phys_offset an error instead of a usize underflow panic.
let frames_length = shared_mapping.read().iter().flatten().count() * PAGE_SIZE;
if frames_length.checked_sub(phys_offset).map_or(true, |max_length| max_length < length) {
return Err(KernelError::InvalidSize { size: length, backtrace: Backtrace::new() })
}
UserLand::check_contains_region(address, length)?;
self.userspace_bookkeping.check_vacant(address, length)?;
let mapping = Mapping::new(address, MappingFrames::Shared(shared_mapping), phys_offset, length, ty, flags)
.expect("We checked everything, but bookkeeping refuses to create the mapping");
self.get_hierarchy().map_to_from_iterator(mapping.frames_it(), address, flags);
self.userspace_bookkeping.add_mapping(mapping)
.expect("We checked everything, but bookkeeping refuses to add the mapping");
Ok(())
}
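/// Creates a guarded mapping: the range is bookkept with type `ty`, is backed
/// by no frames, and is marked as guarded in the page tables so that any
/// access to it faults.
///
/// # Errors
///
/// * Errors if the range is not entirely in UserLand, or if the bookkeeping
///   rejects the mapping (e.g. the range is not vacant).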
pub fn guard(&mut self, address: VirtualAddress, length: usize, ty: MemoryType) -> Result<(), KernelError> {
UserLand::check_contains_region(address, length)?;
let mapping = Mapping::new(address, MappingFrames::None, 0, length, ty, MappingAccessRights::empty())?;
self.userspace_bookkeping.add_mapping(mapping)?;
self.get_hierarchy().guard(address, length);
Ok(())
}
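/// Deletes a mapping from the address space, unmapping it from the page tables
/// and returning the removed Mapping. The physical frames are not freed here:
/// they are still owned by the returned Mapping.
///
/// # Errors
///
/// * Errors if the range is not entirely in UserLand, or if it does not
///   correspond to an existing mapping.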
pub fn unmap(&mut self, address: VirtualAddress, length: usize) -> Result<Mapping, KernelError> {
UserLand::check_contains_region(address, length)?;
let mapping = self.userspace_bookkeping.remove_mapping(address, length)?;
self.get_hierarchy().unmap(address, length, |_| {
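// Don't free the frames: they are still tracked by the returned Mapping.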
});
Ok(mapping)
}
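/// Returns the mapping or unmapped region that `address` falls in.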
pub fn query_memory(&self, address: VirtualAddress) -> QueryMemory<'_> {
self.userspace_bookkeping.mapping_at(address)
}
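/// Expands the Heap mapping at `address` to `new_size`, allocating new frames
/// and mapping them just after the mapping's current end.
///
/// # Errors
///
/// * Errors if `new_size` is not page-aligned or is smaller than the current size.
/// * Errors if the mapping at `address` is not of type Heap, or does not use
///   shared frames.
/// * Errors if the expanded mapping would escape UserLand, the space following
///   the mapping is not vacant, or frame allocation fails.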
pub fn expand_mapping(&mut self, address: VirtualAddress, new_size: usize) -> Result<(), KernelError> {
check_size_aligned(new_size, PAGE_SIZE)?;
let old_mapping_ref = self.userspace_bookkeping.occupied_mapping_at(address)?;
let (start_addr, old_size) = {
if old_mapping_ref.state().ty() != MemoryType::Heap {
return Err(KernelError::InvalidMemState { address, ty: old_mapping_ref.state().ty(), backtrace: Backtrace::new() });
}
if let MappingFrames::Owned(..) | MappingFrames::None = old_mapping_ref.frames() {
return Err(KernelError::InvalidAddress { address: address.addr(), backtrace: Backtrace::new() });
}
(old_mapping_ref.address(), old_mapping_ref.length())
};
UserLand::check_contains_region(start_addr, new_size)?;
if new_size < old_size {
return Err(KernelError::InvalidSize { size: new_size, backtrace: Backtrace::new() });
}
if new_size == old_size {
return Ok(())
}
let added_length = new_size - old_size;
self.userspace_bookkeping.check_vacant(start_addr + old_size, added_length)?;
let mut new_frames = FrameAllocator::allocate_frames_fragmented(added_length)?;
let old_mapping = self.userspace_bookkeping.remove_mapping(start_addr, old_size)
.expect("expand_mapping: removing the mapping failed.");
let flags = old_mapping.flags();
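// Map the new frames right after the old mapping's end, append them to the
// shared frame list, and recreate a mapping covering the whole new range.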
let new_mapping = if let MappingFrames::Shared(frames) = old_mapping.frames() {
self.get_hierarchy().map_to_from_iterator(new_frames.iter().flatten(), start_addr + old_size, flags);
frames.write().append(&mut new_frames);
Mapping::new(start_addr, MappingFrames::Shared(frames.clone()), 0, new_size, MemoryType::Heap, flags)
.expect("expand_mapping: couldn't recreate mapping")
} else {
unreachable!("We checked we could only get a MappingFrames earlier.");
};
self.userspace_bookkeping.add_mapping(new_mapping)
.expect("expand_mapping: failed re-adding the mapping to the bookkeeping");
Ok(())
}
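/// Finds a vacant region of the address space able to hold `length` bytes.
///
/// # Errors
///
/// * Errors if no sufficiently large vacant region exists.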
pub fn find_available_space(&self, length: usize) -> Result<VirtualAddress, KernelError> {
self.userspace_bookkeping.find_available_space(length)
}
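/// Creates a CrossProcessMapping mirroring `length` bytes of the mapping found
/// at `address` (which may point anywhere inside the mapping).
///
/// # Errors
///
/// * Errors if `address` is not in UserLand, or if no occupied mapping
///   contains it.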
pub fn mirror_mapping(&self, address: VirtualAddress, length: usize) -> Result<CrossProcessMapping, KernelError> {
UserLand::check_contains_address(address)?;
let mapping = self.userspace_bookkeping.occupied_mapping_at(address)?;
let offset = address - mapping.address();
CrossProcessMapping::mirror_mapping(mapping, offset, length)
}
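/// Resizes this process' heap to `new_size`, creating the heap mapping on
/// first use. Shrinking is not supported: a smaller `new_size` leaves the heap
/// untouched. Returns the address of the heap's base.
///
/// # Errors
///
/// * Errors if the resized heap would not fit in UserLand, or if creating or
///   expanding the heap mapping fails.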
pub fn resize_heap(&mut self, new_size: usize) -> Result<VirtualAddress, KernelError> {
/// Whether a heap mapping already exists, and if so its current size.
enum HeapState {
/// The heap has not been created yet.
NoHeap,
/// The heap exists and has the given size.
Heap(usize)
}
UserLand::check_contains_region(self.heap_base_address, new_size)?;
let previous_heap_state = {
let query = self.userspace_bookkeping.mapping_at(self.heap_base_address);
let heap = query.mapping();
if let MemoryType::Unmapped = heap.state().ty() {
HeapState::NoHeap
} else {
HeapState::Heap(heap.length())
}
};
let heap_base_address = self.heap_base_address;
match previous_heap_state {
// No heap yet and none requested: nothing to do.
HeapState::NoHeap if new_size == 0 => (),
// First resize: create the heap mapping.
HeapState::NoHeap => self.create_regular_mapping(heap_base_address, new_size, MemoryType::Heap, MappingAccessRights::u_rw())?,
// Shrinking is not supported: keep the current size.
HeapState::Heap(old_size) if new_size < old_size => (),
// Grow the heap (a no-op if new_size equals the current size).
HeapState::Heap(_) => self.expand_mapping(heap_base_address, new_size)?
}
Ok(self.heap_base_address)
}
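/// Switches to this process' page tables, making this address space the
/// currently active one.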
pub fn switch_to(&mut self) {
self.table_hierarchy.switch_to();
}
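/// Checks that the range `[addr..addr + size]` is fully mapped with uniform
/// state and permissions, and that those satisfy the given masks and expected
/// values. Returns the state, permissions and attributes of the range.
///
/// Memory attributes are currently not tracked: the `_attrs_*` parameters are
/// ignored and the returned attributes are always empty.
///
/// # Errors
///
/// * Errors if some block in the range differs in state or permissions from
///   the first one, or does not match the expected masked values.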
#[allow(clippy::too_many_arguments)]
pub fn check_range(&self, addr: VirtualAddress, size: usize,
state_mask: MemoryState, state_expected: MemoryState,
perms_mask: MemoryPermissions, perms_expected: MemoryPermissions,
_attrs_mask: MemoryAttributes, _attrs_expected: MemoryAttributes,
_attrs_ignore_mask: MemoryAttributes) -> Result<(MemoryState, MemoryPermissions, MemoryAttributes), KernelError>
{
let addr_end = addr + size;
let mut cur_addr = addr;
let mut first_block_state = None;
let mut first_block_perms: Option<MemoryPermissions> = None;
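// Walk the range one mapping at a time, comparing each block to the first one.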
loop {
let mem = self.query_memory(cur_addr);
let mapping_perms = mem.mapping().flags().into();
if *first_block_state.get_or_insert(mem.mapping().state()) != mem.mapping().state() {
return Err(KernelError::InvalidMemState {
address: cur_addr,
ty: mem.mapping().state().ty(),
backtrace: Backtrace::new()
})
}
if *first_block_perms.get_or_insert(mapping_perms) != mapping_perms {
return Err(KernelError::InvalidMemState {
address: cur_addr,
ty: mem.mapping().state().ty(),
backtrace: Backtrace::new()
})
}
if mem.mapping().state() & state_mask != state_expected ||
mapping_perms & perms_mask != perms_expected
{
return Err(KernelError::InvalidMemState {
address: cur_addr,
ty: mem.mapping().state().ty(),
backtrace: Backtrace::new()
});
}
cur_addr = mem.mapping().address() + mem.mapping().length();
if cur_addr >= addr_end {
return Ok((mem.mapping().state(), mem.mapping().flags().into(), MemoryAttributes::empty()))
}
}
}
}