#![allow(missing_docs)]
use core::ops::{Index, IndexMut};
#[path = "entry.rs"]
pub mod entry;
use self::entry::{EntryFlags as I386EntryFlags, PageState};
use super::*;
use crate::frame_alloc::{Frame, FrameAllocator, MEMORY_FRAME_SIZE};
use crate::address::{VirtualAddress, PhysicalAddress};
use core::ops::Deref;
use core::ops::DerefMut;
use core::marker::PhantomData;
pub struct PageTable {
    entries: [Entry; ENTRY_COUNT]
}
pub struct PageDirectory(PageTable);
const_assert!(::core::mem::size_of::<PageDirectory>() >= MEMORY_FRAME_SIZE);
const_assert!(::core::mem::size_of::<PageTable>() >= MEMORY_FRAME_SIZE);
const_assert!(::core::mem::size_of::<PageTable>() == ::core::mem::size_of::<PageDirectory>());
pub const DIRECTORY_RECURSIVE_ADDRESS: VirtualAddress = VirtualAddress(0xffff_f000);
impl Index<usize> for PageDirectory {
    type Output = Entry;
    fn index (&self, index: usize) -> &Entry { &self.entries()[index] }
}
impl Index<usize> for PageTable {
    type Output = Entry;
    fn index (&self, index: usize) -> &Entry { &self.entries()[index] }
}
impl IndexMut<usize> for PageDirectory {
    fn index_mut(&mut self, index: usize) -> &mut Entry { &mut self.entries_mut()[index] }
}
impl IndexMut<usize> for PageTable {
    fn index_mut(&mut self, index: usize) -> &mut Entry { &mut self.entries_mut()[index] }
}
pub trait HierarchicalTable {
    fn entries(&self) -> &[Entry; ENTRY_COUNT];
    fn entries_mut(&mut self) -> &mut [Entry; ENTRY_COUNT];
    
    fn zero(&mut self) {
        for entry in self.entries_mut().iter_mut() {
            entry.set(Frame::from_physical_addr(PhysicalAddress(0)), I386EntryFlags::empty());
        }
    }
    
    
    fn map_nth_entry<T: Flusher>(&mut self, entry: usize, frame: Frame, flags: I386EntryFlags) {
        self.entries_mut()[entry].set(frame, flags);
        T::flush_cache();
    }
    
    
    fn guard_nth_entry<T: Flusher>(&mut self, entry: usize) {
        self.entries_mut()[entry].set_guard();
        T::flush_cache();
    }
    fn flush_cache() {
        
    }
}
impl HierarchicalTable for PageTable {
    fn entries(&self) -> &[Entry; ENTRY_COUNT] { &self.entries }
    fn entries_mut(&mut self) -> &mut [Entry; ENTRY_COUNT] { &mut self.entries }
}
impl HierarchicalTable for PageDirectory {
    fn entries(&self) -> &[Entry; ENTRY_COUNT] { &self.0.entries }
    fn entries_mut(&mut self) -> &mut [Entry; ENTRY_COUNT] { &mut self.0.entries }
}
pub trait PageTableTrait : HierarchicalTable {
    type FlusherType : Flusher;
    
    
    
    #[deprecated]
    fn map_whole_table(&mut self, start_address: PhysicalAddress, flags: I386EntryFlags) {
        let mut addr = start_address.addr();
        for entry in &mut self.entries_mut()[..] {
            entry.set(Frame::from_physical_addr(PhysicalAddress(addr)), flags);
            addr += PAGE_SIZE;
        }
        Self::FlusherType::flush_cache();
    }
    
    fn map_guard_whole_table(&mut self) {
        for entry in &mut self.entries_mut()[..] {
            entry.set(Frame::from_physical_addr(PhysicalAddress(0)), I386EntryFlags::GUARD_PAGE);
        }
        Self::FlusherType::flush_cache();
    }
}
pub trait PageDirectoryTrait : HierarchicalTable {
    type PageTableType : PageTableTrait;
    type FlusherType : Flusher;
    
    fn get_table(&mut self, index: usize) -> PageState<SmartHierarchicalTable<Self::PageTableType>>;
    
    fn create_table(&mut self, index: usize) -> SmartHierarchicalTable<Self::PageTableType>;
    
    
    
    
    
    fn get_table_or_create(&mut self, index: usize) -> SmartHierarchicalTable<Self::PageTableType> {
        if !self.entries()[index].is_unused() {
            assert!(!self.entries()[index].is_guard(), "Table is guarded");
            self.get_table(index).unwrap()
        } else {
            self.create_table(index)
        }
    }
    
    
    
    
    
    
    fn map_to(&mut self, page:    Frame,
                         address: VirtualAddress,
                         flags:   I386EntryFlags) {
        assert_eq!(address.addr() % PAGE_SIZE, 0, "Address is not page aligned");
        let table_nbr = address.addr() / (ENTRY_COUNT * PAGE_SIZE);
        let table_off = address.addr() % (ENTRY_COUNT * PAGE_SIZE) / PAGE_SIZE;
        let mut table = self.get_table_or_create(table_nbr);
        assert!(table.entries()[table_off].is_unused(), "Tried to map an already mapped entry: {:?}", table.entries()[table_off]);
        table.map_nth_entry::<Self::FlusherType>(table_off, page, flags);
    }
    
    
    
    
    
    
    fn guard(&mut self, address: VirtualAddress) {
        assert_eq!(address.addr() % PAGE_SIZE, 0, "Address is not page aligned");
        let table_nbr = address.addr() / (ENTRY_COUNT * PAGE_SIZE);
        let table_off = address.addr() % (ENTRY_COUNT * PAGE_SIZE) / PAGE_SIZE;
        let mut table = self.get_table_or_create(table_nbr);
        assert!(table.entries()[table_off].is_unused(), "Tried to guard an already mapped entry {:#010x}: {:?}", address.addr(), table.entries()[table_off]);
        table.guard_nth_entry::<Self::FlusherType>(table_off);
    }
    
    
    
    
    
    fn __unmap(&mut self, page: VirtualAddress) -> PageState<Frame> {
        assert_eq!(page.addr() % PAGE_SIZE, 0, "Address is not page aligned");
        let table_nbr = page.addr() / (ENTRY_COUNT * PAGE_SIZE);
        let table_off = page.addr() % (ENTRY_COUNT * PAGE_SIZE) / PAGE_SIZE;
        
        let mut table = if self.entries()[table_nbr].is_guard() {
            
            self.entries_mut()[table_nbr].set_unused();
            let mut table = self.create_table(table_nbr);
            table.map_guard_whole_table();
            table
        } else {
            self.get_table(table_nbr)
            
                .unwrap()
            
        };
        let entry= &mut table.entries_mut()[table_off];
        assert_eq!(entry.is_unused(), false);
        let ret = entry.set_unused();
        Self::FlusherType::flush_cache();
        ret
    }
    
    
    fn find_available_virtual_space_aligned<Land: VirtualSpaceLand>(&mut self,
                                                            page_nb: usize,
                                                            alignement: usize) -> Option<VirtualAddress> {
        fn compute_address(table: usize, page: usize) -> VirtualAddress {
            VirtualAddress(table * ENTRY_COUNT * PAGE_SIZE + page * PAGE_SIZE)
        }
        fn satisfies_alignement(table: usize, page: usize, alignment: usize) -> bool {
            let mask : usize = (1 << alignment) - 1;
            compute_address(table, page).addr() & mask == 0
        }
        let mut considering_hole: bool = false;
        let mut hole_size: usize = 0;
        let mut hole_start_table: usize = 0;
        let mut hole_start_page:  usize = 0;
        let mut counter_curr_table:  usize = Land::start_table();
        let mut counter_curr_page:   usize;
        while counter_curr_table < Land::end_table() && (!considering_hole || hole_size < page_nb) {
            counter_curr_page = 0;
            match self.get_table(counter_curr_table) {
                PageState::Available => { 
                    if !considering_hole
                        && satisfies_alignement(counter_curr_table, 0, alignement) {
                        
                        considering_hole = true;
                        hole_start_table = counter_curr_table;
                        hole_start_page = 0;
                        hole_size = 0;
                    }
                    hole_size += ENTRY_COUNT;
                },
                PageState::Guarded => {
                    considering_hole = false;
                },
                PageState::Present(curr_table) => {
                    while counter_curr_page < ENTRY_COUNT && (!considering_hole || hole_size < page_nb) {
                        if curr_table.entries()[counter_curr_page].is_unused() {
                            if !considering_hole
                                && satisfies_alignement(counter_curr_table, counter_curr_page, alignement) {
                                
                                considering_hole = true;
                                hole_start_table = counter_curr_table;
                                hole_start_page = counter_curr_page;
                                hole_size = 0;
                            }
                            hole_size += 1;
                        } else {
                            
                            considering_hole = false;
                        }
                        counter_curr_page += 1;
                    }
                }
            }
            counter_curr_table += 1;
        };
        if considering_hole && hole_size >= page_nb { 
            Some(compute_address(hole_start_table, hole_start_page))
        } else { 
            None
        }
    }
}
bitflags! {
    
    pub struct EntryFlags : u32 {
        const WRITABLE =        1 << 0;
        const USER_ACCESSIBLE = 1 << 1;
    }
}
pub enum MappingType {
    Present(Frame, EntryFlags),
    Guard
}
pub trait PageTablesSet {
    
    fn map_to(&mut self, mapping: MappingType, address: VirtualAddress);
    
    fn get_phys(&mut self, address: VirtualAddress) -> PageState<PhysicalAddress>;
    
    fn find_available_virtual_space_aligned<Land: VirtualSpaceLand>(&mut self, page_nb: usize, alignement: usize) -> Option<VirtualAddress>;
    
    fn print_mapping(&mut self) {
        #[derive(Debug, Clone, Copy)]
        enum State { Present(usize, usize), Guarded(usize), Available(usize) }
        impl State {
            fn get_vaddr(&self) -> usize {
                match *self {
                    State::Present(addr, _) => addr,
                    State::Guarded(addr) => addr,
                    State::Available(addr) => addr,
                }
            }
            fn update(&mut self, newstate: State) {
                let old_self = ::core::mem::replace(self, State::Present(0, 0));
                let real_newstate = match (old_self, newstate) {
                    (State::Present(addr, phys), State::Present(newaddr, newphys)) if newphys - phys == newaddr - addr => State::Present(addr, phys),
                    (State::Present(addr, phys), State::Present(_newaddr, _newphys)) => State::Present(addr, phys),
                    (State::Guarded(addr), State::Guarded(_newaddr)) => State::Guarded(addr),
                    (State::Available(addr), State::Available(_newaddr)) => State::Available(addr),
                    (old, new) => {
                        old.print(new);
                        new
                    }
                };
                *self = real_newstate;
            }
            fn from<T: PageTablesSet + ?Sized>(set: &mut T, addr: VirtualAddress) -> State {
                match set.get_phys(addr) {
                    PageState::Present(table) => State::Present(addr.addr(), table.addr()),
                    PageState::Guarded => State::Guarded(addr.addr()),
                    _ => State::Available(addr.addr())
                }
            }
            fn print(&self, newstate: State) {
                let new_vaddr = newstate.get_vaddr();
                let _ = match *self {
                    State::Present(addr, phys) => writeln!(Serial, "{:#010x} - {:#010x} - MAPS {:#010x}-{:#010x}", addr, new_vaddr, phys, (phys + (new_vaddr - addr))),
                    State::Guarded(addr) => writeln!(Serial, "{:#010x} - {:#010x} - GUARDED", addr, new_vaddr),
                    State::Available(addr) => writeln!(Serial, "{:#010x} - {:#010x} - AVAILABLE", addr, new_vaddr),
                };
            }
        }
        let mut iter = (0..sunrise_libutils::align_down(usize::max_value(), PAGE_SIZE)).step_by(PAGE_SIZE);
        let mut state = State::from(self, VirtualAddress(iter.next().unwrap()));
        
        for vaddr in iter {
            state.update(State::from(self, VirtualAddress(vaddr)));
        }
        state.print(State::Available((ENTRY_COUNT - 1) * PAGE_SIZE * ENTRY_COUNT));
    }
    
    
    
    
    
    
    fn unmap(&mut self, page: VirtualAddress) -> PageState<Frame>;
    
    
    
    
    
    
    fn map_allocate_to(&mut self, address: VirtualAddress, flags: EntryFlags) {
        let page = FrameAllocator::alloc_frame();
        self.map_to(MappingType::Present(page, flags), address);
    }
    
    
    
    
    
    fn map_frame<Land: VirtualSpaceLand>(&mut self, frame: Frame, flags: EntryFlags) -> VirtualAddress {
        let va = self.find_available_virtual_space::<Land>(1).unwrap();
        self.map_to(MappingType::Present(frame, flags), va);
        va
    }
    
    
    
    
    
    
    fn get_page<Land: VirtualSpaceLand>(&mut self) -> VirtualAddress {
        let va = self.find_available_virtual_space::<Land>(1).unwrap();
        self.map_allocate_to(va, EntryFlags::WRITABLE);
        va
    }
    
    
    
    
    
    
    fn map_page_guard(&mut self, address: VirtualAddress) {
        
        self.map_to(MappingType::Guard, address);
    }
    
    
    
    
    
    
    
    fn map_range_page_guard(&mut self, address: VirtualAddress, page_nb: usize) {
        for current_address in (address.addr()..address.addr() + (page_nb * PAGE_SIZE)).step_by(PAGE_SIZE) {
            self.map_page_guard(VirtualAddress(current_address))
        }
    }
    
    
    
    
    
    
    
    
    
    fn map_range(&mut self, phys_addr: PhysicalAddress, address: VirtualAddress, page_nb: usize, flags: EntryFlags) {
        for addr_offset in (0..page_nb * PAGE_SIZE).step_by(PAGE_SIZE) {
            self.map_to(MappingType::Present(Frame::from_physical_addr(phys_addr + addr_offset), flags), address + addr_offset);
        }
    }
    
    
    
    
    
    
    fn map_range_allocate(&mut self, address: VirtualAddress, page_nb: usize, flags: EntryFlags) {
        let address_end = VirtualAddress(address.addr() + (page_nb * PAGE_SIZE));
        for current_address in (address.addr()..address_end.addr()).step_by(PAGE_SIZE) {
            self.map_allocate_to(VirtualAddress(current_address), flags);
        }
    }
    
    fn identity_map(&mut self, frame: Frame, flags: EntryFlags) {
        let addr = frame.address().addr();
        self.map_to(MappingType::Present(frame, flags), VirtualAddress(addr));
    }
    
    
    
    
    
    fn identity_map_region(&mut self, start_address: PhysicalAddress, region_size: usize, flags: EntryFlags) {
        assert_eq!(start_address.addr() % PAGE_SIZE, 0, "Tried to map a non paged-aligned region");
        let start = round_to_page(start_address.addr());
        let end = round_to_page_upper(start_address.addr() + region_size);
        for frame_addr in (start..end).step_by(PAGE_SIZE) {
            let frame = Frame::from_physical_addr(PhysicalAddress(frame_addr));
            self.identity_map(frame, flags);
        }
    }
    
    fn find_available_virtual_space<Land: VirtualSpaceLand>(&mut self, page_nb: usize) -> Option<VirtualAddress> {
        
        self.find_available_virtual_space_aligned::<Land>(page_nb, 0)
    }
    
    
    
    
    
    
    
    
    fn set_page_readonly(&mut self, page_address: VirtualAddress) {
        match self.unmap(page_address) {
            PageState::Available => { panic!("Tried to set read-only on unmapped entry")}
            PageState::Guarded   => { panic!("Tried to set read-only on guarded entry")}
            PageState::Present(frame)   => {
                self.map_to(MappingType::Present(frame, EntryFlags::WRITABLE), page_address);
            }
        }
    }
    
    
    
    
    
    
    fn set_region_readonly(&mut self, start_address: VirtualAddress, page_nb: usize) {
        for i in 0..page_nb {
            self.set_page_readonly(start_address + i * PAGE_SIZE);
        }
    }
}
mod detail {
    
    
    
    
    
    
    
    
    
    
    
    
    
    pub trait I386PageTablesSet {
        type PageDirectoryType: super::PageDirectoryTrait;
        
        fn get_directory(&mut self) -> super::SmartHierarchicalTable<Self::PageDirectoryType>;
    }
}
use self::detail::I386PageTablesSet;
impl<T: I386PageTablesSet> PageTablesSet for T {
    
    fn map_to(&mut self, mapping: MappingType, address: VirtualAddress) {
        let mut dir = self.get_directory();
        match mapping {
            MappingType::Present(frame, flags) => dir.map_to(frame, address, flags.into()),
            MappingType::Guard => dir.guard(address)
        }
    }
    fn get_phys(&mut self, address: VirtualAddress) -> PageState<PhysicalAddress> {
        let table_nbr = address.addr() / (ENTRY_COUNT * PAGE_SIZE);
        let table_off = address.addr() % (ENTRY_COUNT * PAGE_SIZE) / PAGE_SIZE;
        let mut directory = self.get_directory();
        let table = match directory.get_table(table_nbr) {
            PageState::Available => return PageState::Available,
            PageState::Guarded => return PageState::Guarded,
            PageState::Present(table) => table
        };
        table.entries()[table_off].pointed_frame()
    }
    
    fn find_available_virtual_space_aligned<Land: VirtualSpaceLand>(&mut self, page_nb: usize, alignement: usize) -> Option<VirtualAddress> {
         self.get_directory().find_available_virtual_space_aligned::<Land>(page_nb, alignement)
    }
    
    
    
    
    
    
    fn unmap(&mut self, page: VirtualAddress) -> PageState<Frame> {
        self.get_directory().__unmap(page)
    }
}
macro_rules! inherit_deref_index {
    ($ty:ty, $sub_ty:ty) => {
        impl Deref for $ty {
            type Target = $sub_ty;
            fn deref(&self) -> &<Self as Deref>::Target { &self.0 }
        }
        impl DerefMut for $ty {
            fn deref_mut(&mut self) -> &mut <Self as Deref>::Target { &mut self.0 }
        }
        impl Index<usize> for $ty {
            type Output = <$sub_ty as Index<usize>>::Output;
            fn index (&self, index: usize) -> &Entry { &self.0[index] }
        }
        impl IndexMut<usize> for $ty {
            fn index_mut (&mut self, index: usize) -> &mut Entry { &mut self.0[index] }
        }
    }
}
macro_rules! impl_hierachical_table {
    ($ty: ty) => {
        impl HierarchicalTable for $ty {
            fn entries(&self) -> &[Entry; ENTRY_COUNT] { self.0.entries() }
            fn entries_mut(&mut self) -> &mut [Entry; ENTRY_COUNT] { self.0.entries_mut() }
        }
    };
}
pub struct ActivePageTables ();
impl I386PageTablesSet for ActivePageTables {
    type PageDirectoryType = ActivePageDirectory;
    fn get_directory(&mut self) -> SmartHierarchicalTable<ActivePageDirectory> {
        assert!(is_paging_on(), "Paging is disabled");
        SmartHierarchicalTable::new(DIRECTORY_RECURSIVE_ADDRESS.addr() as *mut ActivePageDirectory)
    }
}
pub struct ActivePageDirectory(PageDirectory);
inherit_deref_index!(ActivePageDirectory, PageDirectory);
impl_hierachical_table!(ActivePageDirectory);
impl PageDirectoryTrait for ActivePageDirectory {
    type PageTableType = ActivePageTable;
    type FlusherType = TlbFlush;
    
    fn get_table(&mut self, index: usize) -> PageState<SmartHierarchicalTable<Self::PageTableType>> {
        self.get_table_address(index)
            .map(|addr| SmartHierarchicalTable::new(unsafe { &mut * (addr as *mut _) }))
    }
    
    fn create_table(&mut self, index: usize) -> SmartHierarchicalTable<Self::PageTableType> {
        assert!(self.entries()[index].is_unused());
        let table_frame = FrameAllocator::alloc_frame();
        self.map_nth_entry::<Self::FlusherType>(index, table_frame, I386EntryFlags::PRESENT | I386EntryFlags::WRITABLE);
        
        let mut table= self.get_table(index).unwrap();
        table.zero();
        table
    }
}
impl ActivePageDirectory {
    
    fn get_table_address(&self, index: usize) -> PageState<usize> {
        let entry_flags = self[index].flags();
        if entry_flags.contains(I386EntryFlags::PRESENT) {
            let table_address = self as *const _ as usize;
            PageState::Present((table_address << 10) | (index << 12))
        } else if entry_flags.contains(I386EntryFlags::GUARD_PAGE) {
            PageState::Guarded
        } else {
            PageState::Available
        }
    }
}
pub struct ActivePageTable(PageTable);
inherit_deref_index!(ActivePageTable, PageTable);
impl_hierachical_table!(ActivePageTable);
impl PageTableTrait for ActivePageTable { type FlusherType = TlbFlush; }
pub struct SmartHierarchicalTable<'a, T: HierarchicalTable>(*mut T, PhantomData<&'a ()>);
impl<'a, T: HierarchicalTable> SmartHierarchicalTable<'a, T> {
    fn new(inner: *mut T) -> SmartHierarchicalTable<'a, T> {
        SmartHierarchicalTable(inner, PhantomData)
    }
}
impl<'a, T: HierarchicalTable> Deref for SmartHierarchicalTable<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe {
            self.0.as_ref().unwrap()
        }
    }
}
impl<'a, T: HierarchicalTable> DerefMut for SmartHierarchicalTable<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe {
            self.0.as_mut().unwrap()
        }
    }
}
impl<'a, T: HierarchicalTable> Drop for SmartHierarchicalTable<'a, T> {
    fn drop(&mut self) {
        unsafe {
            ::core::ptr::drop_in_place(self.0);
        }
    }
}
pub struct InactivePageTables {
    
    directory_physical_address: Frame,
}
impl I386PageTablesSet for InactivePageTables {
    type PageDirectoryType = InactivePageDirectory;
    
    fn get_directory(&mut self) -> SmartHierarchicalTable<InactivePageDirectory> {
        let frame = Frame::from_physical_addr(self.directory_physical_address.address());
        let mut active_pages = ACTIVE_PAGE_TABLES.lock();
        let va = active_pages.map_frame::<KernelLand>(frame, EntryFlags::WRITABLE);
        SmartHierarchicalTable::new(va.addr() as *mut InactivePageDirectory)
    }
}
impl Default for InactivePageTables {
    fn default() -> InactivePageTables {
        InactivePageTables::new()
    }
}
impl InactivePageTables {
    
    pub fn new() -> InactivePageTables {
        let directory_frame = FrameAllocator::alloc_frame();
        let directory_frame_dup = Frame::from_physical_addr(directory_frame.address());
        let mut pageset = InactivePageTables {
            directory_physical_address: directory_frame
        };
        {
            let mut dir = pageset.get_directory();
            dir.zero();
            dir.map_nth_entry::<NoFlush>(ENTRY_COUNT - 1, directory_frame_dup, I386EntryFlags::PRESENT | I386EntryFlags::WRITABLE);
        };
        pageset
    }
    
    
    
    
    
    
    
    
    
    
    
    
    pub unsafe fn switch_to(mut self) -> InactivePageTables {
        
        self.get_directory().copy_active_kernelspace();
        let old_pages = super::swap_cr3(self.directory_physical_address.address());
        ::core::mem::forget(self.directory_physical_address);
        InactivePageTables { directory_physical_address: Frame::from_allocated_addr(old_pages) }
    }
    
    
    
    
    
    pub fn delete(mut self) {
        self.get_directory().delete_userspace();
        
    }
}
pub struct InactivePageDirectory(PageDirectory);
inherit_deref_index!(InactivePageDirectory, PageDirectory);
impl_hierachical_table!(InactivePageDirectory);
pub struct InactivePageTable(PageTable);
inherit_deref_index!(InactivePageTable, PageTable);
impl_hierachical_table!(InactivePageTable);
impl PageDirectoryTrait for InactivePageDirectory {
    type PageTableType = InactivePageTable;
    type FlusherType = NoFlush;
    
    fn get_table(&mut self, index: usize) -> PageState<SmartHierarchicalTable<Self::PageTableType>> {
        self.entries()[index].pointed_frame().map(|frame| {
            let mut active_pages = ACTIVE_PAGE_TABLES.lock();
            
            
            let va = active_pages.map_frame::<KernelLand>(Frame::from_physical_addr(frame), EntryFlags::WRITABLE);
            SmartHierarchicalTable::new(unsafe {va.addr() as *mut InactivePageTable})
        })
    }
    
    
    fn create_table(&mut self, index: usize) -> SmartHierarchicalTable<Self::PageTableType> {
        assert!(self.entries()[index].is_unused());
        let table_frame = FrameAllocator::alloc_frame();
        let mut active_pages = ACTIVE_PAGE_TABLES.lock();
        
        let dup = Frame::from_physical_addr(table_frame.address());
        let va = active_pages.map_frame::<KernelLand>(dup, EntryFlags::WRITABLE);
        let mut mapped_table = SmartHierarchicalTable::new(unsafe {va.addr() as *mut InactivePageTable});
        mapped_table.zero();
        self.map_nth_entry::<Self::FlusherType>(index, table_frame, I386EntryFlags::PRESENT | I386EntryFlags::WRITABLE);
        mapped_table
    }
}
impl InactivePageDirectory {
    
    
    
    
    fn delete_userspace(&mut self) {
        for table_index in UserLand::start_table()..UserLand::end_table() {
            if let PageState::Present(mut table) = self.get_table(table_index) {
                
                table.free_all_frames();
            }
            
            self.entries_mut()[table_index].set_unused();
        }
    }
    
    
    fn copy_active_kernelspace(&mut self) {
        let mut lock = ACTIVE_PAGE_TABLES.lock();
        let mut active_dir = lock.get_directory();
        self.entries_mut()[KernelLand::start_table()..=KernelLand::end_table()]
            .clone_from_slice(&active_dir.entries_mut()[KernelLand::start_table()..=KernelLand::end_table()]);
    }
}
impl PageTableTrait for InactivePageTable { type FlusherType = NoFlush; }
impl InactivePageTable {
    
    fn free_all_frames(&mut self) {
        for entry in self.entries_mut().iter_mut() {
            entry.set_unused();
        }
    }
}
impl Drop for InactivePageDirectory {
    fn drop(&mut self) {
        let mut active_pages = ACTIVE_PAGE_TABLES.lock();
        active_pages.unmap(VirtualAddress(self as *mut _ as usize));
    }
}
impl Drop for InactivePageTable {
    fn drop(&mut self) {
        let mut active_pages = ACTIVE_PAGE_TABLES.lock();
        active_pages.unmap(VirtualAddress(self as *mut _ as usize));
    }
}
pub struct PagingOffPageSet {
    
    
    pub directory_physical_address: Frame,
}
impl I386PageTablesSet for PagingOffPageSet {
    type PageDirectoryType = PagingOffDirectory;
    fn get_directory(&mut self) -> SmartHierarchicalTable<<Self as I386PageTablesSet>::PageDirectoryType> {
        SmartHierarchicalTable::new(self.directory_physical_address.address().addr() as *mut PagingOffDirectory)
    }
}
impl PagingOffPageSet {
    
    
    
    
    
    pub unsafe fn paging_off_create_page_set() -> Self {
        
        let dir = FrameAllocator::alloc_frame();
        let dir_addr = dir.address().addr() as *mut PagingOffDirectory;
        (*dir_addr).init_directory();
        Self { directory_physical_address : dir }
    }
    
    
    
    
    
    pub unsafe fn enable_paging(self) {
        enable_paging(self.directory_physical_address.address());
        ::core::mem::forget(self.directory_physical_address);
    }
}
pub struct PagingOffDirectory(PageDirectory);
inherit_deref_index!(PagingOffDirectory, PageDirectory);
impl_hierachical_table!(PagingOffDirectory);
impl PageDirectoryTrait for PagingOffDirectory {
    type PageTableType = PagingOffTable;
    type FlusherType = NoFlush;
    
    fn get_table(&mut self, index: usize) -> PageState<SmartHierarchicalTable<Self::PageTableType>> {
        self.entries()[index].pointed_frame().map(|addr| {
            SmartHierarchicalTable::new(unsafe {addr.addr() as *mut PagingOffTable})
        })
    }
    
    fn create_table(&mut self, index: usize) -> SmartHierarchicalTable<Self::PageTableType> {
        let frame = FrameAllocator::alloc_frame();
        let mut table = SmartHierarchicalTable::new(
            unsafe {frame.address().addr() as *mut PagingOffTable}
        );
        table.zero();
        self.map_nth_entry::<Self::FlusherType>(index, frame, I386EntryFlags::PRESENT | I386EntryFlags::WRITABLE);
        table
    }
}
impl PagingOffDirectory {
    
    
    
    
    
    
    
    
    
    unsafe fn init_directory(&mut self) {
        self.zero();
        let self_frame = Frame::from_physical_addr(PhysicalAddress(self as *mut _ as usize));
        
        self.entries_mut()[ENTRY_COUNT - 1].set(self_frame, I386EntryFlags::PRESENT | I386EntryFlags::WRITABLE);
    }
}
pub struct PagingOffTable(PageTable);
inherit_deref_index!(PagingOffTable, PageTable);
impl_hierachical_table!(PagingOffTable);
impl PageTableTrait for PagingOffTable { type FlusherType = NoFlush; }
pub trait Flusher {
    fn flush_cache() {}
}
pub struct TlbFlush;
impl Flusher for TlbFlush { fn flush_cache() { flush_tlb(); } }
pub struct NoFlush;
impl Flusher for NoFlush { fn flush_cache() { } }