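//! Kernel heap allocator.
//!
//! Wraps a `linked_list_allocator::Heap` behind a `SpinLock`, initialized lazily
//! through a `Once`. On first use it reserves `RESERVED_HEAP_SIZE` (512MiB) of
//! kernel virtual memory, maps a single page of it, and guards the remainder;
//! the heap is then grown page by page on demand when an allocation fails.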
use core::alloc::{GlobalAlloc, Layout};
use crate::sync::{SpinLock, Once};
use core::ops::Deref;
use core::ptr::NonNull;
use linked_list_allocator::{Heap, align_up};
use crate::paging::{PAGE_SIZE, MappingAccessRights, kernel_memory::get_kernel_memory};
use crate::frame_allocator::{FrameAllocator, FrameAllocatorTrait};
use crate::mem::VirtualAddress;
/// The kernel heap allocator. The inner `Heap` is created lazily on first use.
#[allow(missing_debug_implementations)]
pub struct Allocator(Once<SpinLock<Heap>>);

/// Maximum size the kernel heap may grow to: 512MiB of virtual memory reserved
/// up front, backed by physical frames only as the heap actually grows.
const RESERVED_HEAP_SIZE: usize = 512 * 1024 * 1024;
impl Allocator {
    /// Expands the heap by at least `by` bytes, rounded up to a whole number of
    /// pages, allocating physical frames and mapping them over the guard pages
    /// reserved at init.
    ///
    /// # Panics
    ///
    /// Panics if the new heap top would overrun the reserved region, or if no
    /// physical frame can be allocated.
    fn expand(&self, by: usize) {
        let heap = self.0.call_once(Self::init);
        let heap_top = heap.lock().top();
        let heap_bottom = heap.lock().bottom();
        let new_heap_top = align_up(by, PAGE_SIZE) + heap_top;
        assert!(new_heap_top - heap_bottom < RESERVED_HEAP_SIZE, "New heap grows over reserved heap size");
        debug!("EXTEND {:#010x}", new_heap_top);

        for new_page in (heap_top..new_heap_top).step_by(PAGE_SIZE) {
            let frame = FrameAllocator::allocate_frame()
                .expect("Cannot allocate physical memory for heap expansion");
            let mut active_pages = get_kernel_memory();
            // Replace the guard page installed at init with a real read-write mapping.
            active_pages.unmap(VirtualAddress(new_page), PAGE_SIZE);
            active_pages.map_phys_region_to(frame, VirtualAddress(new_page), MappingAccessRights::k_rw());
        }
        unsafe {
            // Safety: the pages backing the new range have just been mapped above.
            heap.lock().extend(align_up(by, PAGE_SIZE));
        }
    }
    /// Initializes the heap: finds a `RESERVED_HEAP_SIZE` region of kernel virtual
    /// memory, maps its first page read-write, and fills the rest with guard pages
    /// so the heap can later grow into them via `expand()`.
    fn init() -> SpinLock<Heap> {
        let mut active_pages = get_kernel_memory();
        let heap_space = active_pages.find_virtual_space(RESERVED_HEAP_SIZE)
            .expect("Kernel should have 512MB of virtual memory");
        let frame = FrameAllocator::allocate_frame()
            .expect("Cannot allocate first frame of heap");
        active_pages.map_phys_region_to(frame, heap_space, MappingAccessRights::k_rw());
        active_pages.guard(heap_space + PAGE_SIZE, RESERVED_HEAP_SIZE - PAGE_SIZE);
        info!("Reserving {} pages at {:#010x}", RESERVED_HEAP_SIZE / PAGE_SIZE - 1, heap_space.addr() + PAGE_SIZE);
        unsafe {
            // Safety: `heap_space` points to the single page we just mapped.
            SpinLock::new(Heap::new(heap_space.addr(), PAGE_SIZE))
        }
    }

    /// Creates a new, uninitialized `Allocator`. The heap itself is set up lazily
    /// on the first allocation.
    pub const fn new() -> Allocator {
        Allocator(Once::new())
    }
}
impl Deref for Allocator {
    type Target = SpinLock<Heap>;

    /// Dereferences to the inner heap, initializing it on first access.
    fn deref(&self) -> &SpinLock<Heap> {
        self.0.call_once(Self::init)
    }
}
unsafe impl GlobalAlloc for Allocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let allocation = self.0.call_once(Self::init).lock().allocate_first_fit(layout);
        let size = layout.size();
        // On failure, grow the heap by at least `size` bytes and retry once.
        let alloc = match allocation {
            Err(()) => {
                self.expand(size);
                self.0.call_once(Self::init).lock().allocate_first_fit(layout)
            }
            _ => allocation
        }.ok().map_or(::core::ptr::null_mut(), |allocation| allocation.as_ptr());
        debug!("ALLOC {:#010x?}, size {:#x}", alloc, layout.size());
        alloc
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        debug!("FREE {:#010x?}, size {:#x}", ptr, layout.size());
        // In debug builds, poison freed memory to help catch use-after-free bugs.
        if cfg!(debug_assertions) {
            let p = ptr as usize;
            for i in p..(p + layout.size()) {
                *(i as *mut u8) = 0x7F;
            }
        }
        self.0.call_once(Self::init).lock().deallocate(NonNull::new(ptr).unwrap(), layout)
    }
}
/// Handler invoked when a heap allocation fails (the `oom` lang item).
#[cfg(target_os = "none")]
#[lang = "oom"]
#[no_mangle]
pub fn rust_oom(_: Layout) -> ! {
    panic!("OOM")
}
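
// Example usage (illustrative sketch, not part of the original module): in a
// `no_std` kernel crate this allocator would typically be registered as the
// global allocator so that `alloc` collections route through it. The static
// name and the test function below are assumptions for illustration only.
//
// #[global_allocator]
// static ALLOCATOR: Allocator = Allocator::new();
//
// extern crate alloc;
//
// fn heap_smoke_test() {
//     // The first allocation lazily runs `Allocator::init`, reserving the
//     // 512MiB virtual region and mapping its first page; larger requests
//     // trigger `expand` to map additional pages on demand.
//     let mut v: alloc::vec::Vec<u8> = alloc::vec::Vec::with_capacity(8 * 1024);
//     v.push(0xAB);
// }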