From 648b0edb41aea69f4089e05e2ec853af0535600b Mon Sep 17 00:00:00 2001
From: 4lDO2 <4lDO2@protonmail.com>
Date: Sun, 17 Jul 2022 14:08:25 +0200
Subject: [PATCH] Partial: migrate allocator to RMM.

---
 src/allocator/linked_list.rs |  4 ++--
 src/allocator/mod.rs         | 11 ++++++-----
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/src/allocator/linked_list.rs b/src/allocator/linked_list.rs
index e7a371f..496f8ff 100644
--- a/src/allocator/linked_list.rs
+++ b/src/allocator/linked_list.rs
@@ -3,7 +3,7 @@ use core::ptr::{self, NonNull};
 
 use linked_list_allocator::Heap;
 use spin::Mutex;
 
-use crate::paging::{ActivePageTable, TableKind};
+use crate::paging::KernelMapper;
 
 static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
@@ -21,7 +21,7 @@ unsafe impl GlobalAlloc for Allocator {
             match heap.allocate_first_fit(layout) {
                 Err(()) => {
                     let size = heap.size();
-                    super::map_heap(&mut ActivePageTable::new(TableKind::Kernel), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE);
+                    super::map_heap(&mut KernelMapper::lock(), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE);
                     heap.extend(crate::KERNEL_HEAP_SIZE);
                 },
                 other => return other.ok().map_or(ptr::null_mut(), |allocation| allocation.as_ptr()),
diff --git a/src/allocator/mod.rs b/src/allocator/mod.rs
index 0617843..f8be017 100644
--- a/src/allocator/mod.rs
+++ b/src/allocator/mod.rs
@@ -1,5 +1,5 @@
 use rmm::Flusher;
-use crate::paging::{ActivePageTable, Page, PageFlags, VirtualAddress, mapper::PageFlushAll, entry::EntryFlags};
+use crate::paging::{KernelMapper, Page, PageFlags, VirtualAddress, mapper::PageFlushAll, entry::EntryFlags};
 
 #[cfg(not(feature="slab"))]
 pub use self::linked_list::Allocator;
@@ -13,13 +13,14 @@ mod linked_list;
 #[cfg(feature="slab")]
 mod slab;
 
-unsafe fn map_heap(active_table: &mut ActivePageTable, offset: usize, size: usize) {
+unsafe fn map_heap(mapper: &mut KernelMapper, offset: usize, size: usize) {
+    let mapper = mapper.get_mut().expect("failed to obtain exclusive access to KernelMapper while extending heap");
     let mut flush_all = PageFlushAll::new();
 
     let heap_start_page = Page::containing_address(VirtualAddress::new(offset));
     let heap_end_page = Page::containing_address(VirtualAddress::new(offset + size-1));
     for page in Page::range_inclusive(heap_start_page, heap_end_page) {
-        let result = active_table.map(page, PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))))
+        let result = mapper.map(page.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))))
             .expect("failed to map kernel heap");
         flush_all.consume(result);
     }
@@ -27,12 +28,12 @@ unsafe fn map_heap(active_table: &mut ActivePageTable, offset: usize, size: usiz
     flush_all.flush();
 }
 
-pub unsafe fn init(active_table: &mut ActivePageTable) {
+pub unsafe fn init() {
     let offset = crate::KERNEL_HEAP_OFFSET;
     let size = crate::KERNEL_HEAP_SIZE;
 
     // Map heap pages
-    map_heap(active_table, offset, size);
+    map_heap(&mut KernelMapper::lock(), offset, size);
 
     // Initialize global heap
     Allocator::init(offset, size);