Remove old x86_64 paging code, migrate to RMM.

Author: 4lDO2
Date: 2022-07-17 14:11:11 +02:00
parent 302e55098c
commit 486d296d6d
5 changed files with 110 additions and 578 deletions
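In short: the hand-rolled ActivePageTable/InactivePageTable/Mapper types and the recursive PAGE_TABLE_LOCK are removed, and kernel mappings now go through rmm's PageMapper behind the new KernelMapper guard. A rough before/after sketch of caller-side code (illustrative only; `page` and the error handling are placeholders, not part of this diff):

    // Before: arch-specific mapper behind the recursive PAGE_TABLE_LOCK.
    let mut active_table = unsafe { ActivePageTable::new(TableKind::User) };
    let flush = active_table
        .map(page, PageFlags::new().write(true))
        .expect("out of frames");
    flush.flush();

    // After: rmm-backed PageMapper behind the KernelMapper guard, which only
    // hands out mutable access to the outermost locker on each CPU.
    let mut guard = KernelMapper::lock();
    let mapper = guard.get_mut().expect("KernelMapper locked re-entrantly");
    let flush = unsafe {
        mapper
            .map(page.start_address(), PageFlags::new().write(true))
            .expect("out of frames")
    };
    flush.flush();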


@@ -1,200 +1,9 @@
use super::{linear_phys_to_virt, Page, PAGE_SIZE, PageFlags, PhysicalAddress, VirtualAddress};
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::{allocate_frames, deallocate_frames, Enomem, Frame};
use super::RmmA;
use super::table::{Table, Level4};
pub use rmm::{Flusher, PageFlush, PageFlushAll};
pub struct Mapper<'table> {
pub(in super) p4: &'table mut Table<Level4>,
}
impl core::fmt::Debug for Mapper<'_> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "Mapper referencing P4 at {:p}", self.p4)
}
}
impl<'table> Mapper<'table> {
/// Wrap the current address space in a mapper.
///
/// # Safety
///
/// For this to be safe, the caller must have exclusive access to the pointer in the CR3
/// register.
// TODO: Find some lifetime hack we can use for ensuring exclusive access at compile time?
pub unsafe fn current() -> Mapper<'table> {
// SAFETY: We know that CR3 must be a valid frame, since the processor would triple fault
// otherwise, and the caller has ensured exclusive ownership of the KERNEL_OFFSET+CR3.
Self::from_p4_unchecked(&mut Frame::containing_address(PhysicalAddress::new(x86::controlregs::cr3() as usize)))
}
/// Wrap a top-level page table (an entire address space) in a mapper.
///
/// # Safety
///
/// For this to be safe, the caller must have exclusive access to the frame argument. The frame
/// must also be valid, and the frame must not outlive the lifetime.
pub unsafe fn from_p4_unchecked(frame: &mut Frame) -> Self {
let virt = linear_phys_to_virt(frame.start_address())
.expect("expected page table frame to fit within linear mapping");
Self {
p4: &mut *(virt.data() as *mut Table<Level4>),
}
}
pub fn p4(&self) -> &Table<Level4> {
&*self.p4
}
pub fn p4_mut(&mut self) -> &mut Table<Level4> {
&mut *self.p4
}
/// Map a page to a frame
pub fn map_to(&mut self, page: Page, frame: Frame, flags: PageFlags<RmmA>) -> PageFlush<RmmA> {
let p3 = self.p4_mut().next_table_create(page.p4_index());
let p2 = p3.next_table_create(page.p3_index());
let p1 = p2.next_table_create(page.p2_index());
assert!(p1[page.p1_index()].is_unused(),
"{:X}: Set to {:X}: {:?}, requesting {:X}: {:?}",
page.start_address().data(),
p1[page.p1_index()].address().data(), p1[page.p1_index()].flags(),
frame.start_address().data(), flags);
p1.increment_entry_count();
p1[page.p1_index()].set(frame, flags);
PageFlush::new(page.start_address())
}
/// Map a page to the next free frame
pub fn map(&mut self, page: Page, flags: PageFlags<RmmA>) -> Result<PageFlush<RmmA>, Enomem> {
let frame = allocate_frames(1).ok_or(Enomem)?;
Ok(self.map_to(page, frame, flags))
}
/// Update flags for a page
pub fn remap(&mut self, page: Page, flags: PageFlags<RmmA>) -> PageFlush<RmmA> {
let p3 = self.p4_mut().next_table_mut(page.p4_index()).expect("failed to remap: no p3");
let p2 = p3.next_table_mut(page.p3_index()).expect("failed to remap: no p2");
let p1 = p2.next_table_mut(page.p2_index()).expect("failed to remap: no p1");
let frame = p1[page.p1_index()].pointed_frame().expect("failed to remap: not mapped");
p1[page.p1_index()].set(frame, flags);
PageFlush::new(page.start_address())
}
/// Identity map a frame
pub fn identity_map(&mut self, frame: Frame, flags: PageFlags<RmmA>) -> PageFlush<RmmA> {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().data()));
self.map_to(page, frame, flags)
}
fn unmap_inner(&mut self, page: Page, keep_parents: bool) -> Frame {
let frame;
let p4 = self.p4_mut();
if let Some(p3) = p4.next_table_mut(page.p4_index()) {
if let Some(p2) = p3.next_table_mut(page.p3_index()) {
if let Some(p1) = p2.next_table_mut(page.p2_index()) {
frame = if let Some(frame) = p1[page.p1_index()].pointed_frame() {
frame
} else {
panic!("unmap_inner({:X}): frame not found", page.start_address().data())
};
p1.decrement_entry_count();
p1[page.p1_index()].set_unused();
if keep_parents || ! p1.is_unused() {
return frame;
}
} else {
panic!("unmap_inner({:X}): p1 not found", page.start_address().data());
}
if let Some(p1_frame) = p2[page.p2_index()].pointed_frame() {
//println!("unmap_inner: Free p1 {:?}", p1_frame);
p2.decrement_entry_count();
p2[page.p2_index()].set_unused();
deallocate_frames(p1_frame, 1);
} else {
panic!("unmap_inner({:X}): p1_frame not found", page.start_address().data());
}
if ! p2.is_unused() {
return frame;
}
} else {
panic!("unmap_inner({:X}): p2 not found", page.start_address().data());
}
if let Some(p2_frame) = p3[page.p3_index()].pointed_frame() {
//println!("unmap_inner: Free p2 {:?}", p2_frame);
p3.decrement_entry_count();
p3[page.p3_index()].set_unused();
deallocate_frames(p2_frame, 1);
} else {
panic!("unmap_inner({:X}): p2_frame not found", page.start_address().data());
}
if ! p3.is_unused() {
return frame;
}
} else {
panic!("unmap_inner({:X}): p3 not found", page.start_address().data());
}
if let Some(p3_frame) = p4[page.p4_index()].pointed_frame() {
//println!("unmap_inner: Free p3 {:?}", p3_frame);
p4.decrement_entry_count();
p4[page.p4_index()].set_unused();
deallocate_frames(p3_frame, 1);
} else {
panic!("unmap_inner({:X}): p3_frame not found", page.start_address().data());
}
frame
}
/// Unmap a page
pub fn unmap(&mut self, page: Page) -> PageFlush<RmmA> {
let frame = self.unmap_inner(page, false);
deallocate_frames(frame, 1);
PageFlush::new(page.start_address())
}
/// Unmap a page, return frame without free
pub fn unmap_return(&mut self, page: Page, keep_parents: bool) -> (PageFlush<RmmA>, Frame) {
let frame = self.unmap_inner(page, keep_parents);
(PageFlush::new(page.start_address()), frame)
}
pub fn translate_page(&self, page: Page) -> Option<Frame> {
self.translate_page_and_flags(page).map(|(frame, _)| frame)
}
pub fn translate_page_flags(&self, page: Page) -> Option<PageFlags<RmmA>> {
self.translate_page_and_flags(page).map(|(_, flags)| flags)
}
pub fn translate_page_and_flags(&self, page: Page) -> Option<(Frame, PageFlags<RmmA>)> {
self.p4().next_table(page.p4_index())
.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.map(|p1| &p1[page.p1_index()])
.and_then(|entry| Some((entry.pointed_frame()?, entry.flags())))
}
/// Translate a virtual address to a physical one
pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
let offset = virtual_address.data() % PAGE_SIZE;
self.translate_page(Page::containing_address(virtual_address))
.map(|frame| PhysicalAddress::new(frame.start_address().data() + offset))
}
}
pub struct InactiveFlusher { _inner: () }
impl InactiveFlusher {
// TODO: cpu id
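The PageFlush/PageFlushAll re-exports above replace the TLB bookkeeping the deleted Mapper did by hand. A minimal sketch of the contract they enforce, with `mapper`, `page`, and `flags` as placeholders:

    // Every mapping operation returns a PageFlush token that must be consumed:
    let flush = unsafe { mapper.map(page.start_address(), flags) }
        .expect("out of frames");
    flush.flush(); // invalidate this page's TLB entry on the current CPU

    // When the edited table is not the currently active one, invalidation can
    // be skipped instead of flushed; init_ap below does exactly that with a
    // PageFlushAll covering the whole percpu range:
    //     flush_all.ignore();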


@@ -1,16 +1,11 @@
//! # Paging
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use core::ops::{Deref, DerefMut};
use core::{mem, ptr};
use spin::Mutex;
use x86::msr;
use crate::memory::Frame;
use self::entry::EntryFlags;
use self::mapper::{Mapper, PageFlushAll};
use self::table::{Level4, Table};
use self::mapper::PageFlushAll;
pub use rmm::{
Arch as RmmArch,
@@ -22,47 +17,17 @@ pub use rmm::{
X8664Arch as RmmA,
};
pub type PageMapper = rmm::PageMapper<RmmA, crate::arch::rmm::LockedAllocator>;
pub use crate::rmm::KernelMapper;
pub mod entry;
pub mod mapper;
pub mod table;
pub mod temporary_page;
/// Number of entries per page table
pub const ENTRY_COUNT: usize = 512;
pub const ENTRY_COUNT: usize = RmmA::PAGE_ENTRIES;
/// Size of pages
pub const PAGE_SIZE: usize = 4096;
//TODO: This is a rudimentary recursive mutex used to naively fix multi_core issues, replace it!
pub struct PageTableLock {
cpu_id: usize,
count: usize,
}
pub static PAGE_TABLE_LOCK: Mutex<PageTableLock> = Mutex::new(PageTableLock {
cpu_id: 0,
count: 0,
});
fn page_table_lock() {
let cpu_id = crate::cpu_id();
loop {
{
let mut lock = PAGE_TABLE_LOCK.lock();
if lock.count == 0 || lock.cpu_id == cpu_id {
lock.cpu_id = cpu_id;
lock.count += 1;
return;
}
}
crate::arch::interrupt::pause();
}
}
fn page_table_unlock() {
let mut lock = PAGE_TABLE_LOCK.lock();
lock.count -= 1;
}
pub const PAGE_SIZE: usize = RmmA::PAGE_SIZE;
/// Setup page attribute table
unsafe fn init_pat() {
@@ -97,7 +62,7 @@ unsafe fn init_pat() {
}
/// Map percpu
unsafe fn map_percpu(cpu_id: usize, mapper: &mut Mapper) -> PageFlushAll<RmmA> {
unsafe fn map_percpu(cpu_id: usize, mapper: &mut PageMapper) -> PageFlushAll<RmmA> {
extern "C" {
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
@@ -118,7 +83,7 @@ unsafe fn map_percpu(cpu_id: usize, mapper: &mut Mapper) -> PageFlushAll<RmmA> {
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
let result = mapper.map(
page,
page.start_address(),
PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))),
)
.expect("failed to allocate page table frames while mapping percpu");
@@ -162,7 +127,7 @@ unsafe fn init_tcb(cpu_id: usize) -> usize {
/// Returns page table and thread control block offset
pub unsafe fn init(
cpu_id: usize,
) -> (ActivePageTable, usize) {
) -> usize {
extern "C" {
/// The starting byte of the text (code) data segment.
static mut __text_start: u8;
@@ -192,170 +157,30 @@ pub unsafe fn init(
init_pat();
let mut active_table = ActivePageTable::new_unlocked(TableKind::User);
let flush_all = map_percpu(cpu_id, &mut active_table);
let flush_all = map_percpu(cpu_id, KernelMapper::lock_manually(cpu_id).get_mut().expect("expected KernelMapper not to be locked re-entrant in paging::init"));
flush_all.flush();
return (active_table, init_tcb(cpu_id));
return init_tcb(cpu_id);
}
pub unsafe fn init_ap(
cpu_id: usize,
bsp_table: usize,
bsp_table: &mut KernelMapper,
) -> usize {
init_pat();
let mut active_table = ActivePageTable::new_unlocked(TableKind::User);
let mut new_table = InactivePageTable::from_address(bsp_table);
{
let flush_all = map_percpu(cpu_id, &mut new_table.mapper());
// The flush can be ignored as this is not the active table. See later active_table.switch
let flush_all = map_percpu(cpu_id, bsp_table.get_mut().expect("KernelMapper locked re-entrant for AP"));
// The flush can be ignored as this is not the active table. See later make_current().
flush_all.ignore();
};
// This switches the active table, which is setup by the bootloader, to a correct table
// setup by the lambda above. This will also flush the TLB
active_table.switch(new_table);
bsp_table.make_current();
init_tcb(cpu_id)
}
#[derive(Debug)]
pub struct ActivePageTable {
mapper: Mapper<'static>,
locked: bool,
}
impl Deref for ActivePageTable {
type Target = Mapper<'static>;
fn deref(&self) -> &Mapper<'static> {
&self.mapper
}
}
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper<'static> {
&mut self.mapper
}
}
impl ActivePageTable {
pub unsafe fn new(_table_kind: TableKind) -> ActivePageTable {
page_table_lock();
ActivePageTable {
mapper: Mapper::current(),
locked: true,
}
}
pub unsafe fn new_unlocked(_table_kind: TableKind) -> ActivePageTable {
ActivePageTable {
mapper: Mapper::current(),
locked: false,
}
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
let old_table = InactivePageTable {
frame: Frame::containing_address(unsafe {
RmmA::table()
})
};
unsafe {
// Activate new page table
RmmA::set_table(new_table.frame.start_address());
// Update mapper to new page table
self.mapper = Mapper::current();
}
old_table
}
pub fn flush(&mut self, page: Page) {
unsafe {
RmmA::invalidate(page.start_address());
}
}
pub fn flush_all(&mut self) {
unsafe {
RmmA::invalidate_all();
}
}
pub unsafe fn address(&self) -> usize {
RmmA::table().data()
}
pub fn mapper<'a>(&'a mut self) -> Mapper<'a> {
Mapper {
p4: self.p4,
}
}
}
impl Drop for ActivePageTable {
fn drop(&mut self) {
if self.locked {
page_table_unlock();
self.locked = false;
}
}
}
pub struct InactivePageTable {
frame: Frame,
}
impl InactivePageTable {
/// Create a new inactive page table, located at a given frame.
///
/// # Safety
///
/// For this to be safe, the caller must have exclusive access to the corresponding virtual
/// address of the frame.
pub unsafe fn new(
frame: Frame,
) -> InactivePageTable {
// FIXME: Use active_table to ensure that the newly-allocated frame be linearly mapped, in
// case it is outside the pre-mapped physical address range, or if such a range is too
// large to fit the whole physical address space in the virtual address space.
{
let table = linear_phys_to_virt(frame.start_address())
.expect("cannot initialize InactivePageTable (currently) without the frame being linearly mapped");
// now we are able to zero the table
// SAFETY: The caller must ensure exclusive access to the pointed-to virtual address of
// the frame.
(&mut *(table.data() as *mut Table::<Level4>)).zero();
}
InactivePageTable { frame }
}
pub unsafe fn from_address(address: usize) -> InactivePageTable {
InactivePageTable {
frame: Frame::containing_address(PhysicalAddress::new(address)),
}
}
pub fn mapper<'inactive_table>(&'inactive_table mut self) -> Mapper<'inactive_table> {
unsafe { Mapper::from_p4_unchecked(&mut self.frame) }
}
pub unsafe fn address(&self) -> usize {
self.frame.start_address().data()
}
}
pub fn linear_phys_to_virt(physical: PhysicalAddress) -> Option<VirtualAddress> {
physical.data().checked_add(crate::PHYS_OFFSET).map(VirtualAddress::new)
}
pub fn linear_virt_to_phys(virt: VirtualAddress) -> Option<PhysicalAddress> {
virt.data().checked_sub(crate::PHYS_OFFSET).map(PhysicalAddress::new)
}
/// Page
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
@@ -426,3 +251,12 @@ impl Iterator for PageIter {
}
}
}
/// Round down to the nearest multiple of page size
pub fn round_down_pages(number: usize) -> usize {
number - number % PAGE_SIZE
}
/// Round up to the nearest multiple of page size
pub fn round_up_pages(number: usize) -> usize {
round_down_pages(number + PAGE_SIZE - 1)
}
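A quick check of the arithmetic, assuming PAGE_SIZE == 4096:

    assert_eq!(round_down_pages(4096 + 123), 4096);
    assert_eq!(round_up_pages(4096 + 123), 2 * 4096);
    assert_eq!(round_up_pages(2 * 4096), 2 * 4096); // exact multiples are fixed points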


@@ -1,131 +0,0 @@
//! # Page table
//! Code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use crate::memory::allocate_frames;
use crate::paging::{linear_phys_to_virt, VirtualAddress};
use super::{ENTRY_COUNT, PageFlags};
use super::entry::{Entry, EntryFlags};
pub trait TableLevel {}
pub enum Level4 {}
pub enum Level3 {}
pub enum Level2 {}
pub enum Level1 {}
impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}
pub trait HierarchicalLevel: TableLevel {
type NextLevel: TableLevel;
}
impl HierarchicalLevel for Level4 {
type NextLevel = Level3;
}
impl HierarchicalLevel for Level3 {
type NextLevel = Level2;
}
impl HierarchicalLevel for Level2 {
type NextLevel = Level1;
}
#[repr(C, align(4096))]
pub struct Table<L: TableLevel> {
entries: [Entry; ENTRY_COUNT],
level: PhantomData<L>,
}
impl<L> Table<L> where L: TableLevel {
pub fn is_unused(&self) -> bool {
self.entry_count() == 0
}
pub fn zero(&mut self) {
for entry in self.entries.iter_mut() {
entry.set_zero();
}
}
/// Set number of entries in first table entry
fn set_entry_count(&mut self, count: u64) {
debug_assert!(count <= ENTRY_COUNT as u64, "count can't be greater than ENTRY_COUNT");
self.entries[0].set_counter_bits(count)
}
/// Get number of entries in first table entry
fn entry_count(&self) -> u64 {
self.entries[0].counter_bits()
}
pub fn increment_entry_count(&mut self) {
let current_count = self.entry_count();
self.set_entry_count(current_count + 1);
}
pub fn decrement_entry_count(&mut self) {
let current_count = self.entry_count();
self.set_entry_count(current_count - 1);
}
}
impl<L> Table<L> where L: HierarchicalLevel {
pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
self.next_table_address(index).map(|address| unsafe { &*(address.data() as *const _) })
}
pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
self.next_table_address(index).map(|address| unsafe { &mut *(address.data() as *mut _) })
}
pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel> {
if self.next_table(index).is_none() {
assert!(!self[index].flags().has_flag(EntryFlags::HUGE_PAGE.bits()),
"next_table_create does not support huge pages");
let frame = allocate_frames(1).expect("no frames available");
self.increment_entry_count();
//TODO: RISC-V will not like this
self[index].set(frame, PageFlags::new_table().execute(true).write(true).user(true) /* Allow users to go down the page table, implement permissions at the page level */);
self.next_table_mut(index).unwrap().zero();
}
self.next_table_mut(index).unwrap()
}
fn next_table_address(&self, index: usize) -> Option<VirtualAddress> {
let entry = &self[index];
let entry_flags = entry.flags();
entry.pointed_frame().and_then(|next_table_frame| {
if entry_flags.has_flag(EntryFlags::HUGE_PAGE.bits()) {
return None;
}
let next_table_physaddr = next_table_frame.start_address();
let next_table_virtaddr = linear_phys_to_virt(next_table_physaddr)
.expect("expected page table frame to fit within linear mapping");
Some(next_table_virtaddr)
})
}
}
impl<L> Index<usize> for Table<L> where L: TableLevel {
type Output = Entry;
fn index(&self, index: usize) -> &Entry {
&self.entries[index]
}
}
impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
fn index_mut(&mut self, index: usize) -> &mut Entry {
&mut self.entries[index]
}
}
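A note on the increment_entry_count/decrement_entry_count machinery deleted above: each table cached the number of entries in use inside software-available bits of entries[0], making is_unused() O(1) instead of a 512-entry scan. A standalone sketch of that trick; the bit positions are an assumption here (x86_64 leaves PTE bits 52..=62 to software), not the values from the deleted entry.rs:

    const COUNTER_SHIFT: u32 = 52;
    const COUNTER_MASK: u64 = 0x3ff << COUNTER_SHIFT; // 10 bits: counts 0..=512 fit

    fn counter_bits(entry0: u64) -> u64 {
        (entry0 & COUNTER_MASK) >> COUNTER_SHIFT
    }

    fn set_counter_bits(entry0: &mut u64, count: u64) {
        debug_assert!(count <= 512);
        *entry0 = (*entry0 & !COUNTER_MASK) | (count << COUNTER_SHIFT);
    }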


@@ -1,42 +0,0 @@
//! Temporarily map a page
//! From [Phil Opp's Blog](http://os.phil-opp.com/remap-the-kernel.html)
use crate::memory::Frame;
use super::{ActivePageTable, Page, PageFlags, RmmA, VirtualAddress};
use super::table::{Table, Level1};
pub struct TemporaryPage {
page: Page,
}
impl TemporaryPage {
pub fn new(page: Page) -> TemporaryPage {
TemporaryPage { page }
}
pub fn start_address(&self) -> VirtualAddress {
self.page.start_address()
}
/// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page.
pub fn map(&mut self, frame: Frame, flags: PageFlags<RmmA>, active_table: &mut ActivePageTable) -> VirtualAddress {
assert!(active_table.translate_page(self.page).is_none(), "temporary page is already mapped");
let result = active_table.map_to(self.page, frame, flags);
result.flush();
self.page.start_address()
}
/// Maps the temporary page to the given page table frame in the active
/// table. Returns a reference to the now mapped table.
pub fn map_table_frame(&mut self, frame: Frame, flags: PageFlags<RmmA>, active_table: &mut ActivePageTable) -> &mut Table<Level1> {
unsafe { &mut *(self.map(frame, flags, active_table).data() as *mut Table<Level1>) }
}
/// Unmaps the temporary page in the active table.
pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
let (result, _frame) = active_table.unmap_return(self.page, true);
result.flush();
}
}


@@ -2,6 +2,7 @@ use core::{
cmp,
mem,
slice,
sync::atomic::{self, AtomicUsize, Ordering},
};
use rmm::{
KILOBYTE,
@@ -20,7 +21,7 @@ use rmm::{
X8664Arch as RmmA,
};
use spin::Mutex;
use spin::{Mutex, MutexGuard};
extern "C" {
/// The starting byte of the text (code) data segment.
@@ -210,21 +211,15 @@ unsafe fn inner<A: Arch>(
BuddyAllocator::<A>::new(bump_allocator).expect("failed to create BuddyAllocator")
}
pub struct LockedAllocator {
inner: Mutex<Option<BuddyAllocator<RmmA>>>,
}
// There can only be one allocator (at the moment), so making this a ZST is great!
#[derive(Clone, Copy)]
pub struct LockedAllocator;
impl LockedAllocator {
const fn new() -> Self {
Self {
inner: Mutex::new(None)
}
}
}
static INNER_ALLOCATOR: Mutex<Option<BuddyAllocator<RmmA>>> = Mutex::new(None);
impl FrameAllocator for LockedAllocator {
unsafe fn allocate(&mut self, count: FrameCount) -> Option<PhysicalAddress> {
if let Some(ref mut allocator) = *self.inner.lock() {
if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock() {
allocator.allocate(count)
} else {
None
@@ -232,38 +227,105 @@ impl FrameAllocator for LockedAllocator {
}
unsafe fn free(&mut self, address: PhysicalAddress, count: FrameCount) {
if let Some(ref mut allocator) = *self.inner.lock() {
if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock() {
allocator.free(address, count)
}
}
unsafe fn usage(&self) -> FrameUsage {
if let Some(ref allocator) = *self.inner.lock() {
if let Some(ref allocator) = *INNER_ALLOCATOR.lock() {
allocator.usage()
} else {
FrameUsage::new(FrameCount::new(0), FrameCount::new(0))
}
}
}
impl core::fmt::Debug for LockedAllocator {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match INNER_ALLOCATOR.try_lock().as_deref() {
Some(Some(alloc)) => write!(f, "[locked allocator: {:?}]", unsafe { alloc.usage() }),
Some(None) => write!(f, "[uninitialized lock allocator]"),
None => write!(f, "[failed to lock]"),
}
}
}
static mut AREAS: [MemoryArea; 512] = [MemoryArea {
base: PhysicalAddress::new(0),
size: 0,
}; 512];
pub static mut FRAME_ALLOCATOR: LockedAllocator = LockedAllocator::new();
pub static FRAME_ALLOCATOR: LockedAllocator = LockedAllocator;
pub unsafe fn mapper_new(table_addr: PhysicalAddress) -> PageMapper<'static, RmmA, LockedAllocator> {
PageMapper::new(table_addr, &mut FRAME_ALLOCATOR)
const NO_PROCESSOR: usize = !0;
static LOCK_OWNER: AtomicUsize = AtomicUsize::new(NO_PROCESSOR);
static LOCK_COUNT: AtomicUsize = AtomicUsize::new(0);
// TODO: Support, perhaps via const generics, embedding address checking in PageMapper, thereby
// statically enforcing that the kernel mapper can only map things in the kernel half, and vice
// versa.
/// A guard to the global lock protecting the upper 128 TiB of kernel address space.
///
/// NOTE: Use this with great care! Since heap allocations may also require this lock when the heap
needs to be expanded, it must not be held while memory allocations are done!
// TODO: Make the lock finer-grained so that e.g. the heap part can be independent from e.g.
// PHYS_PML4?
pub struct KernelMapper {
mapper: crate::paging::PageMapper,
ro: bool,
}
impl KernelMapper {
fn lock_inner(current_processor: usize) -> bool {
loop {
match LOCK_OWNER.compare_exchange_weak(NO_PROCESSOR, current_processor, Ordering::Acquire, Ordering::Relaxed) {
Ok(_) => break,
// already owned by this hardware thread
Err(id) if id == current_processor => break,
// either CAS failed, or some other hardware thread holds the lock
Err(_) => core::hint::spin_loop(),
}
}
//TODO: global paging lock?
pub unsafe fn mapper_create() -> Option<PageMapper<'static, RmmA, LockedAllocator>> {
PageMapper::create(&mut FRAME_ALLOCATOR)
let prev_count = LOCK_COUNT.fetch_add(1, Ordering::Relaxed);
atomic::compiler_fence(Ordering::Acquire);
prev_count > 0
}
pub unsafe fn lock_for_manual_mapper(current_processor: usize, mapper: crate::paging::PageMapper) -> Self {
let ro = Self::lock_inner(current_processor);
Self {
mapper,
ro,
}
}
pub fn lock_manually(current_processor: usize) -> Self {
unsafe { Self::lock_for_manual_mapper(current_processor, PageMapper::new(RmmA::table(), FRAME_ALLOCATOR)) }
}
pub fn lock() -> Self {
Self::lock_manually(crate::cpu_id())
}
pub fn get_mut(&mut self) -> Option<&mut crate::paging::PageMapper> {
if self.ro {
None
} else {
Some(&mut self.mapper)
}
}
}
impl core::ops::Deref for KernelMapper {
type Target = crate::paging::PageMapper;
pub unsafe fn mapper_current() -> PageMapper<'static, RmmA, LockedAllocator> {
PageMapper::current(&mut FRAME_ALLOCATOR)
fn deref(&self) -> &Self::Target {
&self.mapper
}
}
impl Drop for KernelMapper {
fn drop(&mut self) {
if LOCK_COUNT.fetch_sub(1, Ordering::Relaxed) == 1 {
LOCK_OWNER.store(NO_PROCESSOR, Ordering::Release);
}
atomic::compiler_fence(Ordering::Release);
}
}
pub unsafe fn init(
@@ -388,5 +450,5 @@ pub unsafe fn init(
acpi_base, acpi_size_aligned,
initfs_base, initfs_size_aligned,
);
*FRAME_ALLOCATOR.inner.lock() = Some(allocator);
*INNER_ALLOCATOR.lock() = Some(allocator);
}
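Taken together, KernelMapper behaves as a per-CPU re-entrant spinlock over the shared kernel page tables: the outermost acquisition on a CPU yields a writable guard, nested acquisitions yield read-only ones, and dropping the last guard clears LOCK_OWNER. A usage sketch (caller code hypothetical; `virt` is a placeholder address):

    let mut guard = KernelMapper::lock();
    match guard.get_mut() {
        Some(mapper) => unsafe {
            // Outermost lock on this CPU: exclusive, free to modify tables.
            mapper
                .map(virt, PageFlags::new().write(true))
                .expect("out of frames")
                .flush();
        },
        None => {
            // Nested lock on the same CPU: shared access only, through Deref.
        }
    }
    // Dropping `guard` decrements LOCK_COUNT; the outermost drop releases
    // LOCK_OWNER back to NO_PROCESSOR.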