diff --git a/src/arch/x86_64/consts.rs b/src/arch/x86_64/consts.rs index 5409caf..8f46749 100644 --- a/src/arch/x86_64/consts.rs +++ b/src/arch/x86_64/consts.rs @@ -21,6 +21,9 @@ /// Size of kernel heap pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB + /// Offset of temporary mapping for misc kernel bring-up actions + pub const KERNEL_TMP_MISC_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE; + /// Offset to kernel percpu variables //TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE; pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000; diff --git a/src/arch/x86_64/interrupt/trace.rs b/src/arch/x86_64/interrupt/trace.rs index f14d198..4b224d7 100644 --- a/src/arch/x86_64/interrupt/trace.rs +++ b/src/arch/x86_64/interrupt/trace.rs @@ -2,7 +2,7 @@ use core::{mem, str}; use goblin::elf::sym; use rustc_demangle::demangle; -use crate::paging::{ActivePageTable, VirtualAddress}; +use crate::paging::{ActivePageTable, PageTableType, VirtualAddress}; /// Get a stack trace //TODO: Check for stack being mapped before dereferencing @@ -13,7 +13,7 @@ pub unsafe fn stack_trace() { println!("TRACE: {:>016X}", rbp); //Maximum 64 frames - let active_table = ActivePageTable::new(); + let active_table = ActivePageTable::new(PageTableType::User); for _frame in 0..64 { if let Some(rip_rbp) = rbp.checked_add(mem::size_of::<usize>()) { if active_table.translate(VirtualAddress::new(rbp)).is_some() && active_table.translate(VirtualAddress::new(rip_rbp)).is_some() { diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs index 1b509fe..cbad3e0 100644 --- a/src/arch/x86_64/paging/mod.rs +++ b/src/arch/x86_64/paging/mod.rs @@ -12,7 +12,7 @@ use self::entry::EntryFlags; use self::mapper::{Mapper, MapperFlushAll}; use self::temporary_page::TemporaryPage; -pub use rmm::{PhysicalAddress, VirtualAddress}; +pub use rmm::PhysicalAddress; pub mod entry; pub mod mapper; @@ -186,7 +186,7 @@ pub unsafe fn init(
init_pat(); - let mut active_table = ActivePageTable::new_unlocked(); + let mut active_table = ActivePageTable::new_unlocked(PageTableType::User); let flush_all = map_tss(cpu_id, &mut active_table); flush_all.flush(&mut active_table); @@ -200,7 +200,7 @@ pub unsafe fn init_ap( ) -> usize { init_pat(); - let mut active_table = ActivePageTable::new_unlocked(); + let mut active_table = ActivePageTable::new_unlocked(PageTableType::User); let mut new_table = InactivePageTable::from_address(bsp_table); @@ -227,6 +227,11 @@ pub struct ActivePageTable { locked: bool, } +pub enum PageTableType { + User, + Kernel +} + impl Deref for ActivePageTable { type Target = Mapper; @@ -242,7 +247,7 @@ impl DerefMut for ActivePageTable { } impl ActivePageTable { - pub unsafe fn new() -> ActivePageTable { + pub unsafe fn new(_table_type: PageTableType) -> ActivePageTable { page_table_lock(); ActivePageTable { mapper: Mapper::new(), @@ -250,7 +255,7 @@ impl ActivePageTable { } } - pub unsafe fn new_unlocked() -> ActivePageTable { + pub unsafe fn new_unlocked(_table_type: PageTableType) -> ActivePageTable { ActivePageTable { mapper: Mapper::new(), locked: false, @@ -376,6 +381,34 @@ impl InactivePageTable { } } +/// A virtual address. 
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct VirtualAddress(usize); + +#[derive(Debug, PartialEq)] +pub enum VirtualAddressType { + User, + Kernel +} + +impl VirtualAddress { + pub fn new(address: usize) -> Self { + VirtualAddress(address) + } + + pub fn data(&self) -> usize { + self.0 + } + + pub fn get_type(&self) -> VirtualAddressType { + if ((self.0 >> 48) & 0xffff) == 0xffff { + VirtualAddressType::Kernel + } else { + VirtualAddressType::User + } + } +} + /// Page #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct Page { diff --git a/src/arch/x86_64/rmm.rs b/src/arch/x86_64/rmm.rs index 3cc1905..1a5ad28 100644 --- a/src/arch/x86_64/rmm.rs +++ b/src/arch/x86_64/rmm.rs @@ -8,6 +8,7 @@ use rmm::{ FrameCount, FrameUsage, MemoryArea, + PageFlags, PageMapper, PhysicalAddress, VirtualAddress, @@ -27,7 +28,7 @@ extern "C" { static mut __rodata_end: u8; } -unsafe fn page_flags<A: Arch>(virt: VirtualAddress) -> usize { +unsafe fn page_flags<A: Arch>(virt: VirtualAddress) -> PageFlags<A> { let virt_addr = virt.data(); // Test for being inside a region @@ -40,13 +41,13 @@ unsafe fn page_flags<A: Arch>(virt: VirtualAddress) -> usize { if in_section!(text) { // Remap text read-only, execute - 0 + PageFlags::new().write(false).execute(true) } else if in_section!(rodata) { // Remap rodata read-only, no execute - A::ENTRY_FLAG_NO_EXEC + PageFlags::new().write(false).execute(false) } else { - // Remap everything else read-write, no execute + // Remap everything else read-write, no execute - A::ENTRY_FLAG_WRITABLE | A::ENTRY_FLAG_NO_EXEC + PageFlags::new().write(true).execute(false) } } @@ -101,7 +102,7 @@ unsafe fn inner<A: Arch>(areas: &'static [MemoryArea], kernel_base: usize, kerne //TODO: remove backwards compatible recursive mapping mapper.table().set_entry(511, rmm::PageEntry::new( - mapper.table().phys().data() | A::ENTRY_FLAG_WRITABLE | A::ENTRY_FLAG_PRESENT | A::ENTRY_FLAG_NO_EXEC + mapper.table().phys().data() | A::ENTRY_FLAG_READWRITE |
A::ENTRY_FLAG_PRESENT | A::ENTRY_FLAG_NO_EXEC )); println!("Table: {:X}", mapper.table().phys().data()); diff --git a/src/context/arch/x86_64.rs b/src/context/arch/x86_64.rs index 64b0268..8c20ca8 100644 --- a/src/context/arch/x86_64.rs +++ b/src/context/arch/x86_64.rs @@ -54,7 +54,7 @@ impl Context { } } - pub fn get_page_table(&mut self) -> usize { + pub fn get_page_utable(&mut self) -> usize { self.cr3 } @@ -102,7 +102,7 @@ impl Context { self.fx = address; } - pub fn set_page_table(&mut self, address: usize) { + pub fn set_page_utable(&mut self, address: usize) { self.cr3 = address; } diff --git a/src/context/list.rs b/src/context/list.rs index 704642e..68fa204 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -97,6 +97,7 @@ impl ContextList { } context.arch.set_page_utable(unsafe { ActivePageTable::new(PageTableType::User).address() }); + #[cfg(target_arch = "aarch64")] context.arch.set_page_ktable(unsafe { ActivePageTable::new(PageTableType::Kernel).address() }); context.arch.set_fx(fx.as_ptr() as usize); context.arch.set_stack(stack.as_ptr() as usize + offset); diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 3710001..b79bb39 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -363,14 +363,22 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> { let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table"); InactivePageTable::new(frame, &mut active_utable, &mut temporary_upage) }; + context.arch.set_page_utable(unsafe { new_utable.address() }); + #[cfg(target_arch = "aarch64")] let mut new_ktable = { - let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table"); - InactivePageTable::new(frame, &mut active_ktable, &mut temporary_kpage) + let mut new_ktable = { + let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table"); + InactivePageTable::new(frame, &mut active_ktable, &mut temporary_kpage) + }; + 
context.arch.set_page_ktable(unsafe { new_ktable.address() }); + new_ktable }; - context.arch.set_page_utable(unsafe { new_utable.address() }); - context.arch.set_page_ktable(unsafe { new_ktable.address() }); + #[cfg(target_arch = "x86_64")] + let mut new_ktable = unsafe { + InactivePageTable::from_address(new_utable.address()) + }; // Copy kernel image mapping { @@ -531,7 +539,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> { } } } - + // Setup user TLS if let Some(mut tls) = tls_opt { // Copy TLS mapping