diff --git a/src/allocator/linked_list.rs b/src/allocator/linked_list.rs index 0b18ac7..1c8acb6 100644 --- a/src/allocator/linked_list.rs +++ b/src/allocator/linked_list.rs @@ -3,7 +3,7 @@ use core::ptr::{self, NonNull}; use linked_list_allocator::Heap; use spin::Mutex; -use crate::paging::ActivePageTable; +use crate::paging::{ActivePageTable, PageTableType}; static HEAP: Mutex> = Mutex::new(None); @@ -32,7 +32,7 @@ unsafe impl GlobalAlloc for Allocator { panic!("__rust_allocate: heap not initialized"); }; - super::map_heap(&mut ActivePageTable::new(), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE); + super::map_heap(&mut ActivePageTable::new(PageTableType::Kernel), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE); if let Some(ref mut heap) = *HEAP.lock() { heap.extend(crate::KERNEL_HEAP_SIZE); diff --git a/src/arch/aarch64/device/gic.rs b/src/arch/aarch64/device/gic.rs index 582854b..92c3724 100644 --- a/src/arch/aarch64/device/gic.rs +++ b/src/arch/aarch64/device/gic.rs @@ -57,7 +57,7 @@ pub struct GicDistIf { impl GicDistIf { unsafe fn init(&mut self) { // Map in the Distributor interface - let mut active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + let mut active_table = ActivePageTable::new(PageTableType::Kernel); let start_frame = Frame::containing_address(PhysicalAddress::new(0x08000000)); let end_frame = Frame::containing_address(PhysicalAddress::new(0x08000000 + 0x10000 - 1)); diff --git a/src/arch/aarch64/device/rtc.rs b/src/arch/aarch64/device/rtc.rs index de95ab4..e547523 100644 --- a/src/arch/aarch64/device/rtc.rs +++ b/src/arch/aarch64/device/rtc.rs @@ -29,7 +29,7 @@ struct Pl031rtc { impl Pl031rtc { unsafe fn init(&mut self) { - let mut active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + let mut active_table = ActivePageTable::new(PageTableType::Kernel); let start_frame = Frame::containing_address(PhysicalAddress::new(0x09010000)); let end_frame = 
Frame::containing_address(PhysicalAddress::new(0x09010000 + 0x1000 - 1)); diff --git a/src/arch/aarch64/device/serial.rs b/src/arch/aarch64/device/serial.rs index a852fde..1bba91e 100644 --- a/src/arch/aarch64/device/serial.rs +++ b/src/arch/aarch64/device/serial.rs @@ -16,7 +16,7 @@ pub unsafe fn init() { } let (base, size) = device_tree::diag_uart_range(crate::KERNEL_DTB_OFFSET, crate::KERNEL_DTB_MAX_SIZE).unwrap(); - let mut active_ktable = unsafe { ActivePageTable::new(/* TODO PageTableType::Kernel */) }; + let mut active_ktable = unsafe { ActivePageTable::new(PageTableType::Kernel) }; let mut flush_all = MapperFlushAll::new(); let start_frame = Frame::containing_address(PhysicalAddress::new(base)); diff --git a/src/arch/aarch64/interrupt/trace.rs b/src/arch/aarch64/interrupt/trace.rs index 7db7bf1..37eb101 100644 --- a/src/arch/aarch64/interrupt/trace.rs +++ b/src/arch/aarch64/interrupt/trace.rs @@ -12,7 +12,7 @@ pub unsafe fn stack_trace() { println!("TRACE: {:>016x}", fp); //Maximum 64 frames - let active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + let active_table = ActivePageTable::new(PageTableType::Kernel); for _frame in 0..64 { if let Some(pc_fp) = fp.checked_add(mem::size_of::()) { if active_table.translate(VirtualAddress::new(fp)).is_some() && active_table.translate(VirtualAddress::new(pc_fp)).is_some() { diff --git a/src/arch/aarch64/paging/mapper.rs b/src/arch/aarch64/paging/mapper.rs index 9828b99..4768452 100644 --- a/src/arch/aarch64/paging/mapper.rs +++ b/src/arch/aarch64/paging/mapper.rs @@ -3,7 +3,7 @@ use core::ptr::Unique; use crate::memory::{allocate_frames, deallocate_frames, Frame}; -use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress, VAddrType}; +use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress, VirtualAddressType}; use super::entry::{EntryFlags, PageDescriptorFlags}; use super::table::{self, Table, Level4}; @@ -112,10 +112,10 @@ impl Mapper { if
flags.contains(EntryFlags::NO_EXECUTE) { match page.start_address().get_type() { - VAddrType::User => { + VirtualAddressType::User => { translated_flags.insert(PageDescriptorFlags::UXN); }, - VAddrType::Kernel => { + VirtualAddressType::Kernel => { translated_flags.insert(PageDescriptorFlags::PXN); }, } @@ -165,10 +165,10 @@ impl Mapper { if flags.contains(EntryFlags::NO_EXECUTE) { match page.start_address().get_type() { - VAddrType::User => { + VirtualAddressType::User => { translated_flags.insert(PageDescriptorFlags::UXN); }, - VAddrType::Kernel => { + VirtualAddressType::Kernel => { translated_flags.insert(PageDescriptorFlags::PXN); }, } diff --git a/src/arch/aarch64/paging/mod.rs b/src/arch/aarch64/paging/mod.rs index b52c79d..29bf820 100644 --- a/src/arch/aarch64/paging/mod.rs +++ b/src/arch/aarch64/paging/mod.rs @@ -169,7 +169,7 @@ pub unsafe fn init( init_mair(); - let mut active_table = ActivePageTable::new_unlocked(); + let mut active_table = ActivePageTable::new_unlocked(PageTableType::Kernel); let flush_all = map_tss(cpu_id, &mut active_table); flush_all.flush(&mut active_table); @@ -183,12 +183,12 @@ pub unsafe fn init_ap( ) -> usize { init_mair(); - let mut active_table = ActivePageTable::new_unlocked(); + let mut active_table = ActivePageTable::new_unlocked(PageTableType::Kernel); let mut new_table = InactivePageTable::from_address(bsp_table); let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new( - crate::USER_TMP_MISC_OFFSET, + crate::KERNEL_TMP_MISC_OFFSET, ))); active_table.with(&mut new_table, &mut temporary_page, |mapper| { @@ -230,8 +230,7 @@ impl DerefMut for ActivePageTable { impl ActivePageTable { //TODO: table_type argument - pub unsafe fn new() -> ActivePageTable { - let table_type = PageTableType::Kernel; + pub unsafe fn new(table_type: PageTableType) -> ActivePageTable { page_table_lock(); ActivePageTable { mapper: Mapper::new(match table_type { @@ -243,8 +242,7 @@ impl ActivePageTable { } //TODO: 
table_type argument - pub unsafe fn new_unlocked() -> ActivePageTable { - let table_type = PageTableType::Kernel; + pub unsafe fn new_unlocked(table_type: PageTableType) -> ActivePageTable { ActivePageTable { mapper: Mapper::new(match table_type { PageTableType::User => MapperType::User, @@ -394,7 +392,7 @@ impl PhysicalAddress { #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct VirtualAddress(usize); -pub enum VAddrType { +pub enum VirtualAddressType { User, Kernel } @@ -408,11 +406,11 @@ impl VirtualAddress { self.0 } - pub fn get_type(&self) -> VAddrType { + pub fn get_type(&self) -> VirtualAddressType { if ((self.0 >> 48) & 0xffff) == 0xffff { - VAddrType::Kernel + VirtualAddressType::Kernel } else { - VAddrType::User + VirtualAddressType::User } } } diff --git a/src/context/list.rs b/src/context/list.rs index d56b0e2..3c880fe 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -4,7 +4,7 @@ use alloc::collections::BTreeMap; use core::alloc::{GlobalAlloc, Layout}; use core::{iter, mem}; use core::sync::atomic::Ordering; -use crate::paging; +use crate::paging::{ActivePageTable, PageTableType}; use spin::RwLock; use crate::syscall::error::{Result, Error, EAGAIN}; diff --git a/src/context/memory.rs b/src/context/memory.rs index bd24a24..e050467 100644 --- a/src/context/memory.rs +++ b/src/context/memory.rs @@ -15,7 +15,7 @@ use crate::arch::paging::PAGE_SIZE; use crate::context::file::FileDescriptor; use crate::ipi::{ipi, IpiKind, IpiTarget}; use crate::memory::Frame; -use crate::paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress}; +use crate::paging::{ActivePageTable, InactivePageTable, PageTableType, Page, PageIter, PhysicalAddress, VirtualAddress, VirtualAddressType}; use crate::paging::entry::EntryFlags; use crate::paging::mapper::MapperFlushAll; use crate::paging::temporary_page::TemporaryPage; @@ -312,7 +312,10 @@ impl Grant { } pub fn physmap(from: PhysicalAddress, to: VirtualAddress, 
size: usize, flags: EntryFlags) -> Grant { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match to.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -339,7 +342,10 @@ impl Grant { } pub fn map(to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match to.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -365,7 +371,10 @@ impl Grant { } pub fn map_inactive(from: VirtualAddress, to: VirtualAddress, size: usize, flags: EntryFlags, desc_opt: Option, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match to.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; //TODO: Do not allocate let mut frames = VecDeque::with_capacity(size/PAGE_SIZE); @@ -406,7 +415,10 @@ impl Grant { pub fn secret_clone(&self, new_start: VirtualAddress) -> Grant { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match new_start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -463,7 +475,10 @@ impl Grant { pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut 
TemporaryPage) { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match new_start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -499,7 +514,11 @@ impl Grant { pub fn unmap(mut self) { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start_address().get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; + let mut flush_all = MapperFlushAll::new(); @@ -528,7 +547,10 @@ impl Grant { pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start_address().get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; active_table.with(new_table, temporary_page, |mapper| { let start_page = Page::containing_address(self.start_address()); @@ -703,7 +725,10 @@ impl Memory { } fn map(&mut self, clear: bool) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -723,7 +748,10 @@ impl Memory { } fn unmap(&mut self) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { 
ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -738,7 +766,10 @@ impl Memory { /// A complicated operation to move a piece of memory to a new page table /// It also allows for changing the address at the same time pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match new_start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -760,7 +791,10 @@ impl Memory { } pub fn remap(&mut self, new_flags: EntryFlags) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -775,7 +809,10 @@ impl Memory { } pub fn resize(&mut self, new_size: usize, clear: bool) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; //TODO: Calculate page changes to minimize operations if new_size > self.size { diff --git a/src/ptrace.rs b/src/ptrace.rs index 318cf5c..930b10e 100644 --- a/src/ptrace.rs +++ b/src/ptrace.rs @@ -9,7 +9,7 @@ use crate::{ entry::EntryFlags, mapper::MapperFlushAll, temporary_page::TemporaryPage, - ActivePageTable, InactivePageTable, Page, PAGE_SIZE, VirtualAddress + ActivePageTable, 
InactivePageTable, PageTableType, Page, PAGE_SIZE, VirtualAddress } }, common::unique::Unique, @@ -458,7 +458,7 @@ where F: FnOnce(*mut u8) -> Result<()> // in `proc:/mem`, or return a partial read/write. let start = Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)); - let mut active_page_table = unsafe { ActivePageTable::new() }; + let mut active_page_table = unsafe { ActivePageTable::new(PageTableType::User) }; let mut target_page_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) }; diff --git a/src/scheme/memory.rs b/src/scheme/memory.rs index fe8810e..5fc4e55 100644 --- a/src/scheme/memory.rs +++ b/src/scheme/memory.rs @@ -1,7 +1,7 @@ use crate::context; use crate::context::memory::{entry_flags, Grant}; use crate::memory::{free_frames, used_frames, PAGE_SIZE}; -use crate::paging::{ActivePageTable, VirtualAddress}; +use crate::paging::{ActivePageTable, PageTableType, VirtualAddress, VirtualAddressType}; use crate::syscall::data::{Map, OldMap, StatVfs}; use crate::syscall::error::*; use crate::syscall::flag::MapFlags; @@ -48,7 +48,10 @@ impl Scheme for MemoryScheme { // Make sure it's *absolutely* not mapped already // TODO: Keep track of all allocated memory so this isn't necessary - let active_table = unsafe { ActivePageTable::new() }; + let active_table = match VirtualAddress::new(map.address).get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; for page in region.pages() { if active_table.translate_page(page).is_some() { diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs index efbfe3b..f6e39b6 100644 --- a/src/syscall/driver.rs +++ b/src/syscall/driver.rs @@ -1,6 +1,6 @@ use crate::interrupt::InterruptStack; use crate::memory::{allocate_frames_complex, deallocate_frames, Frame}; -use crate::paging::{ActivePageTable, PhysicalAddress, VirtualAddress}; +use 
crate::paging::{ActivePageTable, PageTableType, PhysicalAddress, VirtualAddress, VirtualAddressType}; use crate::paging::entry::EntryFlags; use crate::context; use crate::context::memory::{Grant, Region}; @@ -153,7 +153,11 @@ pub fn physunmap(virtual_address: usize) -> Result { pub fn virttophys(virtual_address: usize) -> Result { enforce_root()?; - let active_table = unsafe { ActivePageTable::new() }; + let active_table = match VirtualAddress::new(virtual_address).get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; + match active_table.translate(VirtualAddress::new(virtual_address)) { Some(physical_address) => Ok(physical_address.data()), None => Err(Error::new(EFAULT)) diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 978f867..535f68d 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -19,7 +19,7 @@ use crate::memory::allocate_frames; use crate::paging::entry::EntryFlags; use crate::paging::mapper::MapperFlushAll; use crate::paging::temporary_page::TemporaryPage; -use crate::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, PAGE_SIZE}; +use crate::paging::{ActivePageTable, InactivePageTable, PageTableType, Page, VirtualAddress, PAGE_SIZE}; use crate::{ptrace, syscall}; use crate::scheme::FileHandle; use crate::start::usermode; @@ -338,7 +338,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { context.arch = arch; - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = unsafe { ActivePageTable::new(PageTableType::User) }; let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET))); @@ -1277,7 +1277,7 @@ pub fn mprotect(address: usize, size: usize, flags: MapFlags) -> Result { let end_offset = size.checked_sub(1).ok_or(Error::new(EFAULT))?; let end_address = 
address.checked_add(end_offset).ok_or(Error::new(EFAULT))?; - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = unsafe { ActivePageTable::new(PageTableType::User) }; let mut flush_all = MapperFlushAll::new(); diff --git a/src/syscall/validate.rs b/src/syscall/validate.rs index 3961feb..5806a53 100644 --- a/src/syscall/validate.rs +++ b/src/syscall/validate.rs @@ -1,6 +1,6 @@ use core::{mem, slice}; -use crate::paging::{ActivePageTable, Page, VirtualAddress}; +use crate::paging::{ActivePageTable, PageTableType, Page, VirtualAddress, VirtualAddressType}; use crate::paging::entry::EntryFlags; use crate::syscall::error::*; @@ -8,7 +8,10 @@ fn validate(address: usize, size: usize, flags: EntryFlags) -> Result<()> { let end_offset = size.checked_sub(1).ok_or(Error::new(EFAULT))?; let end_address = address.checked_add(end_offset).ok_or(Error::new(EFAULT))?; - let active_table = unsafe { ActivePageTable::new() }; + let active_table = match VirtualAddress::new(address).get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let start_page = Page::containing_address(VirtualAddress::new(address)); let end_page = Page::containing_address(VirtualAddress::new(end_address));