diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index e72f5af..f8bd8b1 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -3,7 +3,7 @@
 use core::cmp;
 
-use crate::arch::rmm::FRAME_ALLOCATOR;
+use crate::arch::rmm::LockedAllocator;
 pub use crate::paging::{PAGE_SIZE, PhysicalAddress};
 use rmm::{
@@ -26,21 +26,21 @@ pub struct MemoryArea {
 /// Get the number of frames available
 pub fn free_frames() -> usize {
     unsafe {
-        FRAME_ALLOCATOR.usage().free().data()
+        LockedAllocator.usage().free().data()
     }
 }
 
 /// Get the number of frames used
 pub fn used_frames() -> usize {
     unsafe {
-        FRAME_ALLOCATOR.usage().used().data()
+        LockedAllocator.usage().used().data()
     }
 }
 
 /// Allocate a range of frames
 pub fn allocate_frames(count: usize) -> Option<Frame> {
     unsafe {
-        FRAME_ALLOCATOR.allocate(FrameCount::new(count)).map(|phys| {
+        LockedAllocator.allocate(FrameCount::new(count)).map(|phys| {
             Frame::containing_address(PhysicalAddress::new(phys.data()))
         })
     }
@@ -65,7 +65,7 @@ pub fn allocate_frames_complex(count: usize, flags: PhysallocFlags, strategy: Op
 /// Deallocate a range of frames frame
 pub fn deallocate_frames(frame: Frame, count: usize) {
     unsafe {
-        FRAME_ALLOCATOR.free(
+        LockedAllocator.free(
             rmm::PhysicalAddress::new(frame.start_address().data()),
             FrameCount::new(count)
         );
@@ -103,6 +103,11 @@ impl Frame {
     pub fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
         FrameIter { start, end }
     }
+    pub fn next_by(&self, n: usize) -> Self {
+        Self {
+            number: self.number + n,
+        }
+    }
 }
 
 pub struct FrameIter {
diff --git a/src/ptrace.rs b/src/ptrace.rs
index 302646e..f1f0118 100644
--- a/src/ptrace.rs
+++ b/src/ptrace.rs
@@ -7,10 +7,7 @@ use rmm::Arch;
 use crate::{
     arch::{
         interrupt::InterruptStack,
-        paging::{
-            mapper::PageFlushAll,
-            ActivePageTable, InactivePageTable, Page, PAGE_SIZE, TableKind, VirtualAddress
-        }
+        paging::{PAGE_SIZE, VirtualAddress},
     },
     common::unique::Unique,
     context::{self, signal, Context, ContextId, memory::AddrSpace},
@@ -34,12 +31,8 @@ use
alloc::{
         btree_map::Entry
     },
     sync::Arc,
-    vec::Vec
-};
-use core::{
-    cmp,
-    sync::atomic::Ordering
 };
+use core::cmp;
 use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
 
 // ____ _
 
@@ -473,8 +466,6 @@ fn page_aligned_chunks(mut start: usize, mut len: usize) -> impl Iterator
 
 pub fn context_memory(addrspace: &mut AddrSpace, offset: VirtualAddress, len: usize) -> impl Iterator<Item = Option<*mut [u8]>> + '_ {
-    let mut table = unsafe { InactivePageTable::from_address(addrspace.frame.utable.start_address().data()) };
-
     // TODO: Iterate over grants instead to avoid yielding None too many times. What if
     // context_memory is used for an entire process's address space, where the stack is at the very
     // end? Alternatively we can skip pages recursively, i.e. first skip unpopulated PML4s and then
@@ -485,8 +476,10 @@ pub fn context_memory(addrspace: &mut AddrSpace, offset: VirtualAddress, len: us
         //log::info!("ADDR {:p} LEN {:#0x}", page as *const u8, len);
 
-        let frame = table.mapper().translate_page(Page::containing_address(VirtualAddress::new(addr)))?;
-        let start = RmmA::phys_to_virt(frame.start_address()).data() + addr % crate::memory::PAGE_SIZE;
+        // FIXME: verify flags before giving out slice
+        let (address, _flags) = addrspace.table.utable.translate(VirtualAddress::new(addr))?;
+
+        let start = RmmA::phys_to_virt(address).data() + addr % crate::memory::PAGE_SIZE;
         Some(core::ptr::slice_from_raw_parts_mut(start as *mut u8, len))
     })
 }