Partial: migrate remaining parts to RMM.

This commit is contained in:
4lDO2
2022-07-17 14:14:20 +02:00
parent dc8ce1c22b
commit e60321d4a0
2 changed files with 16 additions and 18 deletions

View File

@@ -3,7 +3,7 @@
use core::cmp;
use crate::arch::rmm::FRAME_ALLOCATOR;
use crate::arch::rmm::LockedAllocator;
pub use crate::paging::{PAGE_SIZE, PhysicalAddress};
use rmm::{
@@ -26,21 +26,21 @@ pub struct MemoryArea {
/// Get the number of frames available
pub fn free_frames() -> usize {
unsafe {
FRAME_ALLOCATOR.usage().free().data()
LockedAllocator.usage().free().data()
}
}
/// Get the number of frames used
pub fn used_frames() -> usize {
unsafe {
FRAME_ALLOCATOR.usage().used().data()
LockedAllocator.usage().used().data()
}
}
/// Allocate a range of frames
pub fn allocate_frames(count: usize) -> Option<Frame> {
unsafe {
FRAME_ALLOCATOR.allocate(FrameCount::new(count)).map(|phys| {
LockedAllocator.allocate(FrameCount::new(count)).map(|phys| {
Frame::containing_address(PhysicalAddress::new(phys.data()))
})
}
@@ -65,7 +65,7 @@ pub fn allocate_frames_complex(count: usize, flags: PhysallocFlags, strategy: Op
/// Deallocate a range of frames
pub fn deallocate_frames(frame: Frame, count: usize) {
unsafe {
FRAME_ALLOCATOR.free(
LockedAllocator.free(
rmm::PhysicalAddress::new(frame.start_address().data()),
FrameCount::new(count)
);
@@ -103,6 +103,11 @@ impl Frame {
pub fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
FrameIter { start, end }
}
pub fn next_by(&self, n: usize) -> Self {
Self {
number: self.number + n,
}
}
}
pub struct FrameIter {

View File

@@ -7,10 +7,7 @@ use rmm::Arch;
use crate::{
arch::{
interrupt::InterruptStack,
paging::{
mapper::PageFlushAll,
ActivePageTable, InactivePageTable, Page, PAGE_SIZE, TableKind, VirtualAddress
}
paging::{PAGE_SIZE, VirtualAddress},
},
common::unique::Unique,
context::{self, signal, Context, ContextId, memory::AddrSpace},
@@ -34,12 +31,8 @@ use alloc::{
btree_map::Entry
},
sync::Arc,
vec::Vec
};
use core::{
cmp,
sync::atomic::Ordering
};
use core::cmp;
use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
// ____ _
@@ -473,8 +466,6 @@ fn page_aligned_chunks(mut start: usize, mut len: usize) -> impl Iterator<Item =
}
pub fn context_memory(addrspace: &mut AddrSpace, offset: VirtualAddress, len: usize) -> impl Iterator<Item = Option<*mut [u8]>> + '_ {
let mut table = unsafe { InactivePageTable::from_address(addrspace.frame.utable.start_address().data()) };
// TODO: Iterate over grants instead to avoid yielding None too many times. What if
// context_memory is used for an entire process's address space, where the stack is at the very
// end? Alternatively we can skip pages recursively, i.e. first skip unpopulated PML4s and then
@@ -485,8 +476,10 @@ pub fn context_memory(addrspace: &mut AddrSpace, offset: VirtualAddress, len: us
//log::info!("ADDR {:p} LEN {:#0x}", page as *const u8, len);
let frame = table.mapper().translate_page(Page::containing_address(VirtualAddress::new(addr)))?;
let start = RmmA::phys_to_virt(frame.start_address()).data() + addr % crate::memory::PAGE_SIZE;
// FIXME: verify flags before giving out slice
let (address, _flags) = addrspace.table.utable.translate(VirtualAddress::new(addr))?;
let start = RmmA::phys_to_virt(address).data() + addr % crate::memory::PAGE_SIZE;
Some(core::ptr::slice_from_raw_parts_mut(start as *mut u8, len))
})
}