diff --git a/src/allocator/mod.rs b/src/allocator/mod.rs
index 0d7c939..dfc618a 100644
--- a/src/allocator/mod.rs
+++ b/src/allocator/mod.rs
@@ -1,5 +1,4 @@
-use crate::paging::{ActivePageTable, Page, PageFlags, VirtualAddress};
-use crate::paging::mapper::PageFlushAll;
+use crate::paging::{ActivePageTable, Page, PageFlags, VirtualAddress, mapper::PageFlushAll, entry::EntryFlags};
#[cfg(not(feature="slab"))]
pub use self::linked_list::Allocator;
@@ -19,7 +18,8 @@ unsafe fn map_heap(active_table: &mut ActivePageTable, offset: usize, size: usiz
let heap_start_page = Page::containing_address(VirtualAddress::new(offset));
let heap_end_page = Page::containing_address(VirtualAddress::new(offset + size-1));
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
- let result = active_table.map(page, PageFlags::new().write(true));
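+ // The GLOBAL flag keeps kernel-heap TLB entries across address-space switches; it must be off when the "pti" feature is enabled.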
+ let result = active_table.map(page, PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))))
+ .expect("failed to map kernel heap");
flush_all.consume(result);
}
diff --git a/src/arch/x86_64/consts.rs b/src/arch/x86_64/consts.rs
index 2d9ac40..2ac10c2 100644
--- a/src/arch/x86_64/consts.rs
+++ b/src/arch/x86_64/consts.rs
@@ -1,22 +1,17 @@
// Because it is so important that the memory map never be aliased, it is defined here, in one place
// The lower 256 PML4 entries are reserved for userspace
// Each PML4 entry references up to 512 GB of memory
-// The top (511) PML4 is reserved for recursive mapping
// The second from the top (510) PML4 is reserved for the kernel
/// The size of a single PML4
pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;
- /// Offset of recursive paging
- pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
- pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK)/PML4_SIZE;
-
/// Offset of kernel
pub const KERNEL_OFFSET: usize = 0xFFFF_8000_0000_0000; //TODO: better calculation
pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK)/PML4_SIZE;
/// Offset to kernel heap
- pub const KERNEL_HEAP_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
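+ // The heap now occupies the topmost PML4 entry (511), which was previously reserved for the recursive mapping.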
+ pub const KERNEL_HEAP_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;
/// Size of kernel heap
pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
diff --git a/src/arch/x86_64/paging/entry.rs b/src/arch/x86_64/paging/entry.rs
index 09f0afd..4092ab1 100644
--- a/src/arch/x86_64/paging/entry.rs
+++ b/src/arch/x86_64/paging/entry.rs
@@ -13,6 +13,7 @@ bitflags! {
pub struct EntryFlags: usize {
const NO_CACHE = 1 << 4;
const HUGE_PAGE = 1 << 7;
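+        /// Keeps the translation cached in the TLB across CR3 reloads (requires CR4.PGE to be set).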
+ const GLOBAL = 1 << 8;
}
}
diff --git a/src/arch/x86_64/paging/mapper.rs b/src/arch/x86_64/paging/mapper.rs
index da45a06..babefcc 100644
--- a/src/arch/x86_64/paging/mapper.rs
+++ b/src/arch/x86_64/paging/mapper.rs
@@ -1,32 +1,55 @@
-use core::ptr::Unique;
+use super::{linear_phys_to_virt, Page, PAGE_SIZE, PageFlags, PhysicalAddress, VirtualAddress};
+use crate::memory::{allocate_frames, deallocate_frames, Enomem, Frame};
-use crate::memory::{allocate_frames, deallocate_frames, Frame};
-
-use super::{Page, PAGE_SIZE, PageFlags, PhysicalAddress, VirtualAddress};
-use super::table::{self, Table, Level4};
use super::RmmA;
+use super::table::{Table, Level4};
pub use rmm::{PageFlush, PageFlushAll};
-#[derive(Debug)]
-pub struct Mapper {
- p4: Unique<Table<Level4>>,
+pub struct Mapper<'table> {
+ p4: &'table mut Table<Level4>,
}
-impl Mapper {
- /// Create a new page table
- pub unsafe fn new() -> Mapper {
- Mapper {
- p4: Unique::new_unchecked(table::P4),
+impl core::fmt::Debug for Mapper<'_> {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ write!(f, "Mapper referencing P4 at {:p}", self.p4)
+ }
+}
+
+impl<'table> Mapper<'table> {
+ /// Wrap the current address space in a mapper.
+ ///
+ /// # Safety
+ ///
+ /// For this to be safe, the caller must have exclusive access to the pointer in the CR3
+ /// register.
+ // TODO: Find some lifetime hack we can use for ensuring exclusive access at compile time?
+ pub unsafe fn current() -> Mapper<'table> {
+ // SAFETY: We know that CR3 must be a valid frame, since the processor would triple fault
+ // otherwise, and the caller has ensured exclusive ownership of the page table mapped at KERNEL_OFFSET + CR3.
+ Self::from_p4_unchecked(&mut Frame::containing_address(PhysicalAddress::new(x86::controlregs::cr3() as usize)))
+ }
+ /// Wrap a top-level page table (an entire address space) in a mapper.
+ ///
+ /// # Safety
+ ///
+ /// For this to be safe, the caller must have exclusive access to the frame argument. The frame
+ /// must also be valid, and it must remain valid for the entire lifetime `'table`.
+ pub unsafe fn from_p4_unchecked(frame: &mut Frame) -> Self {
+ let virt = linear_phys_to_virt(frame.start_address())
+ .expect("expected page table frame to fit within linear mapping");
+
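+        // The linear mapping (see linear_phys_to_virt) makes the frame's page table accessible at this virtual address.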
+ Self {
+ p4: &mut *(virt.data() as *mut Table<Level4>),
}
}
pub fn p4(&self) -> &Table<Level4> {
- unsafe { self.p4.as_ref() }
+ &*self.p4
}
pub fn p4_mut(&mut self) -> &mut Table<Level4> {
- unsafe { self.p4.as_mut() }
+ &mut *self.p4
}
/// Map a page to a frame
@@ -46,9 +69,9 @@ impl Mapper {
}
/// Map a page to the next free frame
- pub fn map(&mut self, page: Page, flags: PageFlags<RmmA>) -> PageFlush<RmmA> {
- let frame = allocate_frames(1).expect("out of frames");
- self.map_to(page, frame, flags)
+ pub fn map(&mut self, page: Page, flags: PageFlags<RmmA>) -> Result<PageFlush<RmmA>, Enomem> {
+ let frame = allocate_frames(1).ok_or(Enomem)?;
+ Ok(self.map_to(page, frame, flags))
}
/// Update flags for a page
diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index ebf1e5f..70d1f0d 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -8,8 +8,9 @@ use x86::msr;
use crate::memory::Frame;
+use self::entry::EntryFlags;
use self::mapper::{Mapper, PageFlushAll};
-use self::temporary_page::TemporaryPage;
+use self::table::{Level4, Table};
pub use rmm::{
Arch as RmmArch,
@@ -94,8 +95,8 @@ unsafe fn init_pat() {
);
}
-/// Map TSS
-unsafe fn map_tss(cpu_id: usize, mapper: &mut Mapper) -> PageFlushAll<RmmA> {
+/// Map percpu
+unsafe fn map_percpu(cpu_id: usize, mapper: &mut Mapper) -> PageFlushAll<RmmA> {
extern "C" {
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
@@ -115,7 +116,11 @@ unsafe fn map_tss(cpu_id: usize, mapper: &mut Mapper) -> PageFlushAll<RmmA> {
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
- let result = mapper.map(page, PageFlags::new().write(true));
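+ // Like the kernel heap, per-CPU mappings are GLOBAL unless page-table isolation ("pti") is enabled.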
+ let result = mapper.map(
+ page,
+ PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))),
+ )
+ .expect("failed to allocate page table frames while mapping percpu");
flush_all.consume(result);
}
flush_all
@@ -188,7 +193,7 @@ pub unsafe fn init(
let mut active_table = ActivePageTable::new_unlocked(TableKind::User);
- let flush_all = map_tss(cpu_id, &mut active_table);
+ let flush_all = map_percpu(cpu_id, &mut active_table);
flush_all.flush();
return (active_table, init_tcb(cpu_id));
@@ -204,15 +209,11 @@ pub unsafe fn init_ap(
let mut new_table = InactivePageTable::from_address(bsp_table);
- let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(
- crate::USER_TMP_MISC_OFFSET,
- )));
-
- active_table.with(&mut new_table, &mut temporary_page, |mapper| {
- let flush_all = map_tss(cpu_id, mapper);
+ {
+ let flush_all = map_percpu(cpu_id, &mut new_table.mapper());
// The flush can be ignored as this is not the active table. See later active_table.switch
flush_all.ignore();
- });
+ };
// This switches the active table, which is setup by the bootloader, to a correct table
// setup by the lambda above. This will also flush the TLB
@@ -223,20 +224,20 @@ pub unsafe fn init_ap(
#[derive(Debug)]
pub struct ActivePageTable {
- mapper: Mapper,
+ mapper: Mapper<'static>,
locked: bool,
}
impl Deref for ActivePageTable {
- type Target = Mapper;
+ type Target = Mapper<'static>;
- fn deref(&self) -> &Mapper {
+ fn deref(&self) -> &Mapper<'static> {
&self.mapper
}
}
impl DerefMut for ActivePageTable {
- fn deref_mut(&mut self) -> &mut Mapper {
+ fn deref_mut(&mut self) -> &mut Mapper<'static> {
&mut self.mapper
}
}
@@ -245,14 +246,14 @@ impl ActivePageTable {
pub unsafe fn new(_table_kind: TableKind) -> ActivePageTable {
page_table_lock();
ActivePageTable {
- mapper: Mapper::new(),
+ mapper: Mapper::current(),
locked: true,
}
}
pub unsafe fn new_unlocked(_table_kind: TableKind) -> ActivePageTable {
ActivePageTable {
- mapper: Mapper::new(),
+ mapper: Mapper::current(),
locked: false,
}
}
@@ -281,47 +282,6 @@ impl ActivePageTable {
}
}
- pub fn with(
- &mut self,
- table: &mut InactivePageTable,
- temporary_page: &mut TemporaryPage,
- f: F,
- ) where
- F: FnOnce(&mut Mapper),
- {
- {
- let backup = Frame::containing_address(unsafe {
- RmmA::table()
- });
-
- // map temporary_page to current p4 table
- let p4_table = temporary_page.map_table_frame(
- backup.clone(),
- PageFlags::new_table().write(true), //TODO: RISC-V will not like this
- self,
- );
-
- // overwrite recursive mapping
- self.p4_mut()[crate::RECURSIVE_PAGE_PML4].set(
- table.frame.clone(),
- PageFlags::new_table().write(true), //TODO: RISC-V will not like this
- );
- self.flush_all();
-
- // execute f in the new context
- f(self);
-
- // restore recursive mapping to original p4 table
- p4_table[crate::RECURSIVE_PAGE_PML4].set(
- backup,
- PageFlags::new_table().write(true), //TODO: RISC-V will not like this
- );
- self.flush_all();
- }
-
- temporary_page.unmap(self);
- }
-
pub unsafe fn address(&self) -> usize {
RmmA::table().data()
}
@@ -341,28 +301,30 @@ pub struct InactivePageTable {
}
impl InactivePageTable {
- pub fn new(
+ /// Create a new inactive page table, located at a given frame.
+ ///
+ /// # Safety
+ ///
+ /// For this to be safe, the caller must have exclusive access to the corresponding virtual
+ /// address of the frame.
+ pub unsafe fn new(
+ _active_table: &mut ActivePageTable,
frame: Frame,
- active_table: &mut ActivePageTable,
- temporary_page: &mut TemporaryPage,
) -> InactivePageTable {
+ // FIXME: Use active_table to ensure that the newly-allocated frame be linearly mapped, in
+ // case it is outside the pre-mapped physical address range, or if such a range is too
+ // large to fit the whole physical address space in the virtual address space.
{
- let table = temporary_page.map_table_frame(
- frame.clone(),
- PageFlags::new_table().write(true), //TODO: RISC-V will not like this
- active_table,
- );
+ let table = linear_phys_to_virt(frame.start_address())
+ .expect("cannot initialize InactivePageTable (currently) without the frame being linearly mapped");
// now we are able to zero the table
- table.zero();
- // set up recursive mapping for the table
- table[crate::RECURSIVE_PAGE_PML4].set(
- frame.clone(),
- PageFlags::new_table().write(true), //TODO: RISC-V will not like this
- );
- }
- temporary_page.unmap(active_table);
- InactivePageTable { frame: frame }
+ // SAFETY: The caller must ensure exclusive access to the pointed-to virtual address of
+ // the frame.
+ (&mut *(table.data() as *mut Table<Level4>)).zero();
+ }
+
+ InactivePageTable { frame }
}
pub unsafe fn from_address(address: usize) -> InactivePageTable {
@@ -371,11 +333,22 @@ impl InactivePageTable {
}
}
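+    /// Wrap this inactive table in a mapper. The mutable borrow ensures the mapper cannot outlive the table it references.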
+ pub fn mapper<'inactive_table>(&'inactive_table mut self) -> Mapper<'inactive_table> {
+ unsafe { Mapper::from_p4_unchecked(&mut self.frame) }
+ }
pub unsafe fn address(&self) -> usize {
self.frame.start_address().data()
}
}
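+/// Translate a physical address to its virtual address in the kernel's linear (offset) mapping, if it does not overflow.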
+pub fn linear_phys_to_virt(physical: PhysicalAddress) -> Option<VirtualAddress> {
+ physical.data().checked_add(crate::KERNEL_OFFSET).map(VirtualAddress::new)
+}
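+/// The inverse: recover the physical address behind a linearly mapped virtual address, if there is one.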
+pub fn linear_virt_to_phys(virt: VirtualAddress) -> Option<PhysicalAddress> {
+ virt.data().checked_sub(crate::KERNEL_OFFSET).map(PhysicalAddress::new)
+}
+
+
/// Page
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
diff --git a/src/arch/x86_64/paging/table.rs b/src/arch/x86_64/paging/table.rs
index 7023b11..e6467bf 100644
--- a/src/arch/x86_64/paging/table.rs
+++ b/src/arch/x86_64/paging/table.rs
@@ -5,12 +5,11 @@ use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use crate::memory::allocate_frames;
+use crate::paging::{linear_phys_to_virt, VirtualAddress};
use super::{ENTRY_COUNT, PageFlags};
use super::entry::{Entry, EntryFlags};
-pub const P4: *mut Table<Level4> = (crate::RECURSIVE_PAGE_OFFSET | 0x7f_ffff_f000) as *mut _;
-
pub trait TableLevel {}
pub enum Level4 {}
@@ -39,7 +38,7 @@ impl HierarchicalLevel for Level2 {
type NextLevel = Level1;
}
-#[repr(packed(4096))]
+#[repr(C, align(4096))]
pub struct Table<L: TableLevel> {
entries: [Entry; ENTRY_COUNT],
level: PhantomData<L>,
@@ -84,11 +83,11 @@ impl<L> Table<L> where L: TableLevel {
impl<L> Table<L> where L: HierarchicalLevel {
pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
- self.next_table_address(index).map(|address| unsafe { &*(address as *const _) })
+ self.next_table_address(index).map(|address| unsafe { &*(address.data() as *const _) })
}
pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
- self.next_table_address(index).map(|address| unsafe { &mut *(address as *mut _) })
+ self.next_table_address(index).map(|address| unsafe { &mut *(address.data() as *mut _) })
}
pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel> {
@@ -104,14 +103,20 @@ impl<L> Table<L> where L: HierarchicalLevel {
self.next_table_mut(index).unwrap()
}
- fn next_table_address(&self, index: usize) -> Option<usize> {
- let entry_flags = self[index].flags();
- if entry_flags.has_present() && !entry_flags.has_flag(EntryFlags::HUGE_PAGE.bits()) {
- let table_address = self as *const _ as usize;
- Some((table_address << 9) | (index << 12))
- } else {
- None
- }
+ fn next_table_address(&self, index: usize) -> Option<VirtualAddress> {
+ let entry = &self[index];
+ let entry_flags = entry.flags();
+
+ entry.pointed_frame().and_then(|next_table_frame| {
+ if entry_flags.has_flag(EntryFlags::HUGE_PAGE.bits()) {
+ return None;
+ }
+ let next_table_physaddr = next_table_frame.start_address();
+ let next_table_virtaddr = linear_phys_to_virt(next_table_physaddr)
+ .expect("expected page table frame to fit within linear mapping");
+
+ Some(next_table_virtaddr)
+ })
}
}
diff --git a/src/arch/x86_64/rmm.rs b/src/arch/x86_64/rmm.rs
index 970dbd0..90f0eb4 100644
--- a/src/arch/x86_64/rmm.rs
+++ b/src/arch/x86_64/rmm.rs
@@ -100,11 +100,6 @@ unsafe fn inner<A: Arch>(areas: &'static [MemoryArea], kernel_base: usize, kerne
flush.ignore(); // Not the active table
}
- //TODO: remove backwards compatible recursive mapping
- mapper.table().set_entry(511, rmm::PageEntry::new(
- mapper.table().phys().data() | A::ENTRY_FLAG_READWRITE | A::ENTRY_FLAG_PRESENT | A::ENTRY_FLAG_NO_EXEC
- ));
-
println!("Table: {:X}", mapper.table().phys().data());
for i in 0..512 {
if let Some(entry) = mapper.table().entry(i) {
diff --git a/src/context/context.rs b/src/context/context.rs
index bf34057..6223641 100644
--- a/src/context/context.rs
+++ b/src/context/context.rs
@@ -17,7 +17,7 @@ use crate::arch::{interrupt::InterruptStack, paging::PAGE_SIZE};
use crate::common::unique::Unique;
use crate::context::arch;
use crate::context::file::{FileDescriptor, FileDescription};
-use crate::context::memory::{UserGrants, Memory, SharedMemory, Tls};
+use crate::context::memory::{UserGrants, Memory, SharedMemory};
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::scheme::{SchemeNamespace, FileHandle};
use crate::sync::WaitMap;
diff --git a/src/context/memory.rs b/src/context/memory.rs
index 75c5421..dc7b4ee 100644
--- a/src/context/memory.rs
+++ b/src/context/memory.rs
@@ -1,4 +1,4 @@
-use alloc::collections::{BTreeMap, BTreeSet, VecDeque};
+use alloc::collections::{BTreeMap, BTreeSet};
use alloc::sync::{Arc, Weak};
use core::borrow::Borrow;
use core::cmp::{self, Eq, Ordering, PartialEq, PartialOrd};
@@ -15,9 +15,8 @@ use crate::arch::paging::PAGE_SIZE;
use crate::context::file::FileDescriptor;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::Frame;
-use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, PageIter, PhysicalAddress, RmmA, VirtualAddress};
use crate::paging::mapper::PageFlushAll;
-use crate::paging::temporary_page::TemporaryPage;
+use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, PageIter, PhysicalAddress, RmmA, VirtualAddress};
/// Round down to the nearest multiple of page size
pub fn round_down_pages(number: usize) -> usize {
@@ -260,9 +259,9 @@ impl PartialEq for Region {
impl Eq for Region {}
impl PartialOrd for Region {
-fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- self.start.partial_cmp(&other.start)
-}
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.start.partial_cmp(&other.start)
+ }
}
impl Ord for Region {
fn cmp(&self, other: &Self) -> Ordering {
@@ -340,7 +339,9 @@ impl Grant {
let start_page = Page::containing_address(to);
let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
- let result = active_table.map(page, flags);
+ let result = active_table
+ .map(page, flags)
+ .expect("TODO: handle ENOMEM in Grant::map");
flush_all.consume(result);
}
@@ -358,37 +359,31 @@ impl Grant {
}
}
- pub fn map_inactive(from: VirtualAddress, to: VirtualAddress, size: usize, flags: PageFlags<RmmA>, desc_opt: Option<FileDescriptor>, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant {
- let active_table = unsafe { ActivePageTable::new(from.kind()) };
+ pub fn map_inactive(src: VirtualAddress, dst: VirtualAddress, size: usize, flags: PageFlags<RmmA>, desc_opt: Option<FileDescriptor>, inactive_table: &mut InactivePageTable) -> Grant {
+ let active_table = unsafe { ActivePageTable::new(src.kind()) };
+ let mut inactive_mapper = inactive_table.mapper();
- //TODO: Do not allocate
- let mut frames = VecDeque::with_capacity(size/PAGE_SIZE);
+ let src_start_page = Page::containing_address(src);
+ let src_end_page = Page::containing_address(VirtualAddress::new(src.data() + size - 1));
+ let src_range = Page::range_inclusive(src_start_page, src_end_page);
- let start_page = Page::containing_address(from);
- let end_page = Page::containing_address(VirtualAddress::new(from.data() + size - 1));
- for page in Page::range_inclusive(start_page, end_page) {
- let frame = active_table.translate_page(page).expect("grant references unmapped memory");
- frames.push_back(frame);
+ let dst_start_page = Page::containing_address(dst);
+ let dst_end_page = Page::containing_address(VirtualAddress::new(dst.data() + size - 1));
+ let dst_range = Page::range_inclusive(dst_start_page, dst_end_page);
+
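+        // Walk the source and destination ranges in lockstep, mirroring each source frame into the inactive address space.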
+ for (src_page, dst_page) in src_range.zip(dst_range) {
+ let frame = active_table.translate_page(src_page).expect("grant references unmapped memory");
+
+ let inactive_flush = inactive_mapper.map_to(dst_page, frame, flags);
+ // Ignore result due to mapping on inactive table
+ unsafe { inactive_flush.ignore(); }
}
- let mut active_table = unsafe { ActivePageTable::new(to.kind()) };
-
- active_table.with(new_table, temporary_page, |mapper| {
- let start_page = Page::containing_address(to);
- let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
- for page in Page::range_inclusive(start_page, end_page) {
- let frame = frames.pop_front().expect("grant did not find enough frames");
- let result = mapper.map_to(page, frame, flags);
- // Ignore result due to mapping on inactive table
- unsafe { result.ignore(); }
- }
- });
-
ipi(IpiKind::Tlb, IpiTarget::Other);
Grant {
region: Region {
- start: to,
+ start: dst,
size,
},
flags,
@@ -415,7 +410,8 @@ impl Grant {
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
if self.owned {
- let result = active_table.map(new_page, PageFlags::new().write(true));
+ let result = active_table.map(new_page, PageFlags::new().write(true))
+ .expect("TODO: handle ENOMEM in Grant::secret_clone");
flush_all.consume(result);
} else {
let result = active_table.map_to(new_page, frame, flags);
@@ -456,7 +452,7 @@ impl Grant {
}
}
- pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
+ pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable) {
assert!(self.mapped);
let mut active_table = unsafe { ActivePageTable::new(new_start.kind()) };
@@ -471,12 +467,10 @@ impl Grant {
let (result, frame) = active_table.unmap_return(page, false);
flush_all.consume(result);
- active_table.with(new_table, temporary_page, |mapper| {
- let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
- let result = mapper.map_to(new_page, frame, flags);
- // Ignore result due to mapping on inactive table
- unsafe { result.ignore(); }
- });
+ let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
+ let result = new_table.mapper().map_to(new_page, frame, flags);
+ // Ignore result due to mapping on inactive table
+ unsafe { result.ignore(); }
}
flush_all.flush();
@@ -522,24 +516,20 @@ impl Grant {
self.mapped = false;
}
- pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
+ pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable) {
assert!(self.mapped);
- let mut active_table = unsafe { ActivePageTable::new(self.start_address().kind()) };
-
- active_table.with(new_table, temporary_page, |mapper| {
- let start_page = Page::containing_address(self.start_address());
- let end_page = Page::containing_address(self.final_address());
- for page in Page::range_inclusive(start_page, end_page) {
- let (result, frame) = mapper.unmap_return(page, false);
- if self.owned {
- //TODO: make sure this frame can be safely freed, physical use counter
- crate::memory::deallocate_frames(frame, 1);
- }
- // This is not the active table, so the flush can be ignored
- unsafe { result.ignore(); }
+ let start_page = Page::containing_address(self.start_address());
+ let end_page = Page::containing_address(self.final_address());
+ for page in Page::range_inclusive(start_page, end_page) {
+ let (result, frame) = new_table.mapper().unmap_return(page, false);
+ if self.owned {
+ //TODO: make sure this frame can be safely freed, physical use counter
+ crate::memory::deallocate_frames(frame, 1);
}
- });
+ // This is not the active table, so the flush can be ignored
+ unsafe { result.ignore(); }
+ }
ipi(IpiKind::Tlb, IpiTarget::Other);
@@ -705,7 +695,9 @@ impl Memory {
let flush_all = PageFlushAll::new();
for page in self.pages() {
- let result = active_table.map(page, self.flags);
+ let result = active_table
+ .map(page, self.flags)
+ .expect("TODO: handle ENOMEM in Memory::map");
flush_all.consume(result);
}
@@ -734,7 +726,9 @@ impl Memory {
/// A complicated operation to move a piece of memory to a new page table
/// It also allows for changing the address at the same time
- pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
+ pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable) {
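+        // Obtain the inactive mapper once up front; it mutably borrows new_table for the duration of the move.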
+ let mut inactive_mapper = new_table.mapper();
+
let mut active_table = unsafe { ActivePageTable::new(new_start.kind()) };
let flush_all = PageFlushAll::new();
@@ -743,12 +737,10 @@ impl Memory {
let (result, frame) = active_table.unmap_return(page, false);
flush_all.consume(result);
- active_table.with(new_table, temporary_page, |mapper| {
- let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.start.data() + new_start.data()));
- let result = mapper.map_to(new_page, frame, self.flags);
- // This is not the active table, so the flush can be ignored
- unsafe { result.ignore(); }
- });
+ let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.start.data() + new_start.data()));
+ let result = inactive_mapper.map_to(new_page, frame, self.flags);
+ // This is not the active table, so the flush can be ignored
+ unsafe { result.ignore(); }
}
flush_all.flush();
@@ -782,7 +774,9 @@ impl Memory {
let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size - 1));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_none() {
- let result = active_table.map(page, self.flags);
+ let result = active_table
+ .map(page, self.flags)
+ .expect("TODO: Handle OOM in Memory::resize");
flush_all.consume(result);
}
}
@@ -819,24 +813,7 @@ impl Drop for Memory {
}
}
-#[derive(Debug)]
-pub struct Tls {
- pub master: VirtualAddress,
- pub file_size: usize,
- pub mem: Memory,
- pub offset: usize,
-}
-
-impl Tls {
- /// Load TLS data from master
- pub unsafe fn load(&mut self) {
- intrinsics::copy(
- self.master.data() as *const u8,
- (self.mem.start_address().data() + self.offset) as *mut u8,
- self.file_size
- );
- }
-}
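+/// A fixed non-null address returned for zero-sized grants; it is never mapped, so any dereference faults.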
+pub const DANGLING: usize = 1 << (usize::BITS - 2);
#[cfg(tests)]
mod tests {
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index f2f0929..9894666 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -118,3 +118,6 @@ impl Iterator for FrameIter {
}
}
}
+
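+/// Out-of-memory error type, corresponding to the ENOMEM errno returned to userspace.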
+#[derive(Debug)]
+pub struct Enomem;
diff --git a/src/ptrace.rs b/src/ptrace.rs
index b4a3be2..7f7711a 100644
--- a/src/ptrace.rs
+++ b/src/ptrace.rs
@@ -7,7 +7,6 @@ use crate::{
interrupt::InterruptStack,
paging::{
mapper::PageFlushAll,
- temporary_page::TemporaryPage,
ActivePageTable, InactivePageTable, Page, PAGE_SIZE, TableKind, VirtualAddress
}
},
@@ -465,8 +464,9 @@ where F: FnOnce(*mut u8) -> Result<()>
// Find the physical frames for all pages
let mut frames = Vec::new();
- let mut result = None;
- active_page_table.with(&mut target_page_table, &mut TemporaryPage::new(start), |mapper| {
+ {
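+    // The mapper mutably borrows target_page_table; this block scopes that borrow so it ends before the frames are mapped below.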
+ let mapper = target_page_table.mapper();
+
let mut inner = || -> Result<()> {
let start = Page::containing_address(offset);
let end = Page::containing_address(VirtualAddress::new(offset.data() + len - 1));
@@ -478,9 +478,8 @@ where F: FnOnce(*mut u8) -> Result<()>
}
Ok(())
};
- result = Some(inner());
- });
- result.expect("with(...) callback should always be called")?;
+ inner()?;
+ }
// Map all the physical frames into linear pages
let pages = frames.len();
diff --git a/src/scheme/user.rs b/src/scheme/user.rs
index 1bbbcf2..5d56472 100644
--- a/src/scheme/user.rs
+++ b/src/scheme/user.rs
@@ -8,10 +8,9 @@ use spin::{Mutex, RwLock};
use crate::context::{self, Context};
use crate::context::file::FileDescriptor;
-use crate::context::memory::{page_flags, round_down_pages, Grant, Region};
+use crate::context::memory::{DANGLING, page_flags, round_down_pages, Grant, Region};
use crate::event;
-use crate::paging::{PAGE_SIZE, InactivePageTable, Page, VirtualAddress};
-use crate::paging::temporary_page::TemporaryPage;
+use crate::paging::{PAGE_SIZE, InactivePageTable, VirtualAddress};
use crate::scheme::{AtomicSchemeId, SchemeId};
use crate::sync::{WaitQueue, WaitMap};
use crate::syscall::data::{Map, OldMap, Packet, Stat, StatVfs, TimeSpec};
@@ -124,7 +123,7 @@ impl UserInner {
).map(|addr| addr.data())
}
- fn capture_inner(context_weak: &Weak<RwLock<Context>>, to_address: usize, address: usize, size: usize, flags: MapFlags, desc_opt: Option<FileDescriptor>)
+ fn capture_inner(context_weak: &Weak<RwLock<Context>>, dst_address: usize, address: usize, size: usize, flags: MapFlags, desc_opt: Option<FileDescriptor>)
-> Result<VirtualAddress> {
// TODO: More abstractions over grant creation!
@@ -138,57 +137,50 @@ impl UserInner {
// if they ever tried to access this dangling address.
// Set the most significant bit.
- let dangling: usize = 1 << (core::mem::size_of::<usize>() * 8 - 1);
-
- return Ok(VirtualAddress::new(dangling));
+ return Ok(VirtualAddress::new(DANGLING));
}
let context_lock = context_weak.upgrade().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
- let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
let mut grants = context.grants.write();
- let from_address = round_down_pages(address);
- let offset = address - from_address;
- let from_region = Region::new(VirtualAddress::new(from_address), offset + size).round();
- let to_region = grants.find_free_at(VirtualAddress::new(to_address), from_region.size(), flags)?;
+ let src_address = round_down_pages(address);
+ let offset = address - src_address;
+ let src_region = Region::new(VirtualAddress::new(src_address), offset + size).round();
+ let dst_region = grants.find_free_at(VirtualAddress::new(dst_address), src_region.size(), flags)?;
//TODO: Use syscall_head and syscall_tail to avoid leaking data
grants.insert(Grant::map_inactive(
- from_region.start_address(),
- to_region.start_address(),
- from_region.size(),
+ src_region.start_address(),
+ dst_region.start_address(),
+ src_region.size(),
page_flags(flags),
desc_opt,
&mut new_table,
- &mut temporary_page
));
- Ok(VirtualAddress::new(to_region.start_address().data() + offset))
+ Ok(VirtualAddress::new(dst_region.start_address().data() + offset))
}
pub fn release(&self, address: usize) -> Result<()> {
- if address == 0 {
- Ok(())
- } else {
- let context_lock = self.context.upgrade().ok_or(Error::new(ESRCH))?;
- let mut context = context_lock.write();
-
- let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
- let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
-
- let mut grants = context.grants.write();
-
- if let Some(region) = grants.contains(VirtualAddress::new(address)).map(Region::from) {
- grants.take(®ion).unwrap().unmap_inactive(&mut new_table, &mut temporary_page);
- return Ok(());
- }
-
- Err(Error::new(EFAULT))
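+ // Zero-sized grants are given the DANGLING address at capture time; releasing it is a no-op.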
+ if address == DANGLING {
+ return Ok(());
}
+ let context_lock = self.context.upgrade().ok_or(Error::new(ESRCH))?;
+ let mut context = context_lock.write();
+
+ let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
+ let mut grants = context.grants.write();
+
+ let region = match grants.contains(VirtualAddress::new(address)).map(Region::from) {
+ Some(region) => region,
+ None => return Err(Error::new(EFAULT)),
+ };
+ grants.take(®ion).unwrap().unmap_inactive(&mut new_table);
+ Ok(())
}
pub fn read(&self, buf: &mut [u8]) -> Result<usize> {
diff --git a/src/syscall/process.rs b/src/syscall/process.rs
index 97d613f..2f863d9 100644
--- a/src/syscall/process.rs
+++ b/src/syscall/process.rs
@@ -20,7 +20,6 @@ use crate::interrupt;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::allocate_frames;
use crate::paging::mapper::PageFlushAll;
-use crate::paging::temporary_page::TemporaryPage;
use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, TableKind, VirtualAddress, PAGE_SIZE};
use crate::{ptrace, syscall};
use crate::scheme::FileHandle;
@@ -88,10 +87,20 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
arch = context.arch.clone();
if let Some(ref fx) = context.kfx {
- let mut new_fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(1024, 16)) as *mut [u8; 1024]) };
- for (new_b, b) in new_fx.iter_mut().zip(fx.iter()) {
- *new_b = *b;
- }
+ let mut new_fx = unsafe {
+ let new_fx_ptr = crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(1024, 16));
+ if new_fx_ptr.is_null() {
+ // FIXME: It's mildly ironic that the only place where clone can fail with
+ // ENOMEM is when copying the 1024 bytes that merely store vector registers.
+ // Although, to achieve full kernel-panic immunity, we'll need to completely
+ // phase out all use of liballoc data structures and use our own library or a
+ // port of liballoc, since panicking on OOM is not acceptable for a kernel.
+ return Err(Error::new(ENOMEM));
+ }
+ new_fx_ptr.copy_from_nonoverlapping(fx.as_ptr(), fx.len());
+ Box::from_raw(new_fx_ptr as *mut [u8; 1024])
+ };
kfx_opt = Some(new_fx);
}
@@ -332,12 +341,11 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
let mut active_utable = unsafe { ActivePageTable::new(TableKind::User) };
let mut active_ktable = unsafe { ActivePageTable::new(TableKind::Kernel) };
- let mut temporary_upage = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)));
- let mut temporary_kpage = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::KERNEL_TMP_MISC_OFFSET)));
-
- let mut new_utable = {
- let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table");
- InactivePageTable::new(frame, &mut active_utable, &mut temporary_upage)
+ let mut new_utable = unsafe {
+ let frame = allocate_frames(1).ok_or(Error::new(ENOMEM))?;
+ // SAFETY: This is safe because the frame is exclusive, owned, and valid, as we
+ // have just allocated it.
+ InactivePageTable::new(&mut active_utable, frame)
};
context.arch.set_page_utable(unsafe { new_utable.address() });
@@ -345,7 +353,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
let mut new_ktable = {
let mut new_ktable = {
let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table");
- InactivePageTable::new(frame, &mut active_ktable, &mut temporary_kpage)
+ // SAFETY: This is safe because the frame is exclusive, owned, and valid, as we
+ // have just allocated it.
+ unsafe { InactivePageTable::new(&mut active_ktable, frame) }
};
context.arch.set_page_ktable(unsafe { new_ktable.address() });
new_ktable
@@ -360,35 +368,29 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
{
let frame = active_ktable.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped");
let flags = active_ktable.p4()[crate::KERNEL_PML4].flags();
- active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
- mapper.p4_mut()[crate::KERNEL_PML4].set(frame, flags);
- });
+
+ new_ktable.mapper().p4_mut()[crate::KERNEL_PML4].set(frame, flags);
}
// Copy kernel heap mapping
{
let frame = active_ktable.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped");
let flags = active_ktable.p4()[crate::KERNEL_HEAP_PML4].flags();
- active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
- mapper.p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags);
- });
+
+ new_ktable.mapper().p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags);
}
// Copy physmap mapping
{
let frame = active_ktable.p4()[crate::PHYS_PML4].pointed_frame().expect("physmap not mapped");
let flags = active_ktable.p4()[crate::PHYS_PML4].flags();
- active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
- mapper.p4_mut()[crate::PHYS_PML4].set(frame, flags);
- });
+ new_ktable.mapper().p4_mut()[crate::PHYS_PML4].set(frame, flags);
}
// Copy kernel percpu (similar to TLS) mapping.
{
let frame = active_ktable.p4()[crate::KERNEL_PERCPU_PML4].pointed_frame().expect("kernel TLS not mapped");
let flags = active_ktable.p4()[crate::KERNEL_PERCPU_PML4].flags();
- active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| {
- mapper.p4_mut()[crate::KERNEL_PERCPU_PML4].set(frame, flags);
- });
+ new_ktable.mapper().p4_mut()[crate::KERNEL_PERCPU_PML4].set(frame, flags);
}
if let Some(fx) = kfx_opt.take() {
@@ -414,9 +416,8 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
if ! image.is_empty() {
let frame = active_utable.p4()[crate::USER_PML4].pointed_frame().expect("user image not mapped");
let flags = active_utable.p4()[crate::USER_PML4].flags();
- active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| {
- mapper.p4_mut()[crate::USER_PML4].set(frame, flags);
- });
+
+ new_utable.mapper().p4_mut()[crate::USER_PML4].set(frame, flags);
}
context.image = image;
@@ -424,9 +425,8 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
if ! grants.read().is_empty() {
let frame = active_utable.p4()[crate::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped");
let flags = active_utable.p4()[crate::USER_GRANT_PML4].flags();
- active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| {
- mapper.p4_mut()[crate::USER_GRANT_PML4].set(frame, flags);
- });
+
+ new_utable.mapper().p4_mut()[crate::USER_GRANT_PML4].set(frame, flags);
}
context.grants = grants;
} else {
@@ -434,7 +434,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
for memory_shared in image.iter_mut() {
memory_shared.with(|memory| {
let start = VirtualAddress::new(memory.start_address().data() - crate::USER_TMP_OFFSET + crate::USER_OFFSET);
- memory.move_to(start, &mut new_utable, &mut temporary_upage);
+ memory.move_to(start, &mut new_utable);
});
}
context.image = image;
@@ -446,7 +446,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
for mut grant in old_grants.inner.into_iter() {
let start = VirtualAddress::new(grant.start_address().data() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET);
- grant.move_to(start, &mut new_utable, &mut temporary_upage);
+ grant.move_to(start, &mut new_utable);
grants.insert(grant);
}
}
@@ -458,12 +458,11 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
if flags.contains(CLONE_STACK) {
let frame = active_utable.p4()[crate::USER_STACK_PML4].pointed_frame().expect("user stack not mapped");
let flags = active_utable.p4()[crate::USER_STACK_PML4].flags();
- active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| {
- mapper.p4_mut()[crate::USER_STACK_PML4].set(frame, flags);
- });
+
+ new_utable.mapper().p4_mut()[crate::USER_STACK_PML4].set(frame, flags);
} else {
stack_shared.with(|stack| {
- stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_utable, &mut temporary_upage);
+ stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_utable);
});
}
context.stack = Some(stack_shared);
@@ -471,7 +470,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
// Setup user sigstack
if let Some(mut sigstack) = sigstack_opt {
- sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_utable, &mut temporary_upage);
+ sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_utable);
context.sigstack = Some(sigstack);
}
@@ -563,9 +562,8 @@ fn empty(context: &mut context::Context, reaping: bool) {
log::error!("{}: {}: Grant should not exist: {:?}", context.id.into(), *context.name.read(), grant);
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
- let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
- grant.unmap_inactive(&mut new_table, &mut temporary_page);
+ grant.unmap_inactive(&mut new_table);
} else {
grant.unmap();
}