Implement exec, and change UserGrant allocator.

@@ -16,7 +16,9 @@ pub unsafe fn stack_trace() {
let active_table = ActivePageTable::new(TableKind::User);
for _frame in 0..64 {
if let Some(rip_rbp) = rbp.checked_add(mem::size_of::<usize>()) {
if active_table.translate(VirtualAddress::new(rbp)).is_some() && active_table.translate(VirtualAddress::new(rip_rbp)).is_some() {
let rbp_virt = VirtualAddress::new(rbp);
let rip_rbp_virt = VirtualAddress::new(rip_rbp);
if rbp_virt.is_canonical() && rip_rbp_virt.is_canonical() && active_table.translate(rbp_virt).is_some() && active_table.translate(rip_rbp_virt).is_some() {
let rip = *(rip_rbp as *const usize);
if rip == 0 {
println!(" {:>016X}: EMPTY RETURN", rbp);
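
The hunk above adds is_canonical checks before the frame pointer is dereferenced. A minimal sketch of what canonicality means, assuming x86_64 with 4-level paging (48-bit virtual addresses); the kernel's actual VirtualAddress::is_canonical comes from rmm and may be implemented differently:

    // Bits 63..=48 must be copies of bit 47, so the top 17 bits are either
    // all zero or all one.
    fn is_canonical(addr: u64) -> bool {
        let top = addr >> 47;
        top == 0 || top == 0x1ffff
    }

    fn main() {
        assert!(is_canonical(0x0000_7fff_ffff_ffff));  // highest low-half address
        assert!(is_canonical(0xffff_8000_0000_0000));  // lowest high-half address
        assert!(!is_canonical(0x0000_8000_0000_0000)); // inside the non-canonical hole
    }

Without this test, a corrupt rbp could be fed into translate() or dereferenced while unwinding, faulting inside the stack-trace path itself.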

@@ -311,7 +311,6 @@ impl InactivePageTable {
/// For this to be safe, the caller must have exclusive access to the corresponding virtual
/// address of the frame.
pub unsafe fn new(
_active_table: &mut ActivePageTable,
frame: Frame,
) -> InactivePageTable {
// FIXME: Use active_table to ensure that the newly-allocated frame be linearly mapped, in

@@ -394,8 +393,11 @@ impl Page {
}

pub fn next(self) -> Page {
self.next_by(1)
}
pub fn next_by(self, n: usize) -> Page {
Self {
number: self.number + 1,
number: self.number + n,
}
}
}
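
next_by generalizes the old hard-coded `+ 1`, which lets later hunks in this commit build exclusive ranges such as Page::range_exclusive(dst, dst.next_by(page_count)). A toy model of the arithmetic, assuming a 4 KiB page size (page numbers are virtual addresses divided by the page size):

    const PAGE_SIZE: usize = 4096;

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Page { number: usize }

    impl Page {
        fn containing_address(addr: usize) -> Page { Page { number: addr / PAGE_SIZE } }
        fn start_address(self) -> usize { self.number * PAGE_SIZE }
        fn next_by(self, n: usize) -> Page { Page { number: self.number + n } }
        fn next(self) -> Page { self.next_by(1) }
    }

    fn main() {
        let dst = Page::containing_address(0x40_0000);
        assert_eq!(dst.next(), dst.next_by(1));
        assert_eq!(dst.next_by(4).start_address(), 0x40_0000 + 4 * PAGE_SIZE);
    }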

@@ -46,11 +46,7 @@ pub struct Table<L: TableLevel> {

impl<L> Table<L> where L: TableLevel {
pub fn is_unused(&self) -> bool {
if self.entry_count() > 0 {
return false;
}

true
self.entry_count() == 0
}

pub fn zero(&mut self) {

@@ -16,7 +16,7 @@ use crate::arch::{interrupt::InterruptStack, paging::PAGE_SIZE};
use crate::common::unique::Unique;
use crate::context::arch;
use crate::context::file::{FileDescriptor, FileDescription};
use crate::context::memory::{UserGrants, Memory, SharedMemory};
use crate::context::memory::UserGrants;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::scheme::{SchemeNamespace, FileHandle};
use crate::sync::WaitMap;

@@ -100,9 +100,13 @@ impl ContextList {
context.arch.set_context_handle();
}

context.arch.set_page_utable(unsafe { ActivePageTable::new(TableKind::User).address() });
let mut new_tables = super::memory::setup_new_utable()?;
new_tables.take();

context.arch.set_page_utable(unsafe { new_tables.new_utable.address() });
#[cfg(target_arch = "aarch64")]
context.arch.set_page_ktable(unsafe { ActivePageTable::new(TableKind::Kernel).address() });
context.arch.set_page_ktable(unsafe { new_tables.new_ktable.address() });

context.arch.set_fx(fx.as_ptr() as usize);
context.arch.set_stack(stack.as_ptr() as usize + offset);
context.kfx = Some(fx);

@@ -4,19 +4,20 @@ use core::borrow::Borrow;
use core::cmp::{self, Eq, Ordering, PartialEq, PartialOrd};
use core::fmt::{self, Debug};
use core::intrinsics;
use core::ops::{Deref, DerefMut};
use core::ops::Deref;
use spin::Mutex;
use syscall::{
flag::MapFlags,
error::*,
};
use rmm::Arch as _;

use crate::arch::paging::PAGE_SIZE;
use crate::context::file::FileDescriptor;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::Frame;
use crate::paging::mapper::PageFlushAll;
use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, PageIter, PhysicalAddress, RmmA, VirtualAddress};
use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, PageIter, PhysicalAddress, RmmA, TableKind, VirtualAddress};

/// Round down to the nearest multiple of page size
pub fn round_down_pages(number: usize) -> usize {

@@ -46,14 +47,31 @@ impl Drop for UnmapResult {
}
}

#[derive(Debug, Default)]
#[derive(Debug)]
pub struct UserGrants {
pub inner: BTreeSet<Grant>,
inner: BTreeSet<Grant>,
holes: BTreeMap<VirtualAddress, usize>,
// TODO: Would an additional map ordered by (size,start) to allow for O(log n) allocations be
// beneficial?

//TODO: technically VirtualAddress is from a scheme's context!
pub funmap: BTreeMap<Region, VirtualAddress>,
}

impl Default for UserGrants {
fn default() -> Self {
Self::new()
}
}

impl UserGrants {
pub fn new() -> Self {
Self {
inner: BTreeSet::new(),
holes: core::iter::once((VirtualAddress::new(0), crate::PML4_SIZE * 256)).collect::<BTreeMap<_, _>>(),
funmap: BTreeMap::new(),
}
}
/// Returns the grant, if any, which occupies the specified address
pub fn contains(&self, address: VirtualAddress) -> Option<&Grant> {
let byte = Region::byte(address);

@@ -73,28 +91,30 @@ impl UserGrants {
.take_while(move |region| !region.intersect(requested).is_empty())
}
/// Return a free region with the specified size
pub fn find_free(&self, size: usize) -> Region {
// Get last used region
let last = self.inner.iter().next_back().map(Region::from).unwrap_or(Region::new(VirtualAddress::new(0), 0));
// At the earliest, start at grant offset
// TODO
let address = last.start_address().data() - size;
// TODO: Alignment (x86_64: 4 KiB, 2 MiB, or 1 GiB).
pub fn find_free(&self, size: usize) -> Option<Region> {
// Get first available hole, but do reserve the page starting from zero, as most compiled
// languages cannot handle null pointers safely even if they do point to valid memory. If an
// application absolutely needs to map the 0th page, it will have to do so explicitly via
// MAP_FIXED/MAP_FIXED_NOREPLACE.

let (hole_start, hole_size) = self.holes.iter().find(|(hole_offset, hole_size)| size <= if hole_offset.data() == 0 { hole_size.saturating_sub(PAGE_SIZE) } else { **hole_size })?;
// Create new region
Region::new(VirtualAddress::new(address), size)
Some(Region::new(VirtualAddress::new(cmp::max(hole_start.data(), PAGE_SIZE)), size))
}
/// Return a free region, respecting the user's hinted address and flags. Address may be null.
pub fn find_free_at(&mut self, address: VirtualAddress, size: usize, flags: MapFlags) -> Result<Region> {
if address == VirtualAddress::new(0) {
// Free hands!
return Ok(self.find_free(size));
return self.find_free(size).ok_or(Error::new(ENOMEM));
}

// The user wished to have this region...
let mut requested = Region::new(address, size);

if
requested.end_address().data() >= crate::PML4_SIZE * 256 // There are 256 PML4 entries reserved for userspace
&& address.data() % PAGE_SIZE != 0
requested.end_address().data() > crate::PML4_SIZE * 256 // There are 256 PML4 entries reserved for userspace
|| address.data() % PAGE_SIZE != 0
{
// ... but it was invalid
return Err(Error::new(EINVAL));

@@ -111,22 +131,76 @@ impl UserGrants {
return Err(Error::new(EOPNOTSUPP));
} else {
// TODO: Find grant close to requested address?
requested = self.find_free(requested.size());
requested = self.find_free(requested.size()).ok_or(Error::new(ENOMEM))?;
}
}

Ok(requested)
}
}
impl Deref for UserGrants {
type Target = BTreeSet<Grant>;
fn deref(&self) -> &Self::Target {
&self.inner
fn reserve(&mut self, grant: &Region) {
let previous_hole = self.holes.range_mut(..grant.start_address()).next_back();

if let Some((hole_offset, hole_size)) = previous_hole {
let prev_hole_end = hole_offset.data() + *hole_size;

// Note that prev_hole_end cannot exactly equal grant.start_address, since that would
// imply there is another grant at that position already, as it would otherwise have
// been larger.

if prev_hole_end > grant.start_address().data() {
// hole_offset must be below (but never equal to) the start address due to the
// `..grant.start_address()` limit; hence, all we have to do is to shrink the
// previous offset.
*hole_size = grant.start_address().data() - hole_offset.data();
}
if prev_hole_end > grant.end_address().data() {
// The grant is splitting this hole in two, so insert the new one at the end.
self.holes.insert(grant.end_address(), prev_hole_end - grant.end_address().data());
}
}

// Next hole
if let Some(hole_size) = self.holes.remove(&grant.start_address()) {
let remainder = hole_size - grant.size();
if remainder > 0 {
self.holes.insert(grant.end_address(), remainder);
}
}
}
}
impl DerefMut for UserGrants {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
fn unreserve(&mut self, grant: &Region) {
// The size of any possible hole directly after the to-be-freed region.
let exactly_after_size = self.holes.remove(&grant.end_address());

// If there was a hole that ended exactly where the to-be-freed region begins, simply
// increment its size so that it occupies the grant too. If, in addition, there was a
// hole directly after the grant, include its size as well.
if let Some((hole_offset, hole_size)) = self.holes.range_mut(..grant.start_address()).next_back().filter(|(offset, size)| offset.data() + **size == grant.start_address().data()) {
*hole_size = grant.end_address().data() - hole_offset.data() + exactly_after_size.unwrap_or(0);
} else {
// There was no free region directly before the to-be-freed region, so we now
// unconditionally insert a new free region where the grant was, and add that extra
// size if there was something after it.
self.holes.insert(grant.start_address(), grant.size() + exactly_after_size.unwrap_or(0));
}
}
pub fn insert(&mut self, grant: Grant) {
self.reserve(&grant);
self.inner.insert(grant);
}
pub fn remove(&mut self, region: &Region) -> bool {
self.take(region).is_some()
}
pub fn take(&mut self, region: &Region) -> Option<Grant> {
let grant = self.inner.take(region)?;
self.unreserve(region);
Some(grant)
}
pub fn iter(&self) -> impl Iterator<Item = &Grant> + '_ {
self.inner.iter()
}
pub fn is_empty(&self) -> bool { self.inner.is_empty() }
pub fn into_iter(self) -> impl Iterator<Item = Grant> {
self.inner.into_iter()
}
}
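
Taken together, find_free, reserve and unreserve implement a first-fit allocator over a BTreeMap of holes (free ranges keyed by start address). A self-contained sketch of the same bookkeeping, with plain usize addresses standing in for VirtualAddress — a simplification for illustration, not the kernel's exact code:

    use std::collections::BTreeMap;

    const PAGE_SIZE: usize = 4096;

    struct Holes {
        // start of each free range -> its length in bytes
        holes: BTreeMap<usize, usize>,
    }

    impl Holes {
        fn new(total: usize) -> Self {
            Self { holes: std::iter::once((0, total)).collect() }
        }

        // First-fit search; the zero page stays reserved, mirroring the policy above.
        fn find_free(&self, size: usize) -> Option<usize> {
            let (&start, _) = self.holes.iter().find(|&(&start, &len)| {
                size <= if start == 0 { len.saturating_sub(PAGE_SIZE) } else { len }
            })?;
            Some(start.max(PAGE_SIZE))
        }

        // Carve start..start + size out of the free map (shrink and/or split holes).
        fn reserve(&mut self, start: usize, size: usize) {
            if let Some((&hole_start, hole_len)) = self.holes.range_mut(..start).next_back() {
                let hole_end = hole_start + *hole_len;
                if hole_end > start {
                    // The hole overlaps the region from below: shrink it.
                    *hole_len = start - hole_start;
                }
                if hole_end > start + size {
                    // The region splits the hole in two: insert the upper part.
                    self.holes.insert(start + size, hole_end - (start + size));
                }
            }
            // A hole beginning exactly at `start` is consumed, keeping any remainder.
            if let Some(hole_len) = self.holes.remove(&start) {
                if hole_len > size {
                    self.holes.insert(start + size, hole_len - size);
                }
            }
        }

        // Return start..start + size to the free map, coalescing with both neighbors.
        fn unreserve(&mut self, start: usize, size: usize) {
            let after = self.holes.remove(&(start + size)).unwrap_or(0);
            if let Some((_, hole_len)) = self
                .holes
                .range_mut(..start)
                .next_back()
                .filter(|(s, l)| **s + **l == start)
            {
                *hole_len += size + after;
            } else {
                self.holes.insert(start, size + after);
            }
        }
    }

    fn main() {
        let mut holes = Holes::new(1 << 20);
        let a = holes.find_free(3 * PAGE_SIZE).unwrap();
        holes.reserve(a, 3 * PAGE_SIZE);
        holes.unreserve(a, 3 * PAGE_SIZE);
        assert_eq!(holes.holes.len(), 1); // back to a single hole
    }

reserve mirrors the shrink/split cases in the diff, and unreserve coalesces with the holes on either side, so the map never contains two adjacent free ranges.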

@@ -223,9 +297,9 @@ impl Region {

/// Return all pages containing a chunk of the region
pub fn pages(&self) -> PageIter {
Page::range_inclusive(
Page::range_exclusive(
Page::containing_address(self.start_address()),
Page::containing_address(self.final_address())
Page::containing_address(self.end_address())
)
}
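
The switch from an inclusive range over final_address (start + size - 1) to an exclusive range over end_address (start + size) covers the same pages for non-empty regions, while avoiding the `- 1` that misbehaves on a zero-sized region. A small illustration of the equivalence, assuming 4 KiB pages:

    const PAGE_SIZE: usize = 4096;

    // Old style: first and last page, both inclusive.
    fn pages_inclusive(start: usize, size: usize) -> (usize, usize) {
        (start / PAGE_SIZE, (start + size - 1) / PAGE_SIZE)
    }

    // New style: first page and one-past-the-end page.
    fn pages_exclusive(start: usize, size: usize) -> (usize, usize) {
        (start / PAGE_SIZE, (start + size) / PAGE_SIZE)
    }

    fn main() {
        let (first, last) = pages_inclusive(0x1000, 2 * PAGE_SIZE);
        let (begin, end) = pages_exclusive(0x1000, 2 * PAGE_SIZE);
        assert_eq!((first..=last).count(), (begin..end).count());
    }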

@@ -382,6 +456,15 @@ impl Grant {
desc_opt: None,
}
}
pub fn zeroed_inactive(dst: Page, page_count: usize, flags: PageFlags<RmmA>, table: &mut InactivePageTable) -> Result<Grant> {
let mut inactive_mapper = table.mapper();

for page in Page::range_exclusive(dst, dst.next_by(page_count)) {
let flush = inactive_mapper.map(page, flags).map_err(|_| Error::new(ENOMEM))?;
unsafe { flush.ignore(); }
}
Ok(Grant { region: Region { start: dst.start_address(), size: page_count * PAGE_SIZE }, flags, mapped: true, owned: true, desc_opt: None })
}

pub fn map_inactive(src: VirtualAddress, dst: VirtualAddress, size: usize, flags: PageFlags<RmmA>, desc_opt: Option<GrantFileRef>, inactive_table: &mut InactivePageTable) -> Grant {
let active_table = unsafe { ActivePageTable::new(src.kind()) };

@@ -418,55 +501,44 @@ impl Grant {
}

/// This function should only be used in clone!
pub fn secret_clone(&self, new_start: VirtualAddress) -> Grant {
pub(crate) fn secret_clone(&self, inactive_table: &mut InactivePageTable) -> Grant {
assert!(self.mapped);

let mut active_table = unsafe { ActivePageTable::new(new_start.kind()) };
let active_table = unsafe { ActivePageTable::new(TableKind::User) };
let mut inactive_mapper = inactive_table.mapper();

let flush_all = PageFlushAll::new();

let start_page = Page::containing_address(self.region.start);
let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
for page in self.pages() {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let frame = active_table.translate_page(page).expect("grant references unmapped memory");
let old_frame = active_table.translate_page(page).expect("grant references unmapped memory");

let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
if self.owned {
let result = active_table.map(new_page, PageFlags::new().write(true))
let frame = if self.owned {
// TODO: CoW paging
let new_frame = crate::memory::allocate_frames(1)
.expect("TODO: handle ENOMEM in Grant::secret_clone");
flush_all.consume(result);

unsafe {
// We might as well use self.start_address() directly, but if we were to
// introduce SMAP it would help to only move to/from kernel memory, and we are
// copying physical frames anyway.
let src_pointer = RmmA::phys_to_virt(old_frame.start_address()).data() as *const u8;
let dst_pointer = RmmA::phys_to_virt(new_frame.start_address()).data() as *mut u8;
dst_pointer.copy_from_nonoverlapping(src_pointer, PAGE_SIZE);
}

new_frame
} else {
let result = active_table.map_to(new_page, frame, flags);
flush_all.consume(result);
}
}
old_frame
};

flush_all.flush();

if self.owned {
unsafe {
intrinsics::copy(self.region.start.data() as *const u8, new_start.data() as *mut u8, self.region.size);
}

let flush_all = PageFlushAll::new();

for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");

let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
let result = active_table.remap(new_page, flags);
flush_all.consume(result);
}

flush_all.flush();
let flush = inactive_mapper.map_to(page, frame, flags);
// SAFETY: This happens within an inactive table.
unsafe { flush.ignore() }
}

Grant {
region: Region {
start: new_start,
start: self.region.start,
size: self.region.size,
},
flags: self.flags,
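
secret_clone now copies owned pages frame-to-frame through the kernel's linear physical mapping (RmmA::phys_to_virt) instead of temporarily mapping and copying through user virtual addresses. A hedged sketch of that single step, with a made-up physmap base on a 64-bit target; the real offset is architecture-specific and supplied by rmm:

    const PAGE_SIZE: usize = 4096;
    const PHYS_OFFSET: usize = 0xFFFF_8000_0000_0000; // hypothetical physmap base

    // Copy one page-sized frame to another via their physmap windows.
    unsafe fn copy_frame(src_phys: usize, dst_phys: usize) {
        let src = (PHYS_OFFSET + src_phys) as *const u8;
        let dst = (PHYS_OFFSET + dst_phys) as *mut u8;
        // Distinct frames never overlap, so the non-overlapping copy is sound.
        dst.copy_from_nonoverlapping(src, PAGE_SIZE);
    }

Copying through the physmap means the new frame can be populated while it is mapped only in the inactive table, which is what lets the whole loop run without touching the active address space.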

@@ -476,32 +548,6 @@ impl Grant {
}
}

pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable) {
assert!(self.mapped);

let mut active_table = unsafe { ActivePageTable::new(new_start.kind()) };

let flush_all = PageFlushAll::new();

let start_page = Page::containing_address(self.region.start);
let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let (result, frame) = active_table.unmap_return(page, false);
flush_all.consume(result);

let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
let result = new_table.mapper().map_to(new_page, frame, flags);
// Ignore result due to mapping on inactive table
unsafe { result.ignore(); }
}

flush_all.flush();

self.region.start = new_start;
}

pub fn flags(&self) -> PageFlags<RmmA> {
self.flags
}

@@ -511,12 +557,9 @@ impl Grant {

let mut active_table = unsafe { ActivePageTable::new(self.start_address().kind()) };

let flush_all = PageFlushAll::new();

let start_page = Page::containing_address(self.start_address());
let end_page = Page::containing_address(self.final_address());
for page in Page::range_inclusive(start_page, end_page) {
for page in self.pages() {
let (result, frame) = active_table.unmap_return(page, false);
if self.owned {
//TODO: make sure this frame can be safely freed, physical use counter

@@ -533,13 +576,11 @@ impl Grant {
UnmapResult { file_desc: self.desc_opt.take() }
}

pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable) -> UnmapResult {
pub fn unmap_inactive(mut self, other_table: &mut InactivePageTable) -> UnmapResult {
assert!(self.mapped);

let start_page = Page::containing_address(self.start_address());
let end_page = Page::containing_address(self.final_address());
for page in Page::range_inclusive(start_page, end_page) {
let (result, frame) = new_table.mapper().unmap_return(page, false);
for page in self.pages() {
let (result, frame) = other_table.mapper().unmap_return(page, false);
if self.owned {
//TODO: make sure this frame can be safely freed, physical use counter
crate::memory::deallocate_frames(frame, 1);

@@ -594,6 +635,34 @@ impl Grant {

Some((before_grant, self, after_grant))
}
pub fn move_to_address_space(&mut self, new_start: Page, new_page_table: &mut InactivePageTable, flags: PageFlags<RmmA>, flush_all: &mut PageFlushAll<RmmA>) -> Grant {
assert!(self.mapped);

let mut active_table = unsafe { ActivePageTable::new(TableKind::User) };
let mut new_mapper = new_page_table.mapper();
let keep_parents = false;

for (i, page) in self.pages().enumerate() {
unsafe {
let (flush, frame) = active_table.unmap_return(page, keep_parents);
flush_all.consume(flush);

let flush = new_mapper.map_to(new_start.next_by(i), frame, flags);
flush.ignore();
}
}

let was_owned = core::mem::replace(&mut self.owned, false);
self.mapped = false;

Self {
region: Region::new(new_start.start_address(), self.region.size),
flags,
mapped: true,
owned: was_owned,
desc_opt: self.desc_opt.clone(),
}
}
}

impl Deref for Grant {

@@ -632,203 +701,85 @@ impl Drop for Grant {
}
}

#[derive(Clone, Debug)]
pub enum SharedMemory {
Owned(Arc<Mutex<Memory>>),
Borrowed(Weak<Mutex<Memory>>)
}

impl SharedMemory {
pub fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
match *self {
SharedMemory::Owned(ref memory_lock) => {
let mut memory = memory_lock.lock();
f(&mut *memory)
},
SharedMemory::Borrowed(ref memory_weak) => {
let memory_lock = memory_weak.upgrade().expect("SharedMemory::Borrowed no longer valid");
let mut memory = memory_lock.lock();
f(&mut *memory)
}
}
}

pub fn borrow(&self) -> SharedMemory {
match *self {
SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)),
SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone())
}
}
}

#[derive(Debug)]
pub struct Memory {
start: VirtualAddress,
size: usize,
flags: PageFlags<RmmA>,
}

impl Memory {
pub fn new(start: VirtualAddress, size: usize, flags: PageFlags<RmmA>, clear: bool) -> Self {
let mut memory = Memory {
start,
size,
flags,
};

memory.map(clear);

memory
}

pub fn to_shared(self) -> SharedMemory {
SharedMemory::Owned(Arc::new(Mutex::new(self)))
}

pub fn start_address(&self) -> VirtualAddress {
self.start
}

pub fn size(&self) -> usize {
self.size
}

pub fn flags(&self) -> PageFlags<RmmA> {
self.flags
}

pub fn pages(&self) -> PageIter {
let start_page = Page::containing_address(self.start);
let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
Page::range_inclusive(start_page, end_page)
}

fn map(&mut self, clear: bool) {
let mut active_table = unsafe { ActivePageTable::new(self.start.kind()) };

let flush_all = PageFlushAll::new();

for page in self.pages() {
let result = active_table
.map(page, self.flags)
.expect("TODO: handle ENOMEM in Memory::map");
flush_all.consume(result);
}

flush_all.flush();

if clear {
assert!(self.flags.has_write());
unsafe {
intrinsics::write_bytes(self.start_address().data() as *mut u8, 0, self.size);
}
}
}

fn unmap(&mut self) {
let mut active_table = unsafe { ActivePageTable::new(self.start.kind()) };

let flush_all = PageFlushAll::new();

for page in self.pages() {
let result = active_table.unmap(page);
flush_all.consume(result);
}

flush_all.flush();
}

/// A complicated operation to move a piece of memory to a new page table
/// It also allows for changing the address at the same time
pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable) {
let mut inactive_mapper = new_table.mapper();

let mut active_table = unsafe { ActivePageTable::new(new_start.kind()) };

let flush_all = PageFlushAll::new();

for page in self.pages() {
let (result, frame) = active_table.unmap_return(page, false);
flush_all.consume(result);

let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.start.data() + new_start.data()));
let result = inactive_mapper.map_to(new_page, frame, self.flags);
// This is not the active table, so the flush can be ignored
unsafe { result.ignore(); }
}

flush_all.flush();

self.start = new_start;
}

pub fn remap(&mut self, new_flags: PageFlags<RmmA>) {
let mut active_table = unsafe { ActivePageTable::new(self.start.kind()) };

let flush_all = PageFlushAll::new();

for page in self.pages() {
let result = active_table.remap(page, new_flags);
flush_all.consume(result);
}

flush_all.flush();

self.flags = new_flags;
}

pub fn resize(&mut self, new_size: usize, clear: bool) {
let mut active_table = unsafe { ActivePageTable::new(self.start.kind()) };

//TODO: Calculate page changes to minimize operations
if new_size > self.size {
let flush_all = PageFlushAll::new();

let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size));
let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size - 1));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_none() {
let result = active_table
.map(page, self.flags)
.expect("TODO: Handle OOM in Memory::resize");
flush_all.consume(result);
}
}

flush_all.flush();

if clear {
unsafe {
intrinsics::write_bytes((self.start.data() + self.size) as *mut u8, 0, new_size - self.size);
}
}
} else if new_size < self.size {
let flush_all = PageFlushAll::new();

let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size));
let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_some() {
let result = active_table.unmap(page);
flush_all.consume(result);
}
}

flush_all.flush();
}

self.size = new_size;
}
}

impl Drop for Memory {
fn drop(&mut self) {
self.unmap();
}
}

pub const DANGLING: usize = 1 << (usize::BITS - 2);

pub struct NewTables {
#[cfg(target_arch = "aarch64")]
pub new_ktable: InactivePageTable,
pub new_utable: InactivePageTable,

taken: bool,
}
impl NewTables {
pub fn take(&mut self) {
self.taken = true;
}
}

impl Drop for NewTables {
fn drop(&mut self) {
if self.taken { return }

unsafe {
use crate::memory::deallocate_frames;
deallocate_frames(Frame::containing_address(PhysicalAddress::new(self.new_utable.address())), 1);

#[cfg(target_arch = "aarch64")]
deallocate_frames(Frame::containing_address(PhysicalAddress::new(self.new_ktable.address())), 1);
}
}
}
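
NewTables::take plus the Drop impl above form a small RAII guard: the freshly allocated page-table frames are freed automatically on every early-return path, and the happy path calls take() to defuse the cleanup once ownership has been handed off. The same idiom in isolation (illustrative only, not the kernel's code):

    struct Guard { taken: bool }

    impl Guard {
        fn new() -> Self { Guard { taken: false } }
        // Call once the resource has been handed off (e.g. stored in a context).
        fn take(&mut self) { self.taken = true; }
    }

    impl Drop for Guard {
        fn drop(&mut self) {
            if self.taken { return }
            // Here the kernel would deallocate the page-table frames; the point
            // is that `?`-style early returns reach this path automatically.
            println!("error path: releasing resources");
        }
    }

    fn setup() -> Result<(), ()> {
        let mut guard = Guard::new();
        // fallible initialization using `?` would go here
        guard.take(); // success: suppress the cleanup
        Ok(())
    }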

/// Allocates a new identically mapped ktable and empty utable (same memory on x86_64).
pub fn setup_new_utable() -> Result<NewTables> {
let mut new_utable = unsafe { InactivePageTable::new(crate::memory::allocate_frames(1).ok_or(Error::new(ENOMEM))?) };

let mut new_ktable = if cfg!(target_arch = "aarch64") {
unsafe { InactivePageTable::new(crate::memory::allocate_frames(1).ok_or(Error::new(ENOMEM))?) }
} else {
unsafe { InactivePageTable::from_address(new_utable.address()) }
};

let active_ktable = unsafe { ActivePageTable::new(TableKind::Kernel) };

// Copy kernel image mapping
{
let frame = active_ktable.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped");
let flags = active_ktable.p4()[crate::KERNEL_PML4].flags();

new_ktable.mapper().p4_mut()[crate::KERNEL_PML4].set(frame, flags);
}

// Copy kernel heap mapping
{
let frame = active_ktable.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped");
let flags = active_ktable.p4()[crate::KERNEL_HEAP_PML4].flags();

new_ktable.mapper().p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags);
}

// Copy physmap mapping
{
let frame = active_ktable.p4()[crate::PHYS_PML4].pointed_frame().expect("physmap not mapped");
let flags = active_ktable.p4()[crate::PHYS_PML4].flags();
new_ktable.mapper().p4_mut()[crate::PHYS_PML4].set(frame, flags);
}
// Copy kernel percpu (similar to TLS) mapping.
{
let frame = active_ktable.p4()[crate::KERNEL_PERCPU_PML4].pointed_frame().expect("kernel TLS not mapped");
let flags = active_ktable.p4()[crate::KERNEL_PERCPU_PML4].flags();
new_ktable.mapper().p4_mut()[crate::KERNEL_PERCPU_PML4].set(frame, flags);
}

Ok(NewTables {
taken: false,
new_utable,
#[cfg(target_arch = "aarch64")]
new_ktable,
})
}
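
setup_new_utable shares the kernel half of the address space by aliasing whole top-level (PML4) entries rather than deep-copying page tables: every address space points at the same lower-level tables for the kernel image, heap, physmap and percpu regions, so kernel mappings added later appear everywhere at once. A toy model of entry aliasing (names and types are illustrative, not the kernel's):

    #[derive(Clone, Copy)]
    struct Pml4Entry(Option<usize>); // frame number of the next-level table, if any

    fn share_kernel_entries(
        active: &[Pml4Entry; 512],
        new: &mut [Pml4Entry; 512],
        kernel_slots: &[usize],
    ) {
        for &slot in kernel_slots {
            // Copying an entry copies only the pointer to the next-level table,
            // so both address spaces observe the same kernel mappings.
            new[slot] = active[slot];
        }
    }

    fn main() {
        let mut active = [Pml4Entry(None); 512];
        active[510] = Pml4Entry(Some(42));
        let mut new = [Pml4Entry(None); 512];
        share_kernel_entries(&active, &mut new, &[510]);
        assert_eq!(new[510].0, Some(42));
    }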

#[cfg(test)]
mod tests {
// TODO: Get these tests working

@@ -19,21 +19,6 @@ pub unsafe fn debugger() {
if let Some((a, b, c, d, e, f)) = context.syscall {
println!("syscall: {}", crate::syscall::debug::format_call(a, b, c, d, e, f));
}
if ! context.image.is_empty() {
println!("image:");
for shared_memory in context.image.iter() {
shared_memory.with(|memory| {
let region = crate::context::memory::Region::new(
memory.start_address(),
memory.size()
);
println!(
" virt 0x{:016x}:0x{:016x} size 0x{:08x}",
region.start_address().data(), region.final_address().data(), region.size()
);
});
}
}
{
let grants = context.grants.read();
if ! grants.is_empty() {

@@ -190,6 +190,8 @@ pub extern fn userspace_init() {
if bytes_read == 0 { break }
total_bytes_read += bytes_read;
}
data.truncate(total_bytes_read);

let _ = syscall::close(fd);

crate::syscall::process::usermode_bootstrap(data.into_boxed_slice());

@@ -34,8 +34,8 @@ impl MemoryScheme {
let active_table = unsafe { ActivePageTable::new(rmm::TableKind::User) };

for page in region.pages() {
if active_table.translate_page(page).is_some() {
println!("page at {:#x} was already mapped", page.start_address().data());
if let Some(flags) = active_table.translate_page_flags(page).filter(|flags| flags.has_present()) {
println!("page at {:#x} was already mapped, flags: {:?}", page.start_address().data(), flags);
return Err(Error::new(EEXIST))
}
}

@@ -101,6 +101,7 @@ enum Operation {
Regs(RegsKind),
Trace,
Static(&'static str),
Name,
}
impl Operation {
fn needs_child_process(self) -> bool {

@@ -109,6 +110,7 @@ impl Operation {
Self::Regs(_) => true,
Self::Trace => true,
Self::Static(_) => false,
Self::Name => false,
}
}
}

@@ -248,6 +250,7 @@ impl Scheme for ProcScheme {
Some("regs/env") => Operation::Regs(RegsKind::Env),
Some("trace") => Operation::Trace,
Some("exe") => Operation::Static("exe"),
Some("name") => Operation::Name,
_ => return Err(Error::new(EINVAL))
};

@@ -519,6 +522,13 @@ impl Scheme for ProcScheme {
// Return read events
Ok(read * mem::size_of::<PtraceEvent>())
}
Operation::Name => match &*context::contexts().current().ok_or(Error::new(ESRCH))?.read().name.read() {
name => {
let to_copy = cmp::min(buf.len(), name.len());
buf[..to_copy].copy_from_slice(&name.as_bytes()[..to_copy]);
Ok(to_copy)
}
}
}
}
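
The read handler for Operation::Name uses the standard truncating-copy idiom for scheme reads: copy min(buf.len(), data.len()) bytes and report how many were written, like a short read(2). The same pattern extracted for clarity:

    fn read_str_into(buf: &mut [u8], s: &str) -> usize {
        let to_copy = core::cmp::min(buf.len(), s.len());
        buf[..to_copy].copy_from_slice(&s.as_bytes()[..to_copy]);
        to_copy
    }

    fn main() {
        let mut buf = [0u8; 4];
        let n = read_str_into(&mut buf, "kernel");
        assert_eq!(&buf[..n], b"kern"); // callers must tolerate truncation
    }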

@@ -704,6 +714,11 @@ impl Scheme for ProcScheme {

Ok(mem::size_of::<u64>())
},
Operation::Name => {
let utf8 = alloc::string::String::from_utf8(buf.to_vec()).map_err(|_| Error::new(EINVAL))?.into_boxed_str();
*context::contexts().current().ok_or(Error::new(ESRCH))?.read().name.write() = utf8;
Ok(buf.len())
}
}
}

@@ -741,6 +756,7 @@ impl Scheme for ProcScheme {
Operation::Regs(RegsKind::Env) => "regs/env",
Operation::Trace => "trace",
Operation::Static(path) => path,
Operation::Name => "name",
});

let len = cmp::min(path.len(), buf.len());

@@ -52,6 +52,7 @@ impl SysScheme {
files.insert("scheme_num", Box::new(scheme_num::resource));
files.insert("syscall", Box::new(syscall::resource));
files.insert("uname", Box::new(uname::resource));
files.insert("env", Box::new(|| Ok(Vec::from(unsafe { crate::INIT_ENV }))));
#[cfg(target_arch = "x86_64")]
files.insert("spurious_irq", Box::new(irq::spurious_irq_resource));

@@ -152,6 +152,10 @@ impl UserInner {
let src_region = Region::new(VirtualAddress::new(src_address), offset + size).round();
let dst_region = grants.find_free_at(VirtualAddress::new(dst_address), src_region.size(), flags)?;

/*if !dst_region.intersect(Region::new(VirtualAddress::new(0x39d000), 1)).is_empty() {
dbg!(dst_region);
}*/

//TODO: Use syscall_head and syscall_tail to avoid leaking data
grants.insert(Grant::map_inactive(
src_region.start_address(),

@@ -166,20 +170,21 @@ impl UserInner {
}

pub fn release(&self, address: usize) -> Result<()> {
//dbg!(address);
if address == DANGLING {
return Ok(());
}
let context_lock = self.context.upgrade().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();

let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
let mut other_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
let mut grants = context.grants.write();

let region = match grants.contains(VirtualAddress::new(address)).map(Region::from) {
Some(region) => region,
None => return Err(Error::new(EFAULT)),
};
grants.take(&region).unwrap().unmap_inactive(&mut new_table);
grants.take(&region).unwrap().unmap_inactive(&mut other_table);
Ok(())
}

@@ -191,29 +191,15 @@ pub fn format_call(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -
"exit({})",
b
),
//TODO: Cleanup, do not allocate
/*SYS_EXEC => format!(
"exec({}, {:?}, {:?})",
b,
SYS_EXEC => format!(
"exec({:#x?}, {:p}, {:p})",
validate_slice(
c as *const [usize; 2],
d
).map(|slice| {
slice.iter().map(|a|
validate_slice(a[0] as *const u8, a[1]).ok()
.and_then(|s| ::core::str::from_utf8(s).ok())
).collect::<Vec<Option<&str>>>()
}),
validate_slice(
e as *const [usize; 2],
f
).map(|slice| {
slice.iter().map(|a|
validate_slice(a[0] as *const u8, a[1]).ok()
.and_then(|s| ::core::str::from_utf8(s).ok())
).collect::<Vec<Option<&str>>>()
})
),*/
b as *const crate::syscall::data::ExecMemRange,
c,
),
d as *const u8,
e as *const u8,
),
SYS_FUTEX => format!(
"futex({:#X} [{:?}], {}, {}, {}, {})",
b,

@@ -25,7 +25,7 @@ pub use self::process::*;
pub use self::time::*;
pub use self::validate::*;

use self::data::{Map, SigAction, Stat, TimeSpec};
use self::data::{ExecMemRange, Map, SigAction, Stat, TimeSpec};
use self::error::{Error, Result, ENOSYS};
use self::flag::{CloneFlags, MapFlags, PhysmapFlags, WaitFlags};
use self::number::*;

@@ -129,6 +129,8 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
SYS_GETPID => getpid().map(ContextId::into),
SYS_GETPGID => getpgid(ContextId::from(b)).map(ContextId::into),
SYS_GETPPID => getppid().map(ContextId::into),

SYS_EXEC => exec(validate_slice(b as *const ExecMemRange, c)?, d, e),
SYS_CLONE => {
let b = CloneFlags::from_bits_truncate(b);
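
The new SYS_EXEC takes a slice of ExecMemRange descriptors plus an instruction pointer and a stack pointer: userspace (the bootstrap/loader) prepares the image, then asks the kernel to move or allocate each range in a fresh address space. A schematic of the descriptor as the exec() body further down consumes it; the real definition lives in syscall::data, and the field order shown here is an assumption:

    #[repr(C)]
    struct ExecMemRange {
        old_address: usize, // !0 requests fresh zeroed pages instead of a move
        address: usize,     // page-aligned destination in the new address space
        size: usize,        // page-aligned length in bytes
        flags: usize,       // MapFlags bits for the destination mapping
    }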

@@ -209,12 +211,12 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
}
}

let debug = {
/*let debug = {
let contexts = crate::context::contexts();
if let Some(context_lock) = contexts.current() {
let context = context_lock.read();
let name = context.name.read();
if true || name.contains("redoxfs") {
if name.contains("redoxfs") {
if a == SYS_CLOCK_GETTIME || a == SYS_YIELD {
false
} else if (a == SYS_WRITE || a == SYS_FSYNC) && (b == 1 || b == 2) {

@@ -238,7 +240,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
}

println!("{}", debug::format_call(a, b, c, d, e, f));
}
}*/

// The next lines set the current syscall in the context struct, then once the inner() function
// completes, we set the current syscall to none.

@@ -263,7 +265,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
}
}

if debug {
/*if debug {
let contexts = crate::context::contexts();
if let Some(context_lock) = contexts.current() {
let context = context_lock.read();

@@ -280,7 +282,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
println!("Err({} ({:#X}))", err, err.errno);
}
}
}
}*/

// errormux turns Result<usize> into -errno
Error::mux(result)

@@ -9,23 +9,25 @@ use core::alloc::{GlobalAlloc, Layout};
use core::convert::TryFrom;
use core::ops::DerefMut;
use core::{intrinsics, mem, str};
use crate::context::file::{FileDescription, FileDescriptor};

use spin::{RwLock, RwLockWriteGuard};

use crate::context::file::{FileDescription, FileDescriptor};
use crate::context::memory::{UserGrants, Region};
use crate::context::{Context, ContextId, WaitpidKey};
use crate::context::memory::{Grant, Region, NewTables, page_flags, setup_new_utable, UserGrants};

use crate::context;
#[cfg(not(feature="doc"))]
use crate::elf::{self, program_header};
use crate::interrupt;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::allocate_frames;
use crate::memory::{allocate_frames, Frame, PhysicalAddress};
use crate::paging::mapper::PageFlushAll;
use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, TableKind, VirtualAddress, PAGE_SIZE};
use crate::paging::{ActivePageTable, InactivePageTable, Page, PageFlags, RmmA, TableKind, VirtualAddress, PAGE_SIZE};
use crate::{ptrace, syscall};
use crate::scheme::FileHandle;
use crate::start::usermode;
use crate::syscall::data::{SigAction, Stat};
use crate::syscall::data::{ExecMemRange, SigAction, Stat};
use crate::syscall::error::*;
use crate::syscall::flag::{wifcontinued, wifstopped, AT_ENTRY, AT_NULL, AT_PHDR, AT_PHENT, AT_PHNUM, CloneFlags,
CLONE_FILES, CLONE_FS, CLONE_SIGHAND, CLONE_STACK, CLONE_VFORK, CLONE_VM,

@@ -141,16 +143,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
}
}

if flags.contains(CLONE_VM) {
grants = Arc::clone(&context.grants);
} else {
let mut grants_set = UserGrants::default();
for grant in context.grants.read().iter() {
let start = VirtualAddress::new(grant.start_address().data() + crate::USER_TMP_GRANT_OFFSET - crate::USER_GRANT_OFFSET);
grants_set.insert(grant.secret_clone(start));
}
grants = Arc::new(RwLock::new(grants_set));
}
grants = Arc::clone(&context.grants);

if flags.contains(CLONE_VM) {
name = Arc::clone(&context.name);

@@ -197,7 +190,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
// If not cloning virtual memory, use fmap to re-obtain every grant where possible
if !flags.contains(CLONE_VM) {
let grants = Arc::get_mut(&mut grants).ok_or(Error::new(EBUSY))?.get_mut();
let old_grants = mem::take(&mut grants.inner);
let old_grants = mem::take(grants);

// TODO: Find some way to do this without having to allocate.

@@ -296,59 +289,27 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
context.arch.gsbase = x86::bits64::segmentation::rdgsbase() as usize;
}

let mut active_utable = unsafe { ActivePageTable::new(TableKind::User) };
let active_ktable = unsafe { ActivePageTable::new(TableKind::Kernel) };
if flags.contains(CloneFlags::CLONE_VM) {
// Reuse same CR3, same grants, everything.
context.grants = grants;
} else {
// TODO: Handle ENOMEM
let mut new_tables = setup_new_utable().expect("failed to allocate new page tables for cloned process");

let mut new_utable = unsafe {
let frame = allocate_frames(1).ok_or(Error::new(ENOMEM))?;
// SAFETY: This is safe because the frame is exclusive, owned, and valid, as we
// have just allocated it.
InactivePageTable::new(&mut active_utable, frame)
};
context.arch.set_page_utable(unsafe { new_utable.address() });
let mut new_grants = UserGrants::new();
for old_grant in grants.read().iter() {
new_grants.insert(old_grant.secret_clone(&mut new_tables.new_utable));
}
context.grants = Arc::new(RwLock::new(new_grants));

#[cfg(target_arch = "aarch64")]
let mut new_ktable = {
let mut new_ktable = {
let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table");
InactivePageTable::new(frame, &mut active_ktable)
};
context.arch.set_page_ktable(unsafe { new_ktable.address() });
new_ktable
};
drop(grants);

#[cfg(not(target_arch = "aarch64"))]
let mut new_ktable = unsafe {
InactivePageTable::from_address(new_utable.address())
};
new_tables.take();

// Copy kernel image mapping
{
let frame = active_ktable.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped");
let flags = active_ktable.p4()[crate::KERNEL_PML4].flags();
context.arch.set_page_utable(unsafe { new_tables.new_utable.address() });

new_ktable.mapper().p4_mut()[crate::KERNEL_PML4].set(frame, flags);
}

// Copy kernel heap mapping
{
let frame = active_ktable.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped");
let flags = active_ktable.p4()[crate::KERNEL_HEAP_PML4].flags();

new_ktable.mapper().p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags);
}

// Copy physmap mapping
{
let frame = active_ktable.p4()[crate::PHYS_PML4].pointed_frame().expect("physmap not mapped");
let flags = active_ktable.p4()[crate::PHYS_PML4].flags();
new_ktable.mapper().p4_mut()[crate::PHYS_PML4].set(frame, flags);
}
// Copy kernel percpu (similar to TLS) mapping.
{
let frame = active_ktable.p4()[crate::KERNEL_PERCPU_PML4].pointed_frame().expect("kernel TLS not mapped");
let flags = active_ktable.p4()[crate::KERNEL_PERCPU_PML4].flags();
new_ktable.mapper().p4_mut()[crate::KERNEL_PERCPU_PML4].set(frame, flags);
#[cfg(target_arch = "aarch64")]
context.arch.set_page_ktable(unsafe { new_tables.new_ktable.address() });
}

if let Some(fx) = kfx_opt.take() {
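
The rewritten clone path reduces CLONE_VM to an Arc clone of the whole grant set, while the non-CLONE_VM path secret_clone()s every grant into the freshly built tables from setup_new_utable. A schematic of just that sharing decision, using std's RwLock in place of the kernel's spin lock:

    use std::sync::{Arc, RwLock};

    #[derive(Clone, Default)]
    struct Grants(Vec<String>); // stand-in for UserGrants

    fn grants_for_child(clone_vm: bool, parent: &Arc<RwLock<Grants>>) -> Arc<RwLock<Grants>> {
        if clone_vm {
            // Share the address space: both contexts see the same grants.
            Arc::clone(parent)
        } else {
            // Deep copy: in the kernel each grant is secret_clone()d into the
            // child's inactive page table rather than plainly cloned.
            let copied = parent.read().unwrap().clone();
            Arc::new(RwLock::new(copied))
        }
    }

    fn main() {
        let parent = Arc::new(RwLock::new(Grants(vec!["heap".into()])));
        let shared = grants_for_child(true, &parent);
        assert!(Arc::ptr_eq(&parent, &shared));
        let copied = grants_for_child(false, &parent);
        assert!(!Arc::ptr_eq(&parent, &copied));
    }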

@@ -391,6 +352,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
}
}

context.name = name;

context.cwd = cwd;

@@ -437,7 +399,7 @@ fn empty<'lock>(context_lock: &'lock RwLock<Context>, mut context: RwLockWriteGu
let mut grants_guard = grants_lock_mut.get_mut();

let grants = mem::replace(&mut *grants_guard, UserGrants::default());
for grant in grants.inner.into_iter() {
for grant in grants.into_iter() {
let unmap_result = if reaping {
log::error!("{}: {}: Grant should not exist: {:?}", context.id.into(), *context.name.read(), grant);

@@ -1042,7 +1004,7 @@ pub fn usermode_bootstrap(mut data: Box<[u8]>) -> ! {
assert!(!data.is_empty());

const LOAD_BASE: usize = 0;
let grant = context::memory::Grant::map(VirtualAddress::new(LOAD_BASE), data.len(), PageFlags::new().user(true).write(true).execute(true));
let grant = context::memory::Grant::map(VirtualAddress::new(LOAD_BASE), ((data.len()+PAGE_SIZE-1)/PAGE_SIZE)*PAGE_SIZE, PageFlags::new().user(true).write(true).execute(true));

let mut active_table = unsafe { ActivePageTable::new(TableKind::User) };

@@ -1051,6 +1013,7 @@ pub fn usermode_bootstrap(mut data: Box<[u8]>) -> ! {
let frame = active_table.translate_page(page).expect("expected mapped init memory to have a corresponding frame");
unsafe { ((frame.start_address().data() + crate::KERNEL_OFFSET) as *mut u8).copy_from_nonoverlapping(data.as_ptr().add(index * PAGE_SIZE), len); }
}

context::contexts().current().expect("expected a context to exist when executing init").read().grants.write().insert(grant);

drop(data);
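
The bootstrap grant is now sized to whole pages with the usual round-up-to-multiple idiom, ((len + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE; in isolation:

    const PAGE_SIZE: usize = 4096;

    fn round_up_pages(len: usize) -> usize {
        ((len + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE
    }

    fn main() {
        assert_eq!(round_up_pages(1), PAGE_SIZE);
        assert_eq!(round_up_pages(PAGE_SIZE), PAGE_SIZE);
        assert_eq!(round_up_pages(PAGE_SIZE + 1), 2 * PAGE_SIZE);
    }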

@@ -1060,6 +1023,134 @@ pub fn usermode_bootstrap(mut data: Box<[u8]>) -> ! {
let start = ((LOAD_BASE + 0x18) as *mut usize).read();
// Start with the (probably) ELF executable loaded, without any stack or the ability to load
// sections to arbitrary addresses.
crate::arch::start::usermode(start, 0, 0, 0);
usermode(start, 0, 0, 0);
}
}

pub fn exec(memranges: &[ExecMemRange], instruction_ptr: usize, stack_ptr: usize) -> Result<usize> {
// TODO: rlimit?
if memranges.len() > 1024 {
return Err(Error::new(EINVAL));
}

let mut new_grants = UserGrants::new();

{
let current_context_lock = Arc::clone(context::contexts().current().ok_or(Error::new(ESRCH))?);

// Linux will always destroy other threads immediately if one of them executes execve(2).
// At the moment the Redox kernel is ignorant of threads, other than them sharing files,
// memory, etc. We fail with EBUSY if any resources that are being replaced are shared.

let mut old_grants = Arc::try_unwrap(mem::take(&mut current_context_lock.write().grants)).map_err(|_| Error::new(EBUSY))?.into_inner();
// TODO: Allow multiple contexts which share the file table, to have one of them run exec?
let mut old_files = Arc::try_unwrap(mem::take(&mut current_context_lock.write().files)).map_err(|_| Error::new(EBUSY))?.into_inner();

// FIXME: Handle leak in case of ENOMEM.
let mut new_tables = setup_new_utable()?;

let mut flush = PageFlushAll::new();

// FIXME: This is to the extreme, but fetch with atomic volatile?
for memrange in memranges.iter().copied() {
let old_address = if memrange.old_address == !0 { None } else { Some(memrange.old_address) };

if memrange.address % PAGE_SIZE != 0 || old_address.map_or(false, |a| a % PAGE_SIZE != 0) || memrange.size % PAGE_SIZE != 0 {
return Err(Error::new(EINVAL));
}
if memrange.size == 0 { continue }

let new_start = Page::containing_address(VirtualAddress::new(memrange.address));
let flags = MapFlags::from_bits(memrange.flags).ok_or(Error::new(EINVAL))?;
let page_count = memrange.size / PAGE_SIZE;
let flags = page_flags(flags);

if let Some(old_address) = old_address {
let old_start = VirtualAddress::new(memrange.old_address);

let entire_region = Region::new(old_start, memrange.size);

// TODO: This will do one B-Tree search for each memrange. If a process runs exec
// and keeps every range the way it is, then this would be O(n log n)!
loop {
let region = match old_grants.conflicts(entire_region).next().map(|g| *g.region()) {
Some(r) => r,
None => break,
};
let owned = old_grants.take(&region).expect("cannot fail");
let (before, mut current, after) = owned.extract(region).expect("cannot fail");

if let Some(before) = before { old_grants.insert(before); }
if let Some(after) = after { old_grants.insert(after); }

new_grants.insert(current.move_to_address_space(new_start, &mut new_tables.new_utable, flags, &mut flush));
}
} else {
new_grants.insert(Grant::zeroed_inactive(new_start, page_count, flags, &mut new_tables.new_utable)?);
}
}

{
unsafe { flush.ignore(); }

new_tables.take();

let mut context = current_context_lock.write();
context.grants = Arc::new(RwLock::new(new_grants));

let old_utable = context.arch.get_page_utable();
let old_frame = Frame::containing_address(PhysicalAddress::new(old_utable));

context.arch.set_page_utable(unsafe { new_tables.new_utable.address() });

#[cfg(target_arch = "x86_64")]
unsafe { x86::controlregs::cr3_write(new_tables.new_utable.address() as u64); }

for old_grant in old_grants.into_iter() {
old_grant.unmap_inactive(&mut unsafe { InactivePageTable::from_address(old_utable) });
}
crate::memory::deallocate_frames(old_frame, 1);

#[cfg(target_arch = "aarch64")]
context.arch.set_page_ktable(unsafe { new_tables.new_ktable.address() });

context.actions = Arc::new(RwLock::new(vec![(
SigAction {
sa_handler: unsafe { mem::transmute(SIG_DFL) },
sa_mask: [0; 2],
sa_flags: SigActionFlags::empty(),
},
0
); 128]));
let was_vfork = mem::replace(&mut context.vfork, false);

// TODO: Reuse in place if the file table is not shared.
drop(context);

for file_slot in old_files.iter_mut().filter(|file_opt| file_opt.as_ref().map_or(false, |file| file.cloexec)) {
let file = file_slot.take().expect("iterator filter requires file slot to be occupied, not None");
let _ = file.close();
}
let mut context = current_context_lock.write();

context.files = Arc::new(RwLock::new(old_files));
let ppid = context.ppid;
drop(context);

// TODO: Should this code be preserved as is?
if was_vfork {
let contexts = context::contexts();
if let Some(context_lock) = contexts.get(ppid) {
let mut context = context_lock.write();
if !context.unblock() {
println!("{} not blocked for exec vfork unblock", ppid.into());
}
} else {
println!("{} not found for exec vfork unblock", ppid.into());
}
}
}
}

unsafe { usermode(instruction_ptr, stack_ptr, 0, 0); }
}