Add mmap-min-addr and support unmapping parent PTs.

4lDO2
2022-07-23 19:14:46 +02:00
parent 0720db2265
commit 5bbfdcda6b
9 changed files with 219 additions and 118 deletions

View File

@@ -162,7 +162,7 @@ impl Madt {
KernelMapper::lock()
.get_mut()
.expect("expected kernel page table not to be recursively locked while initializing MADT")
.unmap_phys(trampoline_page.start_address())
.unmap_phys(trampoline_page.start_address(), true)
.expect("failed to unmap trampoline page")
};
flush.flush();
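
Every `unmap_phys` call site in this commit gains a second boolean argument; judging by the commit title, it controls whether intermediate page tables that become empty after the unmap are freed as well. A toy model of that semantic, using a two-level map in place of real page tables (names and structure are illustrative, not rmm's actual API):

    use std::collections::BTreeMap;

    /// Toy two-level "page table": top-level index -> (leaf index -> frame number).
    struct ToyMapper {
        tables: BTreeMap<usize, BTreeMap<usize, usize>>,
    }

    impl ToyMapper {
        /// Unmap a leaf entry; if `unmap_parents` is true and the parent table
        /// is left empty, free the parent table as well.
        fn unmap(&mut self, top: usize, leaf: usize, unmap_parents: bool) -> Option<usize> {
            let table = self.tables.get_mut(&top)?;
            let frame = table.remove(&leaf)?;
            if unmap_parents && table.is_empty() {
                self.tables.remove(&top);
            }
            Some(frame)
        }
    }

    fn main() {
        let mut mapper = ToyMapper { tables: BTreeMap::new() };
        mapper.tables.entry(0).or_default().insert(1, 0xdead);
        assert_eq!(mapper.unmap(0, 1, true), Some(0xdead));
        // With `unmap_parents == true`, the now-empty parent table was freed too:
        assert!(mapper.tables.is_empty());
    }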

View File

@@ -51,7 +51,7 @@ impl LocalApic {
if ! self.x2 {
log::info!("Detected xAPIC at {:#x}", physaddr.data());
if let Some((_entry, _, flush)) = mapper.unmap_phys(virtaddr) {
if let Some((_entry, _, flush)) = mapper.unmap_phys(virtaddr, true) {
// Unmap xAPIC page if already mapped
flush.flush();
}

View File

@@ -258,6 +258,11 @@ pub struct Context {
/// set since there is no interrupt stack (unless the kernel stack is copied, but that is in my
/// opinion hackier and less efficient than this (and UB to do in Rust)).
pub clone_entry: Option<[usize; 2]>,
/// Lowest address at which mmap invocations may be placed when the user has not already
/// specified one (using MAP_FIXED/MAP_FIXED_NOREPLACE). Cf. Linux's
/// `/proc/sys/vm/mmap_min_addr`, except that this kernel is memory-safe and does not have
/// to protect itself against null pointer dereferences, so fixed mmaps below this address
/// are still allowed.
pub mmap_min: usize,
}
// Necessary because GlobalAlloc::dealloc requires the layout to be the same, and therefore Box
@@ -373,6 +378,7 @@ impl Context {
ptrace_stop: false,
sigstack: None,
clone_entry: None,
mmap_min: MMAP_MIN_DEFAULT,
};
Ok(this)
}
@@ -566,3 +572,5 @@ impl Context {
); 128]))
}
}
pub const MMAP_MIN_DEFAULT: usize = PAGE_SIZE;
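
The new field is exposed through the `proc` scheme later in this commit, read and written as a native-endian `usize`. A hedged userspace sketch; the `proc:current/mmap-min-addr` path format is an assumption based on how the scheme parses `<pid>/<operation>`, and the read/write handlers here appear to ignore the file offset:

    use std::fs::OpenOptions;
    use std::io::{Read, Write};

    fn main() -> std::io::Result<()> {
        // Assumed path format: `proc:<pid>/mmap-min-addr`.
        let mut file = OpenOptions::new()
            .read(true)
            .write(true)
            .open("proc:current/mmap-min-addr")?;

        // Read the current minimum mmap address (native-endian usize).
        let mut buf = [0u8; std::mem::size_of::<usize>()];
        file.read_exact(&mut buf)?;
        println!("mmap_min = {:#x}", usize::from_ne_bytes(buf));

        // Raise it, e.g. to 64 KiB, to catch small-offset null dereferences.
        file.write_all(&usize::to_ne_bytes(0x10000))?;
        Ok(())
    }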

View File

@@ -1,5 +1,5 @@
use alloc::collections::{BTreeMap, BTreeSet};
use alloc::sync::Arc;
use alloc::{sync::Arc, vec::Vec};
use core::borrow::Borrow;
use core::cmp::{self, Eq, Ordering, PartialEq, PartialOrd};
use core::fmt::{self, Debug};
@@ -14,7 +14,7 @@ use rmm::Arch as _;
use crate::arch::paging::PAGE_SIZE;
use crate::context::file::FileDescriptor;
use crate::memory::{Enomem, Frame};
use crate::paging::mapper::{Flusher, PageFlushAll};
use crate::paging::mapper::{Flusher, InactiveFlusher, PageFlushAll};
use crate::paging::{KernelMapper, Page, PageFlags, PageIter, PageMapper, PhysicalAddress, RmmA, round_up_pages, VirtualAddress};
pub fn page_flags(flags: MapFlags) -> PageFlags<RmmA> {
@@ -54,6 +54,10 @@ pub struct AddrSpace {
pub grants: UserGrants,
}
impl AddrSpace {
pub fn current() -> Result<Arc<RwLock<Self>>> {
Ok(Arc::clone(super::current()?.read().addr_space()?))
}
/// Attempt to clone an existing address space so that all mappings are copied (CoW).
pub fn try_clone(&mut self) -> Result<Arc<RwLock<Self>>> {
let mut new = new_addrspace()?;
@@ -102,6 +106,50 @@ impl AddrSpace {
pub fn is_current(&self) -> bool {
self.table.utable.is_current()
}
pub fn mprotect(&mut self, base: Page, page_count: usize, flags: MapFlags) -> Result<()> {
let (mut active, mut inactive);
let mut flusher = if self.is_current() {
active = PageFlushAll::new();
&mut active as &mut dyn Flusher<RmmA>
} else {
inactive = InactiveFlusher::new();
&mut inactive as &mut dyn Flusher<RmmA>
};
let mut mapper = &mut self.table.utable;
let region = Region::new(base.start_address(), page_count * PAGE_SIZE);
// TODO: Remove allocation
let regions = self.grants.conflicts(region).map(|g| *g.region()).collect::<Vec<_>>();
for grant_region in regions {
let grant = self.grants.take(&grant_region).expect("grant cannot magically disappear while we hold the lock!");
let intersection = grant_region.intersect(region);
let (before, mut grant, after) = grant.extract(intersection).expect("failed to extract grant");
if let Some(before) = before { self.grants.insert(before); }
if let Some(after) = after { self.grants.insert(after); }
if !grant.is_owned() && flags.contains(MapFlags::PROT_WRITE) && !grant.flags().has_write() {
self.grants.insert(grant);
return Err(Error::new(EACCES));
}
let new_flags = grant.flags()
// TODO: Require a capability in order to map executable memory?
.execute(flags.contains(MapFlags::PROT_EXEC))
.write(flags.contains(MapFlags::PROT_WRITE));
// TODO: Allow enabling/disabling read access on architectures which allow it. On
// x86_64 with protection keys (although only enforced by userspace), and AArch64 (I
// think), execute-only memory is also supported.
grant.remap(mapper, &mut flusher, new_flags);
self.grants.insert(grant);
}
Ok(())
}
}
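
The new `mprotect` relies on `Grant::extract` splitting a conflicting grant into up to three pieces, of which only the intersection is remapped. A minimal model of that split on plain ranges (field names are illustrative, not the kernel's):

    /// Toy model of the before/intersection/after split used above.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Region { start: usize, size: usize }

    impl Region {
        fn end(&self) -> usize { self.start + self.size }

        /// Returns (before, intersection, after); `before`/`after` are None when empty.
        fn extract(self, within: Region) -> (Option<Region>, Region, Option<Region>) {
            let mid_start = self.start.max(within.start);
            let mid_end = self.end().min(within.end());
            let before = (self.start < mid_start)
                .then(|| Region { start: self.start, size: mid_start - self.start });
            let after = (mid_end < self.end())
                .then(|| Region { start: mid_end, size: self.end() - mid_end });
            (before, Region { start: mid_start, size: mid_end - mid_start }, after)
        }
    }

    fn main() {
        let grant = Region { start: 0x1000, size: 0x4000 };
        let req = Region { start: 0x2000, size: 0x1000 };
        let (before, mid, after) = grant.extract(req);
        assert_eq!(before, Some(Region { start: 0x1000, size: 0x1000 }));
        assert_eq!(mid, Region { start: 0x2000, size: 0x1000 });
        assert_eq!(after, Some(Region { start: 0x3000, size: 0x2000 }));
    }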
#[derive(Debug)]
@@ -149,22 +197,31 @@ impl UserGrants {
}
/// Return a free region with the specified size
// TODO: Alignment (x86_64: 4 KiB, 2 MiB, or 1 GiB).
pub fn find_free(&self, size: usize) -> Option<Region> {
pub fn find_free(&self, min: usize, size: usize) -> Option<Region> {
// Get first available hole, but do reserve the page starting from zero as most compiled
// languages cannot handle null pointers safely even if they point to valid memory. If an
// application absolutely needs to map the 0th page, they will have to do so explicitly via
// MAP_FIXED/MAP_FIXED_NOREPLACE.
// TODO: Allow explicitly allocating guard pages?
let (hole_start, hole_size) = self.holes.iter().find(|(hole_offset, hole_size)| size <= if hole_offset.data() == 0 { hole_size.saturating_sub(PAGE_SIZE) } else { **hole_size })?;
let (hole_start, hole_size) = self.holes.iter()
.skip_while(|(hole_offset, hole_size)| hole_offset.data() + **hole_size <= min)
.find(|(hole_offset, hole_size)| {
let avail_size = if hole_offset.data() <= min && min <= hole_offset.data() + **hole_size {
**hole_size - (min - hole_offset.data())
} else {
**hole_size
};
size <= avail_size
})?;
// Create new region
Some(Region::new(VirtualAddress::new(cmp::max(hole_start.data(), PAGE_SIZE)), size))
Some(Region::new(VirtualAddress::new(cmp::max(hole_start.data(), min)), size))
}
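
The hole search above can be modeled with a plain `BTreeMap` of hole start to hole size; this toy version mirrors the skip/available-size logic, minus the kernel's `VirtualAddress` wrapper:

    use std::collections::BTreeMap;

    /// Toy version of `find_free`: pick the first hole that still has `size`
    /// bytes available at or above `min`; returns (region start, size).
    fn find_free(holes: &BTreeMap<usize, usize>, min: usize, size: usize) -> Option<(usize, usize)> {
        let (&start, _) = holes.iter()
            // Holes that end at or below `min` can never satisfy the request.
            .skip_while(|(&start, &hole_size)| start + hole_size <= min)
            .find(|(&start, &hole_size)| {
                // If `min` falls inside the hole, only the part above `min` counts
                // (after skip_while, `start <= min` implies exactly that).
                let avail = if start <= min { hole_size - (min - start) } else { hole_size };
                size <= avail
            })?;
        Some((start.max(min), size))
    }

    fn main() {
        let mut holes = BTreeMap::new();
        holes.insert(0x0000, 0x3000); // straddles min: only 0x2000 usable above 0x1000
        holes.insert(0x8000, 0x4000);
        assert_eq!(find_free(&holes, 0x1000, 0x2000), Some((0x1000, 0x2000)));
        assert_eq!(find_free(&holes, 0x1000, 0x2800), Some((0x8000, 0x2800)));
    }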
/// Return a free region, respecting the user's hinted address and flags. Address may be null.
pub fn find_free_at(&mut self, address: VirtualAddress, size: usize, flags: MapFlags) -> Result<Region> {
pub fn find_free_at(&mut self, min: usize, address: VirtualAddress, size: usize, flags: MapFlags) -> Result<Region> {
if address == VirtualAddress::new(0) {
// Free hands!
return self.find_free(size).ok_or(Error::new(ENOMEM));
return self.find_free(min, size).ok_or(Error::new(ENOMEM));
}
// The user wished to have this region...
@@ -178,18 +235,18 @@ impl UserGrants {
return Err(Error::new(EINVAL));
}
if let Some(grant) = self.contains(requested.start_address()) {
if let Some(grant) = self.conflicts(requested).next() {
// ... but it already exists
if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) {
println!("grant: {:#x} conflicts with: {:#x} - {:#x}", address.data(), grant.start_address().data(), grant.end_address().data());
return Err(Error::new(EEXIST));
} else if flags.contains(MapFlags::MAP_FIXED) {
// TODO: Overwrite existing grant
}
if flags.contains(MapFlags::MAP_FIXED) {
return Err(Error::new(EOPNOTSUPP));
} else {
// TODO: Find grant close to requested address?
requested = self.find_free(requested.size()).ok_or(Error::new(ENOMEM))?;
requested = self.find_free(min, requested.size()).ok_or(Error::new(ENOMEM))?;
}
}
@@ -241,9 +298,36 @@ impl UserGrants {
holes.insert(grant.start_address(), grant.size() + exactly_after_size.unwrap_or(0));
}
}
pub fn insert(&mut self, grant: Grant) {
pub fn insert(&mut self, mut grant: Grant) {
assert!(self.conflicts(*grant).next().is_none());
self.reserve(&grant);
// FIXME: This currently causes issues, mostly caused by old code that unmaps only based on
// offsets. For instance, the scheme code does not specify any length, and would thus unmap
// memory outside of what it intended to.
/*
let before_region = self.inner
.range(..grant.region).next_back()
.filter(|b| b.end_address() == grant.start_address() && b.can_be_merged_if_adjacent(&grant)).map(|g| g.region);
let after_region = self.inner
.range(Region::new(grant.end_address(), 1)..).next()
.filter(|a| a.start_address() == grant.end_address() && a.can_be_merged_if_adjacent(&grant)).map(|g| g.region);
if let Some(before) = before_region {
grant.region.start = before.start;
grant.region.size += before.size;
core::mem::forget(self.inner.take(&before));
}
if let Some(after) = after_region {
grant.region.size += after.size;
core::mem::forget(self.inner.take(&after));
}
*/
self.inner.insert(grant);
}
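
The disabled block above would coalesce a new grant with byte-adjacent neighbors. A toy model of that merge step on plain (start, size) ranges, ignoring the flags and file-descriptor checks the real predicate performs:

    /// Toy adjacency merge mirroring the disabled coalescing above.
    fn merge_adjacent(mut ranges: Vec<(usize, usize)>, mut new: (usize, usize)) -> Vec<(usize, usize)> {
        ranges.sort_unstable();
        // Absorb a neighbor ending exactly where the new range starts...
        if let Some(pos) = ranges.iter().position(|&(s, len)| s + len == new.0) {
            let (s, len) = ranges.remove(pos);
            new = (s, len + new.1);
        }
        // ...and one starting exactly where the new range ends.
        if let Some(pos) = ranges.iter().position(|&(s, _)| s == new.0 + new.1) {
            let (_, len) = ranges.remove(pos);
            new.1 += len;
        }
        ranges.push(new);
        ranges
    }

    fn main() {
        let merged = merge_adjacent(vec![(0x1000, 0x1000), (0x3000, 0x1000)], (0x2000, 0x1000));
        assert_eq!(merged, vec![(0x1000, 0x3000)]);
    }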
pub fn remove(&mut self, region: &Region) -> bool {
@@ -493,14 +577,14 @@ impl Grant {
pub fn borrow(src_base: Page, dst_base: Page, page_count: usize, flags: PageFlags<RmmA>, desc_opt: Option<GrantFileRef>, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, dst_flusher: impl Flusher<RmmA>) -> Result<Grant, Enomem> {
Self::copy_inner(src_base, dst_base, page_count, flags, desc_opt, src_mapper, dst_mapper, (), dst_flusher, false, false)
}
pub fn reborrow(src_grant: &Grant, dst_base: Page, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, dst_flusher: impl Flusher<RmmA>) -> Result<Grant, Enomem> {
Self::borrow(Page::containing_address(src_grant.start_address()), dst_base, src_grant.size() / PAGE_SIZE, src_grant.flags(), src_grant.desc_opt.clone(), src_mapper, dst_mapper, dst_flusher)
pub fn reborrow(src_grant: &Grant, dst_base: Page, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, dst_flusher: impl Flusher<RmmA>) -> Result<Grant> {
Self::borrow(Page::containing_address(src_grant.start_address()), dst_base, src_grant.size() / PAGE_SIZE, src_grant.flags(), src_grant.desc_opt.clone(), src_mapper, dst_mapper, dst_flusher).map_err(Into::into)
}
pub fn transfer(mut src_grant: Grant, dst_base: Page, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, src_flusher: impl Flusher<RmmA>, dst_flusher: impl Flusher<RmmA>) -> Result<Grant, Enomem> {
pub fn transfer(mut src_grant: Grant, dst_base: Page, src_mapper: &mut PageMapper, dst_mapper: &mut PageMapper, src_flusher: impl Flusher<RmmA>, dst_flusher: impl Flusher<RmmA>) -> Result<Grant> {
assert!(core::mem::replace(&mut src_grant.mapped, false));
let desc_opt = src_grant.desc_opt.take();
Self::copy_inner(Page::containing_address(src_grant.start_address()), dst_base, src_grant.size() / PAGE_SIZE, src_grant.flags(), desc_opt, src_mapper, dst_mapper, src_flusher, dst_flusher, src_grant.owned, true)
Self::copy_inner(Page::containing_address(src_grant.start_address()), dst_base, src_grant.size() / PAGE_SIZE, src_grant.flags(), desc_opt, src_mapper, dst_mapper, src_flusher, dst_flusher, src_grant.owned, true).map_err(Into::into)
}
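
Both signatures here widen from `Result<Grant, Enomem>` to the general syscall `Result`, and the appended `.map_err(Into::into)` calls depend on a `From<Enomem>` conversion into the syscall error type. A minimal sketch of that pattern with stand-in types (the kernel's actual definitions may differ):

    /// Minimal stand-ins for the kernel's error types.
    #[derive(Debug)]
    struct Enomem;

    #[derive(Debug)]
    struct Error { errno: i32 }

    const ENOMEM: i32 = 12;

    impl From<Enomem> for Error {
        fn from(_: Enomem) -> Self {
            Error { errno: ENOMEM }
        }
    }

    fn fallible() -> Result<(), Enomem> { Err(Enomem) }

    fn syscall_layer() -> Result<(), Error> {
        // `.map_err(Into::into)` widens the narrow allocation error into the
        // general syscall error, as `reborrow`/`transfer` do above.
        fallible().map_err(Into::into)
    }

    fn main() {
        assert_eq!(syscall_layer().unwrap_err().errno, ENOMEM);
    }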
fn copy_inner(
@@ -521,7 +605,7 @@ impl Grant {
for index in 0..page_count {
let src_page = src_base.next_by(index);
let (address, entry_flags) = if unmap {
let (entry, entry_flags, flush) = unsafe { src_mapper.unmap_phys(src_page.start_address()).expect("grant references unmapped memory") };
let (entry, entry_flags, flush) = unsafe { src_mapper.unmap_phys(src_page.start_address(), true).expect("grant references unmapped memory") };
src_flusher.consume(flush);
(entry, entry_flags)
@@ -543,7 +627,7 @@ impl Grant {
if successful_count != page_count {
// TODO: The grant will be lost in case of ENOMEM. Allow putting it back in source?
for index in 0..successful_count {
let (frame, _, flush) = match unsafe { dst_mapper.unmap_phys(dst_base.next_by(index).start_address()) } {
let (frame, _, flush) = match unsafe { dst_mapper.unmap_phys(dst_base.next_by(index).start_address(), true) } {
Some(f) => f,
None => unreachable!("grant unmapped by someone else in the meantime despite having a &mut PageMapper"),
};
@@ -572,11 +656,24 @@ impl Grant {
self.flags
}
pub fn remap(&mut self, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>, flags: PageFlags<RmmA>) {
assert!(self.mapped);
for page in self.pages() {
unsafe {
let result = mapper.remap(page.start_address(), flags).expect("grant contained unmap address");
flusher.consume(result);
}
}
self.flags = flags;
}
pub fn unmap(mut self, mapper: &mut PageMapper, mut flusher: impl Flusher<RmmA>) -> UnmapResult {
assert!(self.mapped);
for page in self.pages() {
let (entry, _, flush) = unsafe { mapper.unmap_phys(page.start_address()) }
let (entry, _, flush) = unsafe { mapper.unmap_phys(page.start_address(), true) }
.unwrap_or_else(|| panic!("missing page at {:#0x} for grant {:?}", page.start_address().data(), self));
if self.owned {
@@ -643,6 +740,18 @@ impl Grant {
Some((before_grant, self, after_grant))
}
// FIXME
/*
pub fn can_be_merged_if_adjacent(&self, with: &Self) -> bool {
match (&self.desc_opt, &with.desc_opt) {
(None, None) => (),
(Some(ref a), Some(ref b)) if Arc::ptr_eq(&a.desc.description, &b.desc.description) => (),
_ => return false,
}
self.owned == with.owned && self.mapped == with.mapped && self.flags.data() == with.flags.data()
}
*/
}
impl Deref for Grant {

View File

@@ -4,6 +4,7 @@ use crate::memory::{free_frames, used_frames, PAGE_SIZE};
use crate::paging::{mapper::PageFlushAll, Page, VirtualAddress};
use crate::syscall::data::{Map, StatVfs};
use crate::syscall::error::*;
use crate::syscall::flag::MapFlags;
use crate::syscall::scheme::Scheme;
pub struct MemoryScheme;
@@ -25,7 +26,7 @@ impl MemoryScheme {
let mut addr_space = context.addr_space()?.write();
let addr_space = &mut *addr_space;
let region = addr_space.grants.find_free_at(VirtualAddress::new(map.address), map.size, map.flags)?.round();
let region = addr_space.grants.find_free_at(context.mmap_min, VirtualAddress::new(map.address), map.size, map.flags)?.round();
addr_space.grants.insert(Grant::zeroed(Page::containing_address(region.start_address()), map.size / PAGE_SIZE, page_flags(map.flags), &mut addr_space.table.utable, PageFlushAll::new())?);

View File

@@ -145,6 +145,8 @@ enum Operation {
Sigactions(Arc<RwLock<Vec<(SigAction, usize)>>>),
CurrentSigactions,
AwaitingSigactionsChange(Arc<RwLock<Vec<(SigAction, usize)>>>),
MmapMinAddr,
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum Attr {
@@ -281,15 +283,11 @@ impl ProcScheme {
}
}
fn current_addrspace() -> Result<Arc<RwLock<AddrSpace>>> {
Ok(Arc::clone(context::current()?.read().addr_space()?))
}
impl ProcScheme {
fn open_inner(&self, pid: ContextId, operation_str: Option<&str>, flags: usize, uid: u32, gid: u32) -> Result<usize> {
let operation = match operation_str {
Some("mem") => Operation::Memory { addrspace: current_addrspace()? },
Some("addrspace") => Operation::AddrSpace { addrspace: current_addrspace()? },
Some("mem") => Operation::Memory { addrspace: AddrSpace::current()? },
Some("addrspace") => Operation::AddrSpace { addrspace: AddrSpace::current()? },
Some("filetable") => Operation::Filetable { filetable: Arc::clone(&context::current()?.read().files) },
Some("current-addrspace") => Operation::CurrentAddrSpace,
Some("current-filetable") => Operation::CurrentFiletable,
@@ -306,6 +304,7 @@ impl ProcScheme {
Some("open_via_dup") => Operation::OpenViaDup,
Some("sigactions") => Operation::Sigactions(Arc::clone(&context::current()?.read().actions)),
Some("current-sigactions") => Operation::CurrentSigactions,
Some("mmap-min-addr") => Operation::MmapMinAddr,
_ => return Err(Error::new(EINVAL))
};
@@ -695,6 +694,11 @@ impl Scheme for ProcScheme {
read_from(buf, &data.buf, &mut data.offset)
}
Operation::MmapMinAddr => {
let val = with_context(info.pid, |context| Ok(context.mmap_min))?;
*buf.array_chunks_mut::<{mem::size_of::<usize>()}>().next().unwrap() = usize::to_ne_bytes(val);
Ok(mem::size_of::<usize>())
}
// TODO: Replace write() with SYS_DUP_FORWARD.
// TODO: Find a better way to switch address spaces, since they also require switching
// the instruction and stack pointer. Maybe remove `<pid>/regs` altogether and replace it
@@ -798,7 +802,7 @@ impl Scheme for ProcScheme {
// Forbid transferring grants to the same address space!
if is_active { return Err(Error::new(EBUSY)); }
let current_addrspace = current_addrspace()?;
let current_addrspace = AddrSpace::current()?;
let mut current_addrspace = current_addrspace.write();
let current_addrspace = &mut *current_addrspace;
let src_grant = current_addrspace.grants.take(&Region::new(VirtualAddress::new(src_address), size)).ok_or(Error::new(EINVAL))?;
@@ -1032,6 +1036,11 @@ impl Scheme for ProcScheme {
self.handles.write().get_mut(&id).ok_or(Error::new(EBADF))?.info.operation = Operation::AwaitingSigactionsChange(sigactions);
Ok(mem::size_of::<usize>())
}
Operation::MmapMinAddr => {
let val = usize::from_ne_bytes(<[u8; mem::size_of::<usize>()]>::try_from(buf).map_err(|_| Error::new(EINVAL))?);
with_context_mut(info.pid, |context| { context.mmap_min = val; Ok(()) })?;
Ok(mem::size_of::<usize>())
}
_ => return Err(Error::new(EBADF)),
}
}
@@ -1082,6 +1091,7 @@ impl Scheme for ProcScheme {
Operation::CurrentFiletable => "current-filetable",
Operation::CurrentSigactions => "current-sigactions",
Operation::OpenViaDup => "open-via-dup",
Operation::MmapMinAddr => "mmap-min-addr",
_ => return Err(Error::new(EOPNOTSUPP)),
});
@@ -1117,29 +1127,32 @@ impl Scheme for ProcScheme {
let stop_context = if handle.info.pid == context::context_id() { with_context_mut } else { try_stop_context };
match handle.info.operation {
Operation::AwaitingAddrSpaceChange { new, new_sp, new_ip } => stop_context(handle.info.pid, |context: &mut Context| unsafe {
if let Some(saved_regs) = ptrace::regs_for_mut(context) {
saved_regs.iret.rip = new_ip;
saved_regs.iret.rsp = new_sp;
} else {
context.clone_entry = Some([new_ip, new_sp]);
}
let prev_addr_space = context.set_addr_space(new);
if let Some(mut prev) = prev_addr_space.and_then(|a| Arc::try_unwrap(a).ok()).map(RwLock::into_inner) {
// We are the last reference to the address space; therefore it must be
// unmapped.
// TODO: Optimize away clearing of page tables? In that case, what about memory
// deallocation?
for grant in prev.grants.into_iter() {
grant.unmap(&mut prev.table.utable, ());
Operation::AwaitingAddrSpaceChange { new, new_sp, new_ip } => {
stop_context(handle.info.pid, |context: &mut Context| unsafe {
if let Some(saved_regs) = ptrace::regs_for_mut(context) {
saved_regs.iret.rip = new_ip;
saved_regs.iret.rsp = new_sp;
} else {
context.clone_entry = Some([new_ip, new_sp]);
}
}
Ok(())
})?,
let prev_addr_space = context.set_addr_space(new);
if let Some(mut prev) = prev_addr_space.and_then(|a| Arc::try_unwrap(a).ok()).map(RwLock::into_inner) {
// We are the last reference to the address space; therefore it must be
// unmapped.
// TODO: Optimize away clearing of page tables? In that case, what about memory
// deallocation?
for grant in prev.grants.into_iter() {
grant.unmap(&mut prev.table.utable, ());
}
}
Ok(())
})?;
let _ = ptrace::send_event(crate::syscall::ptrace_event!(PTRACE_EVENT_ADDRSPACE_SWITCH, 0));
}
Operation::AwaitingFiletableChange(new) => with_context_mut(handle.info.pid, |context: &mut Context| {
context.files = new;
Ok(())
@@ -1216,27 +1229,41 @@ extern "C" fn clone_handler() {
}
fn inherit_context() -> Result<ContextId> {
let current_context_lock = Arc::clone(context::contexts().current().ok_or(Error::new(ESRCH))?);
let new_context_lock = Arc::clone(context::contexts_mut().spawn(clone_handler)?);
let new_id = {
let current_context_lock = Arc::clone(context::contexts().current().ok_or(Error::new(ESRCH))?);
let new_context_lock = Arc::clone(context::contexts_mut().spawn(clone_handler)?);
let current_context = current_context_lock.read();
let mut new_context = new_context_lock.write();
let current_context = current_context_lock.read();
let mut new_context = new_context_lock.write();
new_context.status = Status::Stopped(SIGSTOP);
new_context.euid = current_context.euid;
new_context.egid = current_context.egid;
new_context.ruid = current_context.ruid;
new_context.rgid = current_context.rgid;
new_context.ens = current_context.ens;
new_context.rns = current_context.rns;
new_context.ppid = current_context.id;
new_context.pgid = current_context.pgid;
new_context.umask = current_context.umask;
new_context.sigmask = current_context.sigmask;
new_context.status = Status::Stopped(SIGSTOP);
new_context.euid = current_context.euid;
new_context.egid = current_context.egid;
new_context.ruid = current_context.ruid;
new_context.rgid = current_context.rgid;
new_context.ens = current_context.ens;
new_context.rns = current_context.rns;
new_context.ppid = current_context.id;
new_context.pgid = current_context.pgid;
new_context.umask = current_context.umask;
new_context.sigmask = current_context.sigmask;
new_context.cpu_id = current_context.cpu_id;
// TODO: More to copy?
// TODO: More to copy?
Ok(new_context.id)
new_context.id
};
if ptrace::send_event(crate::syscall::ptrace_event!(PTRACE_EVENT_CLONE, new_id.into())).is_some() {
// Freeze the clone, allow ptrace to put breakpoints
// to it before it starts
let contexts = context::contexts();
let context = contexts.get(new_id).expect("Newly created context doesn't exist??");
let mut context = context.write();
context.ptrace_stop = true;
}
Ok(new_id)
}
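
The restructure here wraps the lock-holding work in a block so that both context guards are dropped before `ptrace::send_event` runs, presumably to avoid re-entrant locking of the context list. The general scoping pattern, as a small sketch:

    use std::sync::RwLock;

    fn main() {
        let registry = RwLock::new(vec![1, 2, 3]);

        // Compute the value inside a block so the guard is dropped at the `};`,
        // mirroring how `inherit_context` now releases both context locks
        // before sending the ptrace event.
        let new_id = {
            let guard = registry.read().unwrap();
            guard.len()
        };

        // Safe to take the lock again (or let another party take it) here.
        registry.write().unwrap().push(new_id as i32);
    }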
fn extract_scheme_number(fd: usize) -> Result<(Arc<dyn KernelScheme>, usize)> {
let (scheme_id, number) = match &*context::contexts().current().ok_or(Error::new(ESRCH))?.read().get_file(FileHandle::from(fd)).ok_or(Error::new(EBADF))?.description.read() {

View File

@@ -153,7 +153,7 @@ impl UserInner {
let dst_address = round_down_pages(dst_address);
let offset = address - src_address;
let aligned_size = round_up_pages(offset + size);
let dst_region = addr_space.grants.find_free_at(VirtualAddress::new(dst_address), aligned_size, flags)?;
let dst_region = addr_space.grants.find_free_at(context.mmap_min, VirtualAddress::new(dst_address), aligned_size, flags)?;
let current_addrspace = Arc::clone(
context::contexts().current().ok_or(Error::new(ESRCH))?

View File

@@ -94,7 +94,7 @@ pub fn inner_physmap(physical_address: usize, size: usize, flags: PhysmapFlags)
let mut addr_space = context.addr_space()?.write();
let addr_space = &mut *addr_space;
let dst_address = addr_space.grants.find_free(size).ok_or(Error::new(ENOMEM))?;
let dst_address = addr_space.grants.find_free(context.mmap_min, size).ok_or(Error::new(ENOMEM))?;
let mut page_flags = PageFlags::new().user(true);
if flags.contains(PHYSMAP_WRITE) {

View File

@@ -6,7 +6,7 @@ use core::mem;
use spin::{RwLock, RwLockWriteGuard};
use crate::context::{Context, ContextId, WaitpidKey};
use crate::context::{Context, ContextId, memory::AddrSpace, WaitpidKey};
use crate::Bootstrap;
use crate::context;
@@ -289,54 +289,10 @@ pub fn kill(pid: ContextId, sig: usize) -> Result<usize> {
pub fn mprotect(address: usize, size: usize, flags: MapFlags) -> Result<usize> {
// println!("mprotect {:#X}, {}, {:#X}", address, size, flags);
let end_address = address.checked_add(size).ok_or(Error::new(EFAULT))?;
if address % PAGE_SIZE != 0 || size % PAGE_SIZE != 0 { return Err(Error::new(EINVAL)); }
if address.saturating_add(size) > crate::USER_END_OFFSET { return Err(Error::new(EFAULT)); }
let address_space = Arc::clone(context::current()?.read().addr_space()?);
let mut address_space = address_space.write();
let mut flush_all = PageFlushAll::new();
let start_page = Page::containing_address(VirtualAddress::new(address));
let end_page = Page::containing_address(VirtualAddress::new(end_address));
for page in Page::range_exclusive(start_page, end_page) {
// Check if the page is actually mapped before trying to change the flags.
// FIXME can other processes change if a page is mapped beneath our feet?
let mut page_flags = if let Some((_, flags)) = address_space.table.utable.translate(page.start_address()) {
flags
} else {
flush_all.flush();
return Err(Error::new(EFAULT));
};
if !page_flags.has_present() {
flush_all.flush();
return Err(Error::new(EFAULT));
}
if flags.contains(PROT_EXEC) {
page_flags = page_flags.execute(true);
} else {
page_flags = page_flags.execute(false);
}
if flags.contains(PROT_WRITE) {
//TODO: Not allowing gain of write privileges
} else {
page_flags = page_flags.write(false);
}
if flags.contains(PROT_READ) {
//TODO: No flags for readable pages
} else {
//TODO: No flags for readable pages
}
let flush = unsafe { address_space.table.utable.remap(page.start_address(), page_flags).expect("failed to remap page in mprotect") };
flush_all.consume(flush);
}
flush_all.flush();
Ok(0)
AddrSpace::current()?.write().mprotect(Page::containing_address(VirtualAddress::new(address)), size / PAGE_SIZE, flags).map(|()| 0)
}
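
After this refactor the syscall entry point keeps only the alignment and `USER_END_OFFSET` checks before delegating to the new `AddrSpace::mprotect`. For completeness, a hedged sketch of invoking it from userspace; the `syscall::mprotect` wrapper name, its safety contract, and the bare `PROT_READ` constant are assumptions based on how this diff imports the flags, not verified against the crate:

    // Hedged sketch; assumes the Redox `syscall` crate exposes an `mprotect`
    // wrapper and a `PROT_READ` MapFlags constant as suggested by this diff.
    use syscall::flag::PROT_READ;

    fn make_read_only(addr: usize, len: usize) -> syscall::Result<usize> {
        // `addr` and `len` must be page-aligned, and the range must stay below
        // USER_END_OFFSET; the kernel otherwise returns EINVAL/EFAULT (see above).
        unsafe { syscall::mprotect(addr, len, PROT_READ) }
    }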
pub fn setpgid(pid: ContextId, pgid: ContextId) -> Result<usize> {