Fix "clone grant using fmap"

This does the same as the previous MR, but fixes the issue where the
mapping was given to the parent process (and at the wrong address)
rather than to the child process.
This commit is contained in:
4lDO2
2022-04-08 16:10:16 +02:00
parent e72fd5a0e4
commit 4d7da495f5
5 changed files with 66 additions and 36 deletions

View File

@@ -7,7 +7,7 @@ use crate::scheme::{self, SchemeNamespace, SchemeId};
use crate::syscall::error::{Result, Error, EBADF};
/// A file description
#[derive(Debug)]
#[derive(Clone, Copy, Debug)]
pub struct FileDescription {
/// The namespace the file was opened from (used for debugging)
pub namespace: SchemeNamespace,

View File

@@ -36,12 +36,12 @@ pub fn page_flags(flags: MapFlags) -> PageFlags<RmmA> {
}
pub struct UnmapResult {
pub file_desc: Option<FileDescriptor>,
pub file_desc: Option<GrantFileRef>,
}
impl Drop for UnmapResult {
fn drop(&mut self) {
if let Some(fd) = self.file_desc.take() {
let _ = fd.close();
let _ = fd.desc.close();
}
}
}
@@ -301,7 +301,15 @@ pub struct Grant {
mapped: bool,
owned: bool,
//TODO: This is probably a very heavy way to keep track of fmap'd files, perhaps move to the context?
pub desc_opt: Option<FileDescriptor>,
pub desc_opt: Option<GrantFileRef>,
}
#[derive(Clone, Debug)]
pub struct GrantFileRef {
pub desc: FileDescriptor,
pub offset: usize,
// TODO: Can the flags maybe be stored together with the page flags. Should some flags be kept,
// and others discarded when re-fmapping on clone?
pub flags: MapFlags,
}
impl Grant {
@@ -374,7 +382,7 @@ impl Grant {
}
}
pub fn map_inactive(src: VirtualAddress, dst: VirtualAddress, size: usize, flags: PageFlags<RmmA>, desc_opt: Option<FileDescriptor>, inactive_table: &mut InactivePageTable) -> Grant {
pub fn map_inactive(src: VirtualAddress, dst: VirtualAddress, size: usize, flags: PageFlags<RmmA>, desc_opt: Option<GrantFileRef>, inactive_table: &mut InactivePageTable) -> Grant {
let active_table = unsafe { ActivePageTable::new(src.kind()) };
let mut inactive_mapper = inactive_table.mapper();

View File

@@ -8,7 +8,7 @@ use spin::{Mutex, RwLock};
use crate::context::{self, Context};
use crate::context::file::FileDescriptor;
use crate::context::memory::{DANGLING, page_flags, round_down_pages, Grant, Region};
use crate::context::memory::{DANGLING, page_flags, round_down_pages, Grant, Region, GrantFileRef};
use crate::event;
use crate::paging::{PAGE_SIZE, InactivePageTable, VirtualAddress};
use crate::scheme::{AtomicSchemeId, SchemeId};
@@ -123,7 +123,7 @@ impl UserInner {
).map(|addr| addr.data())
}
fn capture_inner(context_weak: &Weak<RwLock<Context>>, dst_address: usize, address: usize, size: usize, flags: MapFlags, desc_opt: Option<FileDescriptor>)
fn capture_inner(context_weak: &Weak<RwLock<Context>>, dst_address: usize, address: usize, size: usize, flags: MapFlags, desc_opt: Option<GrantFileRef>)
-> Result<VirtualAddress> {
// TODO: More abstractions over grant creation!
@@ -233,7 +233,8 @@ impl UserInner {
if address % PAGE_SIZE > 0 {
println!("scheme returned unaligned address, causing extra frame to be allocated");
}
let res = UserInner::capture_inner(&context_weak, map.address, address, map.size, map.flags, Some(desc));
let file_ref = GrantFileRef { desc, offset: map.offset, flags: map.flags };
let res = UserInner::capture_inner(&context_weak, map.address, address, map.size, map.flags, Some(file_ref));
if let Ok(grant_address) = res {
if let Some(context_lock) = context_weak.upgrade() {
let context = context_lock.read();

View File

@@ -489,11 +489,8 @@ pub fn funmap_old(virtual_address: usize) -> Result<usize> {
}
}
if let Some(desc) = desc_opt {
let scheme_id = {
let description = desc.description.read();
description.scheme
};
if let Some(file_ref) = desc_opt {
let scheme_id = { file_ref.desc.description.read().scheme };
let scheme = {
let schemes = scheme::schemes();
@@ -502,7 +499,7 @@ pub fn funmap_old(virtual_address: usize) -> Result<usize> {
};
let res = scheme.funmap_old(virtual_address);
let _ = desc.close();
let _ = file_ref.desc.close();
res
} else {
@@ -555,11 +552,8 @@ pub fn funmap(virtual_address: usize, length: usize) -> Result<usize> {
}
}
for (desc, intersection) in notify_files {
let scheme_id = {
let description = desc.description.read();
description.scheme
};
for (file_ref, intersection) in notify_files {
let scheme_id = { file_ref.desc.description.read().scheme };
let scheme = {
let schemes = scheme::schemes();
@@ -568,7 +562,7 @@ pub fn funmap(virtual_address: usize, length: usize) -> Result<usize> {
};
let res = scheme.funmap(intersection.start_address().data(), intersection.size());
let _ = desc.close();
let _ = file_ref.desc.close();
res?;
}

View File

@@ -10,7 +10,7 @@ use core::ops::DerefMut;
use core::{intrinsics, mem, str};
use spin::{RwLock, RwLockWriteGuard};
use crate::context::file::FileDescriptor;
use crate::context::file::{FileDescription, FileDescriptor};
use crate::context::memory::{UserGrants, Region};
use crate::context::{Context, ContextId, WaitpidKey};
use crate::context;
@@ -56,7 +56,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
let mut image = vec![];
let mut stack_opt = None;
let mut sigstack_opt = None;
let grants;
let mut grants;
let name;
let cwd;
let files;
@@ -266,24 +266,51 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
// If not cloning virtual memory, use fmap to re-obtain every grant where possible
if !flags.contains(CLONE_VM) {
let mut grants = grants.write();
let grants = Arc::get_mut(&mut grants).ok_or(Error::new(EBUSY))?.get_mut();
let old_grants = mem::take(&mut grants.inner);
let mut to_remove = BTreeSet::new();
// TODO: Find some way to do this without having to allocate.
// TODO: Use drain_filter if possible
// TODO: Check that the current process is not allowed to serve any scheme this logic
// could interfere with. Deadlocks would otherwise seem inevitable.
for grant in grants.iter() {
let remove = false;
if let Some(ref _desc) = grant.desc_opt {
println!("todo: clone grant using fmap: {:?}", grant);
}
if remove {
to_remove.insert(Region::from(grant));
}
}
for mut grant in old_grants.into_iter() {
let region = *grant.region();
let address = region.start_address().data();
let size = region.size();
for region in to_remove {
grants.remove(&region);
let new_grant = if let Some(ref mut file_ref) = grant.desc_opt.take() {
// TODO: Technically this is redundant as the grants are already secret_cloned.
// Maybe grants with fds can be excluded from that step?
grant.unmap();
let FileDescription { scheme, number, .. } = { *file_ref.desc.description.read() };
let scheme_arc = match crate::scheme::schemes().get(scheme) {
Some(s) => Arc::clone(s),
None => continue,
};
let map = crate::syscall::data::Map {
address,
size,
offset: file_ref.offset,
flags: file_ref.flags | MapFlags::MAP_FIXED_NOREPLACE,
};
let ptr = match scheme_arc.fmap(number, &map) {
Ok(new_range) => new_range as *mut u8,
Err(_) => continue,
};
// This will eventually be freed from the parent context after move_to is
// called.
context::contexts().current().ok_or(Error::new(ESRCH))?
.read().grants.write()
.take(&Region::new(VirtualAddress::new(ptr as usize), map.size))
.ok_or(Error::new(EFAULT))?
} else {
grant
};
grants.insert(new_grant);
}
}