Fix AddrSpace memory leak.

This commit is contained in:
4lDO2
2022-07-11 12:39:08 +02:00
parent b141cdaad2
commit 4aea0cfd0c
4 changed files with 45 additions and 39 deletions

View File

@@ -359,7 +359,7 @@ impl Context {
sigstack: None,
clone_entry: None,
};
this.set_addr_space(new_addrspace()?.1);
let _ = this.set_addr_space(new_addrspace()?);
Ok(this)
}
@@ -533,7 +533,8 @@ impl Context {
/// Borrow this context's address space.
///
/// Returns `Err(ESRCH)` when the context has no address space attached
/// (the `addr_space` field is `None`).
pub fn addr_space(&self) -> Result<&Arc<RwLock<AddrSpace>>> {
self.addr_space.as_ref().ok_or(Error::new(ESRCH))
}
pub fn set_addr_space(&mut self, addr_space: Arc<RwLock<AddrSpace>>) {
#[must_use = "grants must be manually unmapped, otherwise it WILL panic!"]
pub fn set_addr_space(&mut self, addr_space: Arc<RwLock<AddrSpace>>) -> Option<Arc<RwLock<AddrSpace>>> {
let physaddr = addr_space.read().frame.utable.start_address();
if self.id == super::context_id() {
unsafe {
@@ -542,6 +543,6 @@ impl Context {
}
self.arch.set_page_utable(physaddr.data());
self.addr_space = Some(addr_space);
self.addr_space.replace(addr_space)
}
}

View File

@@ -35,6 +35,14 @@ pub fn page_flags(flags: MapFlags) -> PageFlags<RmmA> {
.write(flags.contains(MapFlags::PROT_WRITE))
//TODO: PROT_READ
}
/// Convert hardware page-table flags back into the syscall-level `MapFlags`
/// representation (inverse direction of `page_flags` above).
///
/// `PROT_READ` is always set; write and execute permissions are derived from
/// the page flags' `has_write`/`has_execute` bits.
pub fn map_flags(page_flags: PageFlags<RmmA>) -> MapFlags {
let mut flags = MapFlags::PROT_READ;
if page_flags.has_write() { flags |= MapFlags::PROT_WRITE; }
if page_flags.has_execute() { flags |= MapFlags::PROT_EXEC; }
// TODO: MAP_SHARED/MAP_PRIVATE (requires that grants keep track of what they borrow and if
// they borrow shared or CoW).
flags
}
pub struct UnmapResult {
pub file_desc: Option<GrantFileRef>,
@@ -47,31 +55,19 @@ impl Drop for UnmapResult {
}
}
int_like!(PtId, usize);
static ADDRSPACES: RwLock<BTreeMap<PtId, Arc<RwLock<AddrSpace>>>> = RwLock::new(BTreeMap::new());
static NEXT_PTID: atomic::AtomicUsize = atomic::AtomicUsize::new(1);
pub fn new_addrspace() -> Result<(PtId, Arc<RwLock<AddrSpace>>)> {
let id = PtId::from(NEXT_PTID.fetch_add(1, atomic::Ordering::Relaxed));
let arc = Arc::try_new(RwLock::new(AddrSpace::new(id)?)).map_err(|_| Error::new(ENOMEM))?;
ADDRSPACES.write().insert(id, Arc::clone(&arc));
Ok((id, arc))
}
pub fn addrspace(id: PtId) -> Option<Arc<RwLock<AddrSpace>>> {
ADDRSPACES.read().get(&id).map(Arc::clone)
/// Allocate a fresh, empty address space wrapped in `Arc<RwLock<_>>`.
///
/// Uses the fallible `Arc::try_new` so that heap-allocation failure is
/// reported as `Err(ENOMEM)` rather than aborting the kernel; `AddrSpace::new`
/// errors (e.g. page-table setup failure) are propagated via `?`.
pub fn new_addrspace() -> Result<Arc<RwLock<AddrSpace>>> {
Arc::try_new(RwLock::new(AddrSpace::new()?)).map_err(|_| Error::new(ENOMEM))
}
#[derive(Debug)]
pub struct AddrSpace {
pub frame: Tables,
pub grants: UserGrants,
pub id: PtId,
}
impl AddrSpace {
/// Attempt to clone an existing address space so that all mappings are copied (CoW).
pub fn try_clone(&self) -> Result<(PtId, Arc<RwLock<Self>>)> {
let (id, mut new) = new_addrspace()?;
pub fn try_clone(&self) -> Result<Arc<RwLock<Self>>> {
let mut new = new_addrspace()?;
// TODO: Abstract away this.
let (mut inactive, mut active);
@@ -111,13 +107,12 @@ impl AddrSpace {
new.write().grants.insert(new_grant);
}
Ok((id, new))
Ok(new)
}
pub fn new(id: PtId) -> Result<Self> {
pub fn new() -> Result<Self> {
Ok(Self {
grants: UserGrants::new(),
frame: setup_new_utable()?,
id,
})
}
pub fn is_current(&self) -> bool {
@@ -271,7 +266,7 @@ impl UserGrants {
}
pub fn take(&mut self, region: &Region) -> Option<Grant> {
let grant = self.inner.take(region)?;
Self::unreserve(&mut self.holes, region);
Self::unreserve(&mut self.holes, grant.region());
Some(grant)
}
pub fn iter(&self) -> impl Iterator<Item = &Grant> + '_ {
@@ -560,7 +555,7 @@ impl Grant {
size: page_count * PAGE_SIZE,
},
flags,
mapped: !unmap,
mapped: true,
owned,
desc_opt,
}

View File

@@ -1,6 +1,6 @@
use crate::{
arch::paging::{ActivePageTable, Flusher, InactivePageTable, mapper::{InactiveFlusher, Mapper, PageFlushAll}, Page, RmmA, VirtualAddress},
context::{self, Context, ContextId, Status, file::{FileDescription, FileDescriptor}, memory::{AddrSpace, Grant, new_addrspace, PtId, page_flags, Region}},
context::{self, Context, ContextId, Status, file::{FileDescription, FileDescriptor}, memory::{AddrSpace, Grant, new_addrspace, map_flags, page_flags, Region}},
memory::PAGE_SIZE,
ptrace,
scheme::{self, AtomicSchemeId, FileHandle, KernelScheme, SchemeId},
@@ -8,7 +8,7 @@ use crate::{
FloatRegisters,
IntRegisters,
EnvRegisters,
data::{PtraceEvent, Stat},
data::{Map, PtraceEvent, Stat},
error::*,
flag::*,
scheme::{calc_seek_offset_usize, Scheme},
@@ -450,8 +450,8 @@ impl Scheme for ProcScheme {
let (operation, is_mem) = match buf {
// TODO: Better way to obtain new empty address spaces, perhaps using SYS_OPEN. But
// in that case, what scheme?
b"empty" => (Operation::AddrSpace { addrspace: new_addrspace()?.1 }, false),
b"exclusive" => (Operation::AddrSpace { addrspace: addrspace.read().try_clone()?.1 }, false),
b"empty" => (Operation::AddrSpace { addrspace: new_addrspace()? }, false),
b"exclusive" => (Operation::AddrSpace { addrspace: addrspace.read().try_clone()? }, false),
b"mem" => (Operation::Memory { addrspace: Arc::clone(&addrspace) }, true),
grant_handle if grant_handle.starts_with(b"grant-") => {
@@ -552,7 +552,7 @@ impl Scheme for ProcScheme {
let mut qwords = record_bytes.array_chunks_mut::<{mem::size_of::<usize>()}>();
qwords.next().unwrap().copy_from_slice(&usize::to_ne_bytes(grant.start_address().data()));
qwords.next().unwrap().copy_from_slice(&usize::to_ne_bytes(grant.size()));
qwords.next().unwrap().copy_from_slice(&usize::to_ne_bytes(grant.flags().data() | if grant.desc_opt.is_some() { 0x8000_0000 } else { 0 }));
qwords.next().unwrap().copy_from_slice(&usize::to_ne_bytes(map_flags(grant.flags()).bits() | if grant.desc_opt.is_some() { 0x8000_0000 } else { 0 }));
qwords.next().unwrap().copy_from_slice(&usize::to_ne_bytes(grant.desc_opt.as_ref().map_or(0, |d| d.offset)));
bytes_read += RECORD_SIZE;
}
@@ -739,10 +739,6 @@ impl Scheme for ProcScheme {
Operation::AddrSpace { addrspace } => {
// FIXME: Forbid upgrading external mappings.
let pid = self.handles.read()
.get(&id).ok_or(Error::new(EBADF))?
.info.pid;
let mut chunks = buf.array_chunks::<{mem::size_of::<usize>()}>().copied().map(usize::from_ne_bytes);
// Update grant mappings, like mprotect but allowed to target other contexts.
let base = chunks.next().ok_or(Error::new(EINVAL))?;
@@ -1110,7 +1106,21 @@ impl Scheme for ProcScheme {
context.clone_entry = Some([new_ip, new_sp]);
}
context.set_addr_space(new);
let prev_addr_space = context.set_addr_space(new);
if let Some(prev) = prev_addr_space.and_then(|a| Arc::try_unwrap(a).ok()).map(RwLock::into_inner) {
// We are the last reference to the address space; therefore it must be
// unmapped.
let mut table = unsafe { InactivePageTable::from_address(prev.frame.utable.start_address().data()) };
// TODO: Optimize away clearing of page tables? In that case, what about memory
// deallocation?
for grant in prev.grants.into_iter() {
grant.unmap(&mut table.mapper(), ());
}
}
Ok(())
})?,
Operation::AwaitingFiletableChange(new) => with_context_mut(handle.info.pid, |context: &mut Context| {
@@ -1135,7 +1145,7 @@ impl Scheme for ProcScheme {
Ok(0)
}
// TODO: Support borrowing someone else's memory.
fn fmap(&self, id: usize, map: &syscall::data::Map) -> Result<usize> {
fn fmap(&self, id: usize, map: &Map) -> Result<usize> {
let description_lock = match self.handles.read().get(&id) {
Some(Handle { info: Info { operation: Operation::GrantHandle { ref description }, .. }, .. }) => Arc::clone(description),
_ => return Err(Error::new(EBADF)),

View File

@@ -146,10 +146,10 @@ impl UserInner {
let context_lock = context_weak.upgrade().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
let mut addr_space = context.addr_space()?.write();
let mut new_table = unsafe { InactivePageTable::from_address(addr_space.frame.utable.start_address().data()) };
let src_address = round_down_pages(address);
let dst_address = round_down_pages(dst_address);
let offset = address - src_address;
@@ -178,14 +178,14 @@ impl UserInner {
let context_lock = self.context.upgrade().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
let mut other_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) };
let mut addr_space = context.addr_space()?.write();
let mut other_table = unsafe { InactivePageTable::from_address(addr_space.frame.utable.start_address().data()) };
let region = match addr_space.grants.contains(VirtualAddress::new(address)).map(Region::from) {
Some(region) => region,
None => return Err(Error::new(EFAULT)),
};
addr_space.grants.take(&region).unwrap().unmap(&mut other_table.mapper(), crate::paging::mapper::InactiveFlusher::new());
addr_space.grants.take(&region).unwrap().unmap(&mut other_table.mapper(), InactiveFlusher::new());
Ok(())
}