diff --git a/scheme/user.rs b/scheme/user.rs
index 2854821..ef2bd22 100644
--- a/scheme/user.rs
+++ b/scheme/user.rs
@@ -1,7 +1,8 @@
-use alloc::arc::Weak;
+use alloc::arc::{Arc, Weak};
+use collections::BTreeMap;
 use core::sync::atomic::{AtomicUsize, AtomicU64, Ordering};
 use core::{mem, slice, usize};
-use spin::RwLock;
+use spin::{Mutex, RwLock};
 
 use arch;
 use arch::paging::{InactivePageTable, Page, VirtualAddress, entry};
@@ -23,6 +24,7 @@ pub struct UserInner {
     next_id: AtomicU64,
     context: Weak<RwLock<Context>>,
     todo: WaitQueue<Packet>,
+    fmap: Mutex<BTreeMap<u64, (Weak<RwLock<Context>>, usize)>>,
     done: WaitMap<u64, usize>
 }
 
@@ -35,6 +37,7 @@ impl UserInner {
             next_id: AtomicU64::new(1),
             context: context,
             todo: WaitQueue::new(),
+            fmap: Mutex::new(BTreeMap::new()),
             done: WaitMap::new()
         }
     }
@@ -47,10 +50,8 @@
             (context.id, context.euid, context.egid)
         };
 
-        let id = self.next_id.fetch_add(1, Ordering::SeqCst);
-
-        let packet = Packet {
-            id: id,
+        self.call_inner(Packet {
+            id: self.next_id.fetch_add(1, Ordering::SeqCst),
             pid: pid,
             uid: uid,
             gid: gid,
@@ -58,7 +59,11 @@
             b: b,
             c: c,
             d: d
-        };
+        })
+    }
+
+    fn call_inner(&self, packet: Packet) -> Result<usize> {
+        let id = packet.id;
 
         let len = self.todo.send(packet);
         context::event::trigger(ROOT_SCHEME_ID.load(Ordering::SeqCst), self.handle_id, EVENT_READ, mem::size_of::<Packet>() * len);
@@ -67,18 +72,18 @@
     }
 
     pub fn capture(&self, buf: &[u8]) -> Result<usize> {
-        self.capture_inner(buf.as_ptr() as usize, buf.len(), false)
+        UserInner::capture_inner(&self.context, buf.as_ptr() as usize, buf.len(), false)
     }
 
     pub fn capture_mut(&self, buf: &mut [u8]) -> Result<usize> {
-        self.capture_inner(buf.as_mut_ptr() as usize, buf.len(), true)
+        UserInner::capture_inner(&self.context, buf.as_mut_ptr() as usize, buf.len(), true)
     }
 
-    fn capture_inner(&self, address: usize, size: usize, writable: bool) -> Result<usize> {
+    fn capture_inner(context_weak: &Weak<RwLock<Context>>, address: usize, size: usize, writable: bool) -> Result<usize> {
         if size == 0 {
             Ok(0)
         } else {
-            let context_lock = self.context.upgrade().ok_or(Error::new(ESRCH))?;
+            let context_lock = context_weak.upgrade().ok_or(Error::new(ESRCH))?;
             let context = context_lock.read();
 
             let mut grants = context.grants.lock();
@@ -165,13 +170,19 @@
         let len = buf.len()/packet_size;
         let mut i = 0;
         while i < len {
-            let packet = unsafe { *(buf.as_ptr() as *const Packet).offset(i as isize) };
+            let mut packet = unsafe { *(buf.as_ptr() as *const Packet).offset(i as isize) };
             if packet.id == 0 {
                 match packet.a {
                     SYS_FEVENT => context::event::trigger(self.scheme_id.load(Ordering::SeqCst), packet.b, packet.c, packet.d),
                     _ => println!("Unknown scheme -> kernel message {}", packet.a)
                 }
             } else {
+                if let Some((context_weak, size)) = self.fmap.lock().remove(&packet.id) {
+                    if let Ok(address) = Error::demux(packet.a) {
+                        packet.a = Error::mux(UserInner::capture_inner(&context_weak, address, size, true));
+                    }
+                }
+
                 self.done.send(packet.id, packet.a);
             }
             i += 1;
@@ -269,6 +280,32 @@
         inner.call(SYS_FEVENT, file, flags, 0)
     }
 
+    fn fmap(&self, file: usize, offset: usize, size: usize) -> Result<usize> {
+        let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
+
+        let (pid, uid, gid, context_lock) = {
+            let contexts = context::contexts();
+            let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+            let context = context_lock.read();
+            (context.id, context.euid, context.egid, Arc::downgrade(&context_lock))
+        };
+
+        let id = inner.next_id.fetch_add(1, Ordering::SeqCst);
+
+        inner.fmap.lock().insert(id, (context_lock, size));
+
+        inner.call_inner(Packet {
+            id: id,
+            pid: pid,
+            uid: uid,
+            gid: gid,
+            a: SYS_FMAP,
+            b: file,
+            c: offset,
+            d: size
+        })
+    }
+
     fn fpath(&self, file: usize, buf: &mut [u8]) -> Result<usize> {
         let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
         let address = inner.capture_mut(buf)?;
diff --git a/syscall/fs.rs b/syscall/fs.rs
index 3e1a5a6..c40ec8d 100644
--- a/syscall/fs.rs
+++ b/syscall/fs.rs
@@ -293,3 +293,27 @@ pub fn fevent(fd: usize, flags: usize) -> Result<usize> {
     context::event::register(fd, file.scheme, event_id);
     Ok(0)
 }
+
+pub fn funmap(virtual_address: usize) -> Result<usize> {
+    if virtual_address == 0 {
+        Ok(0)
+    } else {
+        let contexts = context::contexts();
+        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+        let context = context_lock.read();
+
+        let mut grants = context.grants.lock();
+
+        for i in 0 .. grants.len() {
+            let start = grants[i].start_address().get();
+            let end = start + grants[i].size();
+            if virtual_address >= start && virtual_address < end {
+                grants.remove(i).unmap();
+
+                return Ok(0);
+            }
+        }
+
+        Err(Error::new(EFAULT))
+    }
+}
diff --git a/syscall/mod.rs b/syscall/mod.rs
index 2598cfa..952ccfa 100644
--- a/syscall/mod.rs
+++ b/syscall/mod.rs
@@ -41,6 +41,7 @@ pub extern fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize
             SYS_CLOSE => close(b),
             SYS_DUP => dup(b, validate_slice(c as *const u8, d)?),
             SYS_FEVENT => fevent(b, c),
+            SYS_FUNMAP => funmap(b),
             _ => file_op(a, b, c, d)
         }
     },
diff --git a/syscall/process.rs b/syscall/process.rs
index 39ed2ad..87053af 100644
--- a/syscall/process.rs
+++ b/syscall/process.rs
@@ -367,11 +367,12 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
             context.heap = Some(heap_shared);
         }
 
+        // Copy grant mapping
         if ! grants.lock().is_empty() {
-            let frame = active_table.p4()[2].pointed_frame().expect("user heap not mapped");
+            let frame = active_table.p4()[2].pointed_frame().expect("user grants not mapped");
             let flags = active_table.p4()[2].flags();
             active_table.with(&mut new_table, &mut temporary_page, |mapper| {
-                mapper.p4_mut()[1].set(frame, flags);
+                mapper.p4_mut()[2].set(frame, flags);
             });
         }
         context.grants = grants;