diff --git a/context/context.rs b/context/context.rs
index 73f31f4..c06e778 100644
--- a/context/context.rs
+++ b/context/context.rs
@@ -25,6 +25,8 @@ pub struct Context {
     pub running: bool,
     /// The architecture specific context
    pub arch: arch::context::Context,
+    /// Kernel FX
+    pub kfx: Option<Box<[u8; 512]>>,
     /// Kernel stack
     pub kstack: Option<Box<[u8]>>,
     /// Executable image
@@ -49,6 +51,7 @@ impl Context {
             status: Status::Blocked,
             running: false,
             arch: arch::context::Context::new(),
+            kfx: None,
             kstack: None,
             image: Vec::new(),
             heap: None,
diff --git a/context/list.rs b/context/list.rs
index 2ad5efb..5541efa 100644
--- a/context/list.rs
+++ b/context/list.rs
@@ -1,4 +1,5 @@
 use alloc::arc::Arc;
+use alloc::boxed::Box;
 use collections::BTreeMap;
 use core::mem;
 use core::sync::atomic::Ordering;
@@ -64,6 +65,10 @@ impl ContextList {
         let context_lock = self.new_context()?;
         {
             let mut context = context_lock.write();
+            let mut fx = unsafe { Box::from_raw(::alloc::heap::allocate(512, 16) as *mut [u8; 512]) };
+            for b in fx.iter_mut() {
+                *b = 0;
+            }
             let mut stack = vec![0; 65536].into_boxed_slice();
             let offset = stack.len() - mem::size_of::<usize>();
             unsafe {
@@ -72,7 +77,9 @@ impl ContextList {
                 *(func_ptr as *mut usize) = func as usize;
             }
             context.arch.set_page_table(unsafe { arch::paging::ActivePageTable::new().address() });
+            context.arch.set_fx(fx.as_ptr() as usize);
             context.arch.set_stack(stack.as_ptr() as usize + offset);
+            context.kfx = Some(fx);
             context.kstack = Some(stack);
         }
         Ok(context_lock)
diff --git a/context/mod.rs b/context/mod.rs
index 041eec6..597c301 100644
--- a/context/mod.rs
+++ b/context/mod.rs
@@ -1,5 +1,5 @@
 //! Context management
-
+use alloc::boxed::Box;
 use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
 use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
 
@@ -38,6 +38,13 @@ pub fn init() {
     let mut contexts = contexts_mut();
     let context_lock = contexts.new_context().expect("could not initialize first context");
     let mut context = context_lock.write();
+    let mut fx = unsafe { Box::from_raw(::alloc::heap::allocate(512, 16) as *mut [u8; 512]) };
+    for b in fx.iter_mut() {
+        *b = 0;
+    }
+
+    context.arch.set_fx(fx.as_ptr() as usize);
+    context.kfx = Some(fx);
     context.status = Status::Runnable;
     context.running = true;
     CONTEXT_ID.store(context.id, Ordering::SeqCst);
diff --git a/lib.rs b/lib.rs
index 0bc1c0e..a2f2129 100644
--- a/lib.rs
+++ b/lib.rs
@@ -69,6 +69,7 @@
 #![feature(collections)]
 #![feature(const_fn)]
 #![feature(drop_types_in_const)]
+#![feature(heap_api)]
 #![feature(question_mark)]
 #![feature(never_type)]
 #![feature(thread_local)]
diff --git a/syscall/process.rs b/syscall/process.rs
index b38aee6..bc61d38 100644
--- a/syscall/process.rs
+++ b/syscall/process.rs
@@ -1,6 +1,6 @@
 ///! Process syscalls
-
 use alloc::arc::Arc;
+use alloc::boxed::Box;
 use collections::Vec;
 use core::mem;
 use core::str;
@@ -58,6 +58,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
     let pid;
     {
         let arch;
+        let mut kfx_option = None;
         let mut kstack_option = None;
         let mut offset = 0;
         let mut image = vec![];
@@ -77,6 +78,14 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
 
             arch = context.arch.clone();
 
+            if let Some(ref fx) = context.kfx {
+                let mut new_fx = unsafe { Box::from_raw(::alloc::heap::allocate(512, 16) as *mut [u8; 512]) };
+                for (new_b, b) in new_fx.iter_mut().zip(fx.iter()) {
+                    *new_b = *b;
+                }
+                kfx_option = Some(new_fx);
+            }
+
             if let Some(ref stack) = context.kstack {
                 offset = stack_base - stack.as_ptr() as usize - mem::size_of::<usize>(); // Add clone ret
                 let mut new_stack = stack.clone();
@@ -238,6 +247,11 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
             });
         }
 
+        if let Some(fx) = kfx_option.take() {
+            context.arch.set_fx(fx.as_ptr() as usize);
+            context.kfx = Some(fx);
+        }
+
         // Set kernel stack
         if let Some(stack) = kstack_option.take() {
             context.arch.set_stack(stack.as_ptr() as usize + offset);