Merge pull request #9 from redox-os/refactor

Refactor
This commit is contained in:
Jeremy Soller
2017-04-05 17:36:36 -06:00
committed by GitHub
70 changed files with 445 additions and 908 deletions

View File

@@ -9,8 +9,10 @@ path = "src/lib.rs"
crate-type = ["staticlib"]
[dependencies]
alloc_kernel = { path = "alloc_kernel" }
bitflags = "0.7"
spin = "0.4"
raw-cpuid = { git = "https://github.com/gz/rust-cpuid", branch = "master" }
redox_syscall = "0.1"
[dependencies.goblin]
@@ -18,14 +20,9 @@ version = "0.0.8"
default-features = false
features = ["elf32", "elf64"]
[dev-dependencies]
arch_test = { path = "arch/test" }
[target.'cfg(target_arch = "arm")'.dependencies]
arch_arm = { path = "arch/arm" }
[target.'cfg(target_arch = "x86_64")'.dependencies]
arch_x86_64 = { path = "arch/x86_64" }
[dependencies.x86]
version = "0.7"
default-features = false
[features]
default = []

View File

@@ -1,8 +0,0 @@
[package]
name = "arch_arm"
version = "0.1.0"
[dependencies]
alloc_kernel = { path = "../../alloc_kernel" }
bitflags = "0.7"
spin = "0.4"

View File

@@ -1,9 +0,0 @@
#[derive(Debug)]
pub struct Context;
impl Context {
pub fn new() -> Self {
Context
}
}

View File

@@ -1,70 +0,0 @@
/// Memcpy
///
/// Copy N bytes of memory from one location to another.
#[no_mangle]
pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8,
n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
dest
}
/// Memmove
///
/// Copy N bytes of memory from src to dest. The memory areas may overlap.
#[no_mangle]
pub unsafe extern fn memmove(dest: *mut u8, src: *const u8,
n: usize) -> *mut u8 {
if src < dest as *const u8 {
let mut i = n;
while i != 0 {
i -= 1;
*dest.offset(i as isize) = *src.offset(i as isize);
}
} else {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
dest
}
/// Memset
///
/// Fill a block of memory with a specified value.
#[no_mangle]
pub unsafe extern fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*s.offset(i as isize) = c as u8;
i += 1;
}
s
}
/// Memcmp
///
/// Compare two blocks of memory.
#[no_mangle]
pub unsafe extern fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
let mut i = 0;
while i < n {
let a = *s1.offset(i as isize);
let b = *s2.offset(i as isize);
if a != b {
return a as i32 - b as i32
}
i += 1;
}
0
}

View File

@@ -1,30 +0,0 @@
//! Interrupt instructions
/// Clear interrupts
#[inline(always)]
pub unsafe fn disable() {
}
/// Set interrupts
#[inline(always)]
pub unsafe fn enable() {
}
/// Set interrupts and halt
#[inline(always)]
pub unsafe fn enable_and_halt() {
halt();
}
/// Halt instruction
#[inline(always)]
pub unsafe fn halt() {
//asm!("wfi" : : : : "volatile");
asm!("nop" : : : : "volatile");
}
/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
#[inline(never)]
pub unsafe fn stack_trace() {
}

View File

@@ -1,39 +0,0 @@
//! Architecture support for ARM
#![feature(asm)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![no_std]
extern crate alloc_kernel as allocator;
#[macro_use]
extern crate bitflags;
extern crate spin;
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({});
}
/// Print with new line to console
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Context switching
pub mod context;
/// Memset, memcpy, etc.
pub mod externs;
/// Interrupt handling
pub mod interrupt;
/// Panic support
pub mod panic;
/// Initialization function
pub mod start;

View File

@@ -1,60 +0,0 @@
ENTRY(kstart)
OUTPUT_ARCH(arm)
OUTPUT_FORMAT(elf32-littlearm)
KERNEL_OFFSET = 0;
SECTIONS {
. = KERNEL_OFFSET;
.text : AT(ADDR(.text) - KERNEL_OFFSET) {
__text_start = .;
*(.text*)
. = ALIGN(4096);
__text_end = .;
}
.rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) {
__rodata_start = .;
*(.rodata*)
. = ALIGN(4096);
__rodata_end = .;
}
.data : AT(ADDR(.data) - KERNEL_OFFSET) {
__data_start = .;
*(.data*)
. = ALIGN(4096);
__data_end = .;
}
.tdata : AT(ADDR(.tdata) - KERNEL_OFFSET) {
__tdata_start = .;
*(.tdata*)
. = ALIGN(4096);
__tdata_end = .;
__tbss_start = .;
*(.tbss*)
. += 8;
. = ALIGN(4096);
__tbss_end = .;
}
.bss : AT(ADDR(.bss) - KERNEL_OFFSET) {
__bss_start = .;
*(.bss*)
. = ALIGN(4096);
__bss_end = .;
}
__end = .;
/DISCARD/ : {
*(.comment*)
*(.debug*)
*(.eh_frame*)
*(.gcc_except_table*)
*(.note*)
*(.rel.eh_frame*)
}
}

View File

@@ -1,38 +0,0 @@
//! Intrinsics for panic handling
use interrupt;
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
#[cfg(not(test))]
/// Required to handle panics
#[lang = "panic_fmt"]
extern "C" fn panic_fmt(fmt: ::core::fmt::Arguments, file: &str, line: u32) -> ! {
println!("PANIC: {}", fmt);
println!("FILE: {}", file);
println!("LINE: {}", line);
unsafe { interrupt::stack_trace(); }
println!("HALT");
loop {
unsafe { interrupt::halt(); }
}
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
pub extern "C" fn _Unwind_Resume() -> ! {
loop {
unsafe { interrupt::halt(); }
}
}
/// Required for linker
#[no_mangle]
pub extern "C" fn __aeabi_unwind_cpp_pr0() {
loop {}
}

View File

@@ -1,27 +0,0 @@
const SERIAL_BASE: *mut u8 = 0x16000000 as *mut u8;
const SERIAL_FLAG_REGISTER: *const u8 = 0x16000018 as *const u8;
const SERIAL_BUFFER_FULL: u8 = (1 << 5);
unsafe fn putc (c: u8)
{
/* Wait until the serial buffer is empty */
while *SERIAL_FLAG_REGISTER & SERIAL_BUFFER_FULL == SERIAL_BUFFER_FULL {}
/* Put our character, c, into the serial buffer */
*SERIAL_BASE = c;
}
unsafe fn puts(string: &str)
{
for b in string.bytes() {
putc(b);
}
}
#[no_mangle]
#[naked]
pub unsafe extern fn kstart() -> ! {
asm!("mov sp, 0x18000" : : : : "volatile");
puts("TEST\r\n");
loop {}
}

View File

@@ -1,3 +0,0 @@
[package]
name = "arch_test"
version = "0.1.0"

View File

@@ -1,43 +0,0 @@
//! Interrupt instructions
static mut INTERRUPTS_ENABLED: bool = false;
/// Clear interrupts
#[inline(always)]
pub unsafe fn disable() {
println!("CLEAR INTERRUPTS");
INTERRUPTS_ENABLED = false;
}
/// Set interrupts
#[inline(always)]
pub unsafe fn enable() {
println!("SET INTERRUPTS");
INTERRUPTS_ENABLED = true;
}
/// Halt instruction
#[inline(always)]
pub unsafe fn halt() {
assert!(INTERRUPTS_ENABLED);
::std::thread::yield_now();
}
/// Pause instruction
#[inline(always)]
pub unsafe fn pause() {
}
/// Set interrupts and nop
#[inline(always)]
pub unsafe fn enable_and_nop() {
enable();
}
/// Set interrupts and halt
#[inline(always)]
pub unsafe fn enable_and_halt() {
enable();
halt();
}

View File

@@ -1,43 +0,0 @@
//! Architecture support for testing
pub use std::io;
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({
use $crate::io::Write;
let _ = write!($crate::io::stdout(), $($arg)*);
});
}
/// Print with new line to console
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Create an interrupt function that can safely run rust code
#[macro_export]
macro_rules! interrupt {
($name:ident, $func:block) => {
pub unsafe extern fn $name () {
unsafe fn inner() {
$func
}
// Call inner rust function
inner();
}
};
}
/// Interrupt instructions
pub mod interrupt;
/// Initialization and main function
pub mod main;
/// Time functions
pub mod time;

View File

@@ -1,11 +0,0 @@
/// This function is where the kernel sets up IRQ handlers
/// It is incredibly unsafe, and should be minimal in nature
extern {
fn kmain() -> !;
}
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
kmain();
}

View File

@@ -1,7 +0,0 @@
pub fn monotonic() -> (u64, u64) {
(0, 0)
}
pub fn realtime() -> (u64, u64) {
(0, 0)
}

View File

@@ -1,14 +0,0 @@
[package]
name = "arch_x86_64"
version = "0.1.0"
[dependencies]
alloc_kernel = { path = "../../alloc_kernel/" }
bitflags = "0.7"
raw-cpuid = { git = "https://github.com/gz/rust-cpuid", branch = "master" }
spin = "0.4"
redox_syscall = "0.1"
[dependencies.x86]
version = "0.7"
default-features = false

View File

@@ -1,328 +0,0 @@
//! Architecture support for x86_64
//#![deny(warnings)]
#![deny(unused_must_use)]
#![feature(asm)]
#![feature(concat_idents)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(drop_types_in_const)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![feature(thread_local)]
#![feature(unique)]
#![no_std]
extern crate alloc_kernel as allocator;
#[macro_use]
extern crate bitflags;
extern crate spin;
extern crate syscall;
pub extern crate x86;
// Because the memory map is so important to not be aliased, it is defined here, in one place
// The lower 256 PML4 entries are reserved for userspace
// Each PML4 entry references up to 512 GB of memory
// The top (511) PML4 is reserved for recursive mapping
// The second from the top (510) PML4 is reserved for the kernel
/// The size of a single PML4
pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
/// Offset of recursive paging
pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
/// Offset of kernel
pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
/// Offset to kernel heap
pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET + PML4_SIZE/2;
/// Size of kernel heap
pub const KERNEL_HEAP_SIZE: usize = 256 * 1024 * 1024; // 256 MB
/// Offset to kernel percpu variables
//TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000;
/// Size of kernel percpu variables
pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
/// Offset to user image
pub const USER_OFFSET: usize = 0;
/// Offset to user TCB
pub const USER_TCB_OFFSET: usize = 0xB000_0000;
/// Offset to user arguments
pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE/2;
/// Offset to user heap
pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
/// Offset to user grants
pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
/// Offset to user stack
pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE;
/// Size of user stack
pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
/// Offset to user TLS
pub const USER_TLS_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
/// Offset to user temporary image (used when cloning)
pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE;
/// Offset to user temporary heap (used when cloning)
pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
/// Offset to user temporary page for grants
pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
/// Offset to user temporary stack (used when cloning)
pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE;
/// Offset to user temporary tls (used when cloning)
pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
/// Offset for usage in other temporary pages
pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE;
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({
use core::fmt::Write;
let _ = write!($crate::console::CONSOLE.lock(), $($arg)*);
});
}
/// Print with new line to console
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Create an interrupt function that can safely run rust code
#[macro_export]
macro_rules! interrupt {
($name:ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner() {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Call inner rust function
inner();
// Pop scratch registers and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
iretq"
: : : : "intel", "volatile");
}
};
}
#[allow(dead_code)]
#[repr(packed)]
pub struct InterruptStack {
fs: usize,
r11: usize,
r10: usize,
r9: usize,
r8: usize,
rsi: usize,
rdi: usize,
rdx: usize,
rcx: usize,
rax: usize,
rip: usize,
cs: usize,
rflags: usize,
}
#[macro_export]
macro_rules! interrupt_stack {
($name:ident, $stack: ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &$crate::InterruptStack) {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Call inner rust function
inner(&*(rsp as *const $crate::InterruptStack));
// Pop scratch registers and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
iretq"
: : : : "intel", "volatile");
}
};
}
#[allow(dead_code)]
#[repr(packed)]
pub struct InterruptErrorStack {
fs: usize,
r11: usize,
r10: usize,
r9: usize,
r8: usize,
rsi: usize,
rdi: usize,
rdx: usize,
rcx: usize,
rax: usize,
code: usize,
rip: usize,
cs: usize,
rflags: usize,
}
#[macro_export]
macro_rules! interrupt_error {
($name:ident, $stack:ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &$crate::InterruptErrorStack) {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Call inner rust function
inner(&*(rsp as *const $crate::InterruptErrorStack));
// Pop scratch registers, error code, and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
add rsp, 8
iretq"
: : : : "intel", "volatile");
}
};
}
/// ACPI table parsing
pub mod acpi;
/// Console handling
pub mod console;
/// Context switching
pub mod context;
/// Devices
pub mod device;
/// Memcpy, memmove, etc.
pub mod externs;
/// Global descriptor table
pub mod gdt;
/// Interrupt descriptor table
pub mod idt;
/// Interrupt instructions
pub mod interrupt;
/// Memory management
pub mod memory;
/// Paging
pub mod paging;
/// Panic
pub mod panic;
/// Initialization and start function
pub mod start;
/// Shutdown function
pub mod stop;
/// Time
pub mod time;

View File

@@ -57,7 +57,7 @@ impl Dsdt {
}
}
let SLP_TYPa = (data[i] as u16) << 10;
let a = (data[i] as u16) << 10;
i += 1;
if i >= data.len() {
return None;
@@ -70,9 +70,9 @@ impl Dsdt {
}
}
let SLP_TYPb = (data[i] as u16) << 10;
let b = (data[i] as u16) << 10;
Some((SLP_TYPa, SLP_TYPb))
Some((a, b))
}
}

View File

@@ -20,13 +20,13 @@ use self::rsdt::Rsdt;
use self::sdt::Sdt;
use self::xsdt::Xsdt;
pub mod dmar;
pub mod dsdt;
pub mod fadt;
pub mod madt;
pub mod rsdt;
pub mod sdt;
pub mod xsdt;
mod dmar;
mod dsdt;
mod fadt;
mod madt;
mod rsdt;
mod sdt;
mod xsdt;
const TRAMPOLINE: usize = 0x7E00;
const AP_STARTUP: usize = TRAMPOLINE + 512;

65
src/consts.rs Normal file
View File

@@ -0,0 +1,65 @@
// Because the memory map is so important to not be aliased, it is defined here, in one place
// The lower 256 PML4 entries are reserved for userspace
// Each PML4 entry references up to 512 GB of memory
// The top (511) PML4 is reserved for recursive mapping
// The second from the top (510) PML4 is reserved for the kernel
/// The size of a single PML4
pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
/// Offset of recursive paging
pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
/// Offset of kernel
pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
/// Offset to kernel heap
pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET + PML4_SIZE/2;
/// Size of kernel heap
pub const KERNEL_HEAP_SIZE: usize = 256 * 1024 * 1024; // 256 MB
/// Offset to kernel percpu variables
//TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000;
/// Size of kernel percpu variables
pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
/// Offset to user image
pub const USER_OFFSET: usize = 0;
/// Offset to user TCB
pub const USER_TCB_OFFSET: usize = 0xB000_0000;
/// Offset to user arguments
pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE/2;
/// Offset to user heap
pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
/// Offset to user grants
pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
/// Offset to user stack
pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE;
/// Size of user stack
pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
/// Offset to user TLS
pub const USER_TLS_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
/// Offset to user temporary image (used when cloning)
pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE;
/// Offset to user temporary heap (used when cloning)
pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
/// Offset to user temporary page for grants
pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
/// Offset to user temporary stack (used when cloning)
pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE;
/// Offset to user temporary tls (used when cloning)
pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
/// Offset for usage in other temporary pages
pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE;

View File

@@ -3,9 +3,10 @@ use alloc::boxed::Box;
use collections::{BTreeMap, Vec, VecDeque};
use spin::Mutex;
use arch;
use context::arch;
use context::file::File;
use context::memory::{Grant, Memory, SharedMemory, Tls};
use device;
use scheme::{SchemeNamespace, FileHandle};
use syscall::data::Event;
use sync::{WaitMap, WaitQueue};
@@ -57,7 +58,7 @@ pub struct Context {
/// Context should wake up at specified time
pub wake: Option<(u64, u64)>,
/// The architecture specific context
pub arch: arch::context::Context,
pub arch: arch::Context,
/// Kernel FX - used to store SIMD and FPU registers on context switch
pub kfx: Option<Box<[u8]>>,
/// Kernel stack
@@ -102,7 +103,7 @@ impl Context {
waitpid: Arc::new(WaitMap::new()),
pending: VecDeque::new(),
wake: None,
arch: arch::context::Context::new(),
arch: arch::Context::new(),
kfx: None,
kstack: None,
image: Vec::new(),
@@ -206,7 +207,7 @@ impl Context {
if cpu_id != ::cpu_id() {
// Send IPI if not on current CPU
// TODO: Make this more architecture independent
unsafe { arch::device::local_apic::LOCAL_APIC.ipi(cpu_id) };
unsafe { device::local_apic::LOCAL_APIC.ipi(cpu_id) };
}
}
true

View File

@@ -3,9 +3,9 @@ use alloc::boxed::Box;
use collections::BTreeMap;
use core::mem;
use core::sync::atomic::Ordering;
use paging;
use spin::RwLock;
use arch;
use syscall::error::{Result, Error, EAGAIN};
use super::context::{Context, ContextId};
@@ -76,7 +76,7 @@ impl ContextList {
let func_ptr = stack.as_mut_ptr().offset(offset as isize);
*(func_ptr as *mut usize) = func as usize;
}
context.arch.set_page_table(unsafe { arch::paging::ActivePageTable::new().address() });
context.arch.set_page_table(unsafe { paging::ActivePageTable::new().address() });
context.arch.set_fx(fx.as_ptr() as usize);
context.arch.set_stack(stack.as_ptr() as usize + offset);
context.kfx = Some(fx);

View File

@@ -3,11 +3,11 @@ use collections::VecDeque;
use core::intrinsics;
use spin::Mutex;
use arch::memory::Frame;
use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress};
use arch::paging::entry::{self, EntryFlags};
use arch::paging::mapper::MapperFlushAll;
use arch::paging::temporary_page::TemporaryPage;
use memory::Frame;
use paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress};
use paging::entry::{self, EntryFlags};
use paging::mapper::MapperFlushAll;
use paging::temporary_page::TemporaryPage;
#[derive(Debug)]
pub struct Grant {

View File

@@ -8,6 +8,9 @@ pub use self::list::ContextList;
pub use self::switch::switch;
pub use context::context::ContextId;
#[path = "arch/x86_64.rs"]
mod arch;
/// Context struct
mod context;

View File

@@ -1,8 +1,10 @@
use core::sync::atomic::Ordering;
use arch;
use context::{contexts, Context, Status, CONTEXT_ID};
use context::{arch, contexts, Context, Status, CONTEXT_ID};
use gdt;
use interrupt;
use syscall;
use time;
/// Switch to the next context
///
@@ -13,8 +15,8 @@ pub unsafe fn switch() -> bool {
use core::ops::DerefMut;
// Set the global lock to avoid the unsafe operations below from causing issues
while arch::context::CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) {
arch::interrupt::pause();
while arch::CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) {
interrupt::pause();
}
let cpu_id = ::cpu_id();
@@ -43,7 +45,7 @@ pub unsafe fn switch() -> bool {
if context.status == Status::Blocked && context.wake.is_some() {
let wake = context.wake.expect("context::switch: wake not set");
let current = arch::time::monotonic();
let current = time::monotonic();
if current.0 > wake.0 || (current.0 == wake.0 && current.1 >= wake.1) {
context.wake = None;
context.unblock();
@@ -86,19 +88,19 @@ pub unsafe fn switch() -> bool {
if to_ptr as usize == 0 {
// Unset global lock if no context found
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
arch::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
return false;
}
(&mut *from_ptr).running = false;
(&mut *to_ptr).running = true;
if let Some(ref stack) = (*to_ptr).kstack {
arch::gdt::TSS.rsp[0] = (stack.as_ptr() as usize + stack.len() - 256) as u64;
gdt::TSS.rsp[0] = (stack.as_ptr() as usize + stack.len() - 256) as u64;
}
CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);
// Unset global lock before switch, as arch is only usable by the current CPU at this time
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
arch::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
if let Some(sig) = to_sig {
println!("Handle {}", sig);

View File

@@ -81,23 +81,6 @@ bitflags! {
}
}
#[repr(packed)]
pub struct IdtDescriptor {
size: u16,
offset: u64
}
impl IdtDescriptor {
pub fn set_slice(&mut self, slice: &'static [IdtEntry]) {
self.size = (slice.len() * mem::size_of::<IdtEntry>() - 1) as u16;
self.offset = slice.as_ptr() as u64;
}
pub unsafe fn load(&self) {
asm!("lidt [rax]" : : "{rax}"(self as *const _ as usize) : : "intel", "volatile");
}
}
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct IdtEntry {

View File

@@ -12,31 +12,21 @@
#![feature(drop_types_in_const)]
#![feature(heap_api)]
#![feature(integer_atomics)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![feature(never_type)]
#![feature(thread_local)]
#![feature(unique)]
#![no_std]
use arch::interrupt;
/// Architecture specific items (test)
#[cfg(test)]
#[macro_use]
extern crate arch_test as arch;
/// Architecture specific items (ARM)
#[cfg(all(not(test), target_arch = "arm"))]
#[macro_use]
extern crate arch_arm as arch;
/// Architecture specific items (x86_64)
#[cfg(all(not(test), target_arch = "x86_64"))]
#[macro_use]
extern crate arch_x86_64 as arch;
extern crate alloc_kernel as allocator;
pub extern crate x86;
extern crate alloc;
#[macro_use]
extern crate collections;
#[macro_use]
extern crate bitflags;
extern crate goblin;
extern crate spin;
@@ -44,25 +34,73 @@ extern crate spin;
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use scheme::FileHandle;
pub use consts::*;
#[macro_use]
/// Shared data structures
pub mod common;
/// Macros like print, println, and interrupt
#[macro_use]
pub mod macros;
/// Constants like memory locations
pub mod consts;
/// ACPI table parsing
mod acpi;
/// Console handling
pub mod console;
/// Context management
pub mod context;
/// Devices
pub mod device;
/// ELF file parsing
pub mod elf;
/// External functions
pub mod externs;
/// Global descriptor table
pub mod gdt;
/// Interrupt descriptor table
mod idt;
/// Interrupt instructions
pub mod interrupt;
/// Memory management
pub mod memory;
/// Paging
pub mod paging;
/// Panic
pub mod panic;
/// Schemes, filesystem handlers
pub mod scheme;
/// Initialization and start function
pub mod start;
/// Shutdown function
pub mod stop;
/// Synchronization primitives
pub mod sync;
/// Syscall handlers
pub mod syscall;
/// Time
pub mod time;
/// Tests
#[cfg(test)]
pub mod tests;
@@ -100,20 +138,6 @@ pub extern fn userspace_init() {
panic!("init returned");
}
/// Allow exception handlers to send signal to arch-independent kernel
#[no_mangle]
pub extern fn ksignal(signal: usize) {
println!("SIGNAL {}, CPU {}, PID {:?}", signal, cpu_id(), context::context_id());
{
let contexts = context::contexts();
if let Some(context_lock) = contexts.current() {
let context = context_lock.read();
println!("NAME {}", unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) });
}
}
syscall::exit(signal & 0x7F);
}
/// This is the kernel entry point for the primary CPU. The arch crate is responsible for calling this
#[no_mangle]
pub extern fn kmain(cpus: usize) {
@@ -150,14 +174,7 @@ pub extern fn kmain(cpus: usize) {
/// This is the main kernel entry point for secondary CPUs
#[no_mangle]
pub extern fn kmain_ap(_id: usize) {
// Disable APs for now
loop {
unsafe { interrupt::disable(); }
unsafe { interrupt::halt(); }
}
/*
pub extern fn kmain_ap(id: usize) {
CPU_ID.store(id, Ordering::SeqCst);
context::init();
@@ -176,5 +193,18 @@ pub extern fn kmain_ap(_id: usize) {
}
}
}
*/
}
/// Allow exception handlers to send signal to arch-independent kernel
#[no_mangle]
pub extern fn ksignal(signal: usize) {
println!("SIGNAL {}, CPU {}, PID {:?}", signal, cpu_id(), context::context_id());
{
let contexts = context::contexts();
if let Some(context_lock) = contexts.current() {
let context = context_lock.read();
println!("NAME {}", unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) });
}
}
syscall::exit(signal & 0x7F);
}

198
src/macros.rs Normal file
View File

@@ -0,0 +1,198 @@
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({
use core::fmt::Write;
let _ = write!($crate::console::CONSOLE.lock(), $($arg)*);
});
}
/// Print with new line to console
#[macro_export]
macro_rules! println {
() => (print!("\n"));
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Create an interrupt function that can safely run rust code
#[macro_export]
macro_rules! interrupt {
($name:ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner() {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Call inner rust function
inner();
// Pop scratch registers and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
iretq"
: : : : "intel", "volatile");
}
};
}
#[allow(dead_code)]
#[repr(packed)]
pub struct InterruptStack {
pub fs: usize,
pub r11: usize,
pub r10: usize,
pub r9: usize,
pub r8: usize,
pub rsi: usize,
pub rdi: usize,
pub rdx: usize,
pub rcx: usize,
pub rax: usize,
pub rip: usize,
pub cs: usize,
pub rflags: usize,
}
#[macro_export]
macro_rules! interrupt_stack {
($name:ident, $stack: ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &$crate::macros::InterruptStack) {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Call inner rust function
inner(&*(rsp as *const $crate::macros::InterruptStack));
// Pop scratch registers and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
iretq"
: : : : "intel", "volatile");
}
};
}
#[allow(dead_code)]
#[repr(packed)]
pub struct InterruptErrorStack {
pub fs: usize,
pub r11: usize,
pub r10: usize,
pub r9: usize,
pub r8: usize,
pub rsi: usize,
pub rdi: usize,
pub rdx: usize,
pub rcx: usize,
pub rax: usize,
pub code: usize,
pub rip: usize,
pub cs: usize,
pub rflags: usize,
}
#[macro_export]
macro_rules! interrupt_error {
($name:ident, $stack:ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &$crate::macros::InterruptErrorStack) {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Call inner rust function
inner(&*(rsp as *const $crate::macros::InterruptErrorStack));
// Pop scratch registers, error code, and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
add rsp, 8
iretq"
: : : : "intel", "volatile");
}
};
}

View File

@@ -6,7 +6,7 @@ use paging::PhysicalAddress;
use super::{Frame, FrameAllocator, MemoryArea, MemoryAreaIter};
pub struct AreaFrameAllocator {
pub struct BumpAllocator {
next_free_frame: Frame,
current_area: Option<&'static MemoryArea>,
areas: MemoryAreaIter,
@@ -14,9 +14,9 @@ pub struct AreaFrameAllocator {
kernel_end: Frame
}
impl AreaFrameAllocator {
pub fn new(kernel_start: usize, kernel_end: usize, memory_areas: MemoryAreaIter) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
impl BumpAllocator {
pub fn new(kernel_start: usize, kernel_end: usize, memory_areas: MemoryAreaIter) -> BumpAllocator {
let mut allocator = BumpAllocator {
next_free_frame: Frame::containing_address(PhysicalAddress::new(0)),
current_area: None,
areas: memory_areas,
@@ -42,7 +42,7 @@ impl AreaFrameAllocator {
}
}
impl FrameAllocator for AreaFrameAllocator {
impl FrameAllocator for BumpAllocator {
fn free_frames(&self) -> usize {
let mut count = 0;
@@ -121,7 +121,7 @@ impl FrameAllocator for AreaFrameAllocator {
}
}
fn deallocate_frames(&mut self, frame: Frame, count: usize) {
//panic!("AreaFrameAllocator::deallocate_frame: not supported: {:?}", frame);
fn deallocate_frames(&mut self, _frame: Frame, _count: usize) {
//panic!("BumpAllocator::deallocate_frame: not supported: {:?}", frame);
}
}

View File

@@ -3,11 +3,11 @@
pub use paging::{PAGE_SIZE, PhysicalAddress};
use self::area_frame_allocator::AreaFrameAllocator;
use self::bump::BumpAllocator;
use spin::Mutex;
pub mod area_frame_allocator;
pub mod bump;
/// The current memory map. Its size is maxed out to 512 entries, due to it being
/// from 0x500 to 0x5000 (800 is the absolute total)
@@ -64,7 +64,7 @@ impl Iterator for MemoryAreaIter {
}
}
static ALLOCATOR: Mutex<Option<AreaFrameAllocator>> = Mutex::new(None);
static ALLOCATOR: Mutex<Option<BumpAllocator>> = Mutex::new(None);
/// Init memory module
/// Must be called once, and only once,
@@ -77,17 +77,7 @@ pub unsafe fn init(kernel_start: usize, kernel_end: usize) {
}
}
*ALLOCATOR.lock() = Some(AreaFrameAllocator::new(kernel_start, kernel_end, MemoryAreaIter::new(MEMORY_AREA_FREE)));
}
/// Allocate a frame
pub fn allocate_frame() -> Option<Frame> {
allocate_frames(1)
}
/// Deallocate a frame
pub fn deallocate_frame(frame: Frame) {
deallocate_frames(frame, 1)
*ALLOCATOR.lock() = Some(BumpAllocator::new(kernel_start, kernel_end, MemoryAreaIter::new(MEMORY_AREA_FREE)));
}
/// Get the number of frames available

View File

@@ -1,7 +1,7 @@
use core::mem;
use core::ptr::Unique;
use memory::{allocate_frame, deallocate_frame, Frame};
use memory::{allocate_frames, deallocate_frames, Frame};
use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
use super::entry::{self, EntryFlags};
@@ -113,7 +113,7 @@ impl Mapper {
/// Map a page to the next free frame
pub fn map(&mut self, page: Page, flags: EntryFlags) -> MapperFlush {
let frame = allocate_frame().expect("out of frames");
let frame = allocate_frames(1).expect("out of frames");
self.map_to(page, frame, flags)
}
@@ -143,7 +143,7 @@ impl Mapper {
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
// TODO free p(1,2,3) table if empty
deallocate_frame(frame);
deallocate_frames(frame, 1);
MapperFlush::new(page)
}

View File

@@ -1,11 +1,11 @@
//! # Paging
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use core::mem;
use core::{mem, ptr};
use core::ops::{Deref, DerefMut};
use x86::{msr, tlb};
use memory::{allocate_frame, Frame};
use memory::{allocate_frames, Frame};
use self::entry::{EntryFlags, PRESENT, GLOBAL, WRITABLE, NO_EXECUTE};
use self::mapper::Mapper;
@@ -67,8 +67,8 @@ unsafe fn init_tcb(cpu_id: usize) -> usize {
let end = start + size;
tcb_offset = end - mem::size_of::<usize>();
::externs::memcpy(start as *mut u8, & __tdata_start as *const u8, tbss_offset);
::externs::memset((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
ptr::copy(& __tdata_start as *const u8, start as *mut u8, tbss_offset);
ptr::write_bytes((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
*(tcb_offset as *mut usize) = end;
}
@@ -113,7 +113,7 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_MISC_OFFSET)));
let mut new_table = {
let frame = allocate_frame().expect("no more frames in paging::init new_table");
let frame = allocate_frames(1).expect("no more frames in paging::init new_table");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};

View File

@@ -4,7 +4,7 @@
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use memory::allocate_frame;
use memory::allocate_frames;
use super::entry::*;
use super::ENTRY_COUNT;
@@ -65,7 +65,7 @@ impl<L> Table<L> where L: HierarchicalLevel {
if self.next_table(index).is_none() {
assert!(!self[index].flags().contains(HUGE_PAGE),
"next_table_create does not support huge pages");
let frame = allocate_frame().expect("no frames available");
let frame = allocate_frames(1).expect("no frames available");
self[index].set(frame, PRESENT | WRITABLE | USER_ACCESSIBLE /* Allow users to go down the page table, implement permissions at the page level */);
self.next_table_mut(index).unwrap().zero();
}

View File

@@ -2,7 +2,7 @@ use core::{mem, str};
use core::sync::atomic::Ordering;
use spin::Mutex;
use arch::interrupt::irq::acknowledge;
use interrupt::irq::acknowledge;
use context;
use scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use syscall::error::*;

View File

@@ -1,4 +1,4 @@
use arch::memory::{free_frames, used_frames};
use memory::{free_frames, used_frames};
use syscall::data::StatVfs;
use syscall::error::*;

View File

@@ -1,6 +1,6 @@
use collections::Vec;
use arch::device::cpu::cpu_info;
use device::cpu::cpu_info;
use syscall::error::{Error, EIO, Result};
pub fn resource() -> Result<Vec<u8>> {

View File

@@ -4,9 +4,8 @@ use core::sync::atomic::{AtomicU64, Ordering};
use core::{mem, slice, usize};
use spin::{Mutex, RwLock};
use arch;
use arch::paging::{InactivePageTable, Page, VirtualAddress, entry};
use arch::paging::temporary_page::TemporaryPage;
use paging::{InactivePageTable, Page, VirtualAddress, entry};
use paging::temporary_page::TemporaryPage;
use context::{self, Context};
use context::memory::Grant;
use scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
@@ -91,12 +90,12 @@ impl UserInner {
let mut grants = context.grants.lock();
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_GRANT_OFFSET)));
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_GRANT_OFFSET)));
let from_address = (address/4096) * 4096;
let offset = address - from_address;
let full_size = ((offset + size + 4095)/4096) * 4096;
let mut to_address = arch::USER_GRANT_OFFSET;
let mut to_address = ::USER_GRANT_OFFSET;
let mut flags = entry::PRESENT | entry::NO_EXECUTE | entry::USER_ACCESSIBLE;
if writable {
@@ -146,7 +145,7 @@ impl UserInner {
let mut grants = context.grants.lock();
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_GRANT_OFFSET)));
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_GRANT_OFFSET)));
for i in 0 .. grants.len() {
let start = grants[i].start_address().get();

View File

@@ -3,12 +3,12 @@
/// It must create the IDT with the correct entries, those entries are
/// defined in other files inside of the `arch` module
use core::ptr;
use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use acpi;
use allocator;
use device;
use externs::memset;
use gdt;
use idt;
use interrupt;
@@ -58,7 +58,7 @@ pub unsafe extern fn kstart() -> ! {
if start_ptr as usize <= end_ptr {
let size = end_ptr - start_ptr as usize;
memset(start_ptr, 0, size);
ptr::write_bytes(start_ptr, 0, size);
}
assert_eq!(BSS_TEST_ZERO, 0);
@@ -113,7 +113,7 @@ pub unsafe extern fn kstart() -> ! {
// Init the allocator
allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
}
// Initialize devices
device::init(&mut active_table);

View File

@@ -1,6 +1,5 @@
use arch;
use arch::memory::{allocate_frames, deallocate_frames, Frame};
use arch::paging::{entry, ActivePageTable, PhysicalAddress, VirtualAddress};
use memory::{allocate_frames, deallocate_frames, Frame};
use paging::{entry, ActivePageTable, PhysicalAddress, VirtualAddress};
use context;
use context::memory::Grant;
use syscall::error::{Error, EFAULT, ENOMEM, EPERM, ESRCH, Result};
@@ -54,7 +53,7 @@ pub fn physmap(physical_address: usize, size: usize, flags: usize) -> Result<usi
let from_address = (physical_address/4096) * 4096;
let offset = physical_address - from_address;
let full_size = ((offset + size + 4095)/4096) * 4096;
let mut to_address = arch::USER_GRANT_OFFSET;
let mut to_address = ::USER_GRANT_OFFSET;
let mut entry_flags = entry::PRESENT | entry::NO_EXECUTE | entry::USER_ACCESSIBLE;
if flags & MAP_WRITE == MAP_WRITE {

View File

@@ -2,7 +2,7 @@
extern crate syscall;
pub use self::syscall::{data, error, flag, number, scheme};
pub use self::syscall::{data, error, flag, io, number, scheme};
pub use self::driver::*;
pub use self::fs::*;

View File

@@ -6,11 +6,11 @@ use core::{intrinsics, mem, str};
use core::ops::DerefMut;
use spin::Mutex;
use arch;
use arch::memory::allocate_frame;
use arch::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, entry};
use arch::paging::temporary_page::TemporaryPage;
use arch::start::usermode;
use memory::allocate_frames;
use paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, entry};
use paging::temporary_page::TemporaryPage;
use start::usermode;
use interrupt;
use context;
use context::ContextId;
use elf::{self, program_header};
@@ -40,11 +40,11 @@ pub fn brk(address: usize) -> Result<usize> {
if address == 0 {
//println!("Brk query {:X}", current);
Ok(current)
} else if address >= arch::USER_HEAP_OFFSET {
} else if address >= ::USER_HEAP_OFFSET {
//TODO: out of memory errors
if let Some(ref heap_shared) = context.heap {
heap_shared.with(|heap| {
heap.resize(address - arch::USER_HEAP_OFFSET, true);
heap.resize(address - ::USER_HEAP_OFFSET, true);
});
} else {
panic!("user heap not initialized");
@@ -118,7 +118,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
unsafe {
let func_ptr = new_stack.as_mut_ptr().offset(offset as isize);
*(func_ptr as *mut usize) = arch::interrupt::syscall::clone_ret as usize;
*(func_ptr as *mut usize) = interrupt::syscall::clone_ret as usize;
}
kstack_option = Some(new_stack);
@@ -136,7 +136,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
for memory_shared in context.image.iter() {
memory_shared.with(|memory| {
let mut new_memory = context::memory::Memory::new(
VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
VirtualAddress::new(memory.start_address().get() + ::USER_TMP_OFFSET),
memory.size(),
entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
false
@@ -156,7 +156,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
if let Some(ref heap_shared) = context.heap {
heap_shared.with(|heap| {
let mut new_heap = context::memory::Memory::new(
VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
VirtualAddress::new(::USER_TMP_HEAP_OFFSET),
heap.size(),
entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
false
@@ -176,7 +176,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
if let Some(ref stack) = context.stack {
let mut new_stack = context::memory::Memory::new(
VirtualAddress::new(arch::USER_TMP_STACK_OFFSET),
VirtualAddress::new(::USER_TMP_STACK_OFFSET),
stack.size(),
entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
false
@@ -197,7 +197,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
master: tls.master,
file_size: tls.file_size,
mem: context::memory::Memory::new(
VirtualAddress::new(arch::USER_TMP_TLS_OFFSET),
VirtualAddress::new(::USER_TMP_TLS_OFFSET),
tls.mem.size(),
entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
true
@@ -321,10 +321,10 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
let mut active_table = unsafe { ActivePageTable::new() };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_MISC_OFFSET)));
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_MISC_OFFSET)));
let mut new_table = {
let frame = allocate_frame().expect("no more frames in syscall::clone new_table");
let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
@@ -393,7 +393,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
let size = unsafe { & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize };
let start = arch::KERNEL_PERCPU_OFFSET + arch::KERNEL_PERCPU_SIZE * cpu_id;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
@@ -411,7 +411,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
// Move copy of image
for memory_shared in image.iter_mut() {
memory_shared.with(|memory| {
let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
let start = VirtualAddress::new(memory.start_address().get() - ::USER_TMP_OFFSET + ::USER_OFFSET);
memory.move_to(start, &mut new_table, &mut temporary_page);
});
}
@@ -420,7 +420,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
// Move copy of heap
if let Some(heap_shared) = heap_option {
heap_shared.with(|heap| {
heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page);
heap.move_to(VirtualAddress::new(::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page);
});
context.heap = Some(heap_shared);
}
@@ -428,13 +428,13 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
// Setup user stack
if let Some(mut stack) = stack_option {
stack.move_to(VirtualAddress::new(arch::USER_STACK_OFFSET), &mut new_table, &mut temporary_page);
stack.move_to(VirtualAddress::new(::USER_STACK_OFFSET), &mut new_table, &mut temporary_page);
context.stack = Some(stack);
}
// Setup user TLS
if let Some(mut tls) = tls_option {
tls.mem.move_to(VirtualAddress::new(arch::USER_TLS_OFFSET), &mut new_table, &mut temporary_page);
tls.mem.move_to(VirtualAddress::new(::USER_TLS_OFFSET), &mut new_table, &mut temporary_page);
context.tls = Some(tls);
}
@@ -478,7 +478,7 @@ fn empty(context: &mut context::Context, reaping: bool) {
println!("{}: {}: Grant should not exist: {:?}", context.id.into(), unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, grant);
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_GRANT_OFFSET)));
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_GRANT_OFFSET)));
grant.unmap_inactive(&mut new_table, &mut temporary_page);
} else {
@@ -490,7 +490,7 @@ fn empty(context: &mut context::Context, reaping: bool) {
pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
let entry;
let mut sp = arch::USER_STACK_OFFSET + arch::USER_STACK_SIZE - 256;
let mut sp = ::USER_STACK_OFFSET + ::USER_STACK_SIZE - 256;
{
let mut args = Vec::new();
@@ -592,13 +592,13 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
context.image.push(memory.to_shared());
} else if segment.p_type == program_header::PT_TLS {
let memory = context::memory::Memory::new(
VirtualAddress::new(arch::USER_TCB_OFFSET),
VirtualAddress::new(::USER_TCB_OFFSET),
4096,
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
true
);
unsafe { *(arch::USER_TCB_OFFSET as *mut usize) = arch::USER_TLS_OFFSET + segment.p_memsz as usize; }
unsafe { *(::USER_TCB_OFFSET as *mut usize) = ::USER_TLS_OFFSET + segment.p_memsz as usize; }
context.image.push(memory.to_shared());
@@ -612,7 +612,7 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
// Map heap
context.heap = Some(context::memory::Memory::new(
VirtualAddress::new(arch::USER_HEAP_OFFSET),
VirtualAddress::new(::USER_HEAP_OFFSET),
0,
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
true
@@ -620,8 +620,8 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
// Map stack
context.stack = Some(context::memory::Memory::new(
VirtualAddress::new(arch::USER_STACK_OFFSET),
arch::USER_STACK_SIZE,
VirtualAddress::new(::USER_STACK_OFFSET),
::USER_STACK_SIZE,
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
true
));
@@ -632,7 +632,7 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
master: master,
file_size: file_size,
mem: context::memory::Memory::new(
VirtualAddress::new(arch::USER_TLS_OFFSET),
VirtualAddress::new(::USER_TLS_OFFSET),
size,
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
true
@@ -653,7 +653,7 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
let mut arg_size = 0;
for arg in args.iter().rev() {
sp -= mem::size_of::<usize>();
unsafe { *(sp as *mut usize) = arch::USER_ARG_OFFSET + arg_size; }
unsafe { *(sp as *mut usize) = ::USER_ARG_OFFSET + arg_size; }
sp -= mem::size_of::<usize>();
unsafe { *(sp as *mut usize) = arg.len(); }
@@ -665,7 +665,7 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
if arg_size > 0 {
let mut memory = context::memory::Memory::new(
VirtualAddress::new(arch::USER_ARG_OFFSET),
VirtualAddress::new(::USER_ARG_OFFSET),
arg_size,
entry::NO_EXECUTE | entry::WRITABLE,
true
@@ -675,7 +675,7 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
for arg in args.iter().rev() {
unsafe {
intrinsics::copy(arg.as_ptr(),
(arch::USER_ARG_OFFSET + arg_offset) as *mut u8,
(::USER_ARG_OFFSET + arg_offset) as *mut u8,
arg.len());
}
@@ -916,7 +916,7 @@ fn reap(pid: ContextId) -> Result<ContextId> {
running = context.running;
}
arch::interrupt::pause();
interrupt::pause();
}
let mut contexts = context::contexts_mut();

View File

@@ -1,4 +1,4 @@
use arch;
use time;
use context;
use syscall::data::TimeSpec;
use syscall::error::*;
@@ -7,13 +7,13 @@ use syscall::flag::{CLOCK_REALTIME, CLOCK_MONOTONIC};
pub fn clock_gettime(clock: usize, time: &mut TimeSpec) -> Result<usize> {
match clock {
CLOCK_REALTIME => {
let arch_time = arch::time::realtime();
let arch_time = time::realtime();
time.tv_sec = arch_time.0 as i64;
time.tv_nsec = arch_time.1 as i32;
Ok(0)
},
CLOCK_MONOTONIC => {
let arch_time = arch::time::monotonic();
let arch_time = time::monotonic();
time.tv_sec = arch_time.0 as i64;
time.tv_nsec = arch_time.1 as i32;
Ok(0)
@@ -23,7 +23,7 @@ pub fn clock_gettime(clock: usize, time: &mut TimeSpec) -> Result<usize> {
}
pub fn nanosleep(req: &TimeSpec, rem_opt: Option<&mut TimeSpec>) -> Result<usize> {
let start = arch::time::monotonic();
let start = time::monotonic();
let sum = start.1 + req.tv_nsec as u64;
let end = (start.0 + req.tv_sec as u64 + sum / 1000000000, sum % 1000000000);
@@ -39,7 +39,7 @@ pub fn nanosleep(req: &TimeSpec, rem_opt: Option<&mut TimeSpec>) -> Result<usize
unsafe { context::switch(); }
if let Some(mut rem) = rem_opt {
//TODO let current = arch::time::monotonic();
//TODO let current = time::monotonic();
rem.tv_sec = 0;
rem.tv_nsec = 0;
}

View File

@@ -1,6 +1,6 @@
use core::{mem, slice};
use arch::paging::{ActivePageTable, Page, VirtualAddress, entry};
use paging::{ActivePageTable, Page, VirtualAddress, entry};
use syscall::error::*;
fn validate(address: usize, size: usize, flags: entry::EntryFlags) -> Result<()> {