Switch to 2018 edition

Most of this was generated by the absolutely extraordinary `cargo fix`
subcommand. There were still 2 errors and a few warnings to patch up,
but compared to the normal 600+ errors, I'd say the fixer did a damn
good job! I'm also amazed that I could still start the VM after this,
I half expected some kind of runtime failure...
This commit is contained in:
jD91mZM2
2019-06-21 11:09:32 +02:00
parent 1be77c2ab4
commit fe705d9b63
73 changed files with 374 additions and 368 deletions

View File

@@ -2,6 +2,7 @@
name = "kernel"
version = "0.1.54"
build = "build.rs"
edition = "2018"
[lib]
name = "kernel"

View File

@@ -3,7 +3,7 @@ use core::ptr::NonNull;
use linked_list_allocator::Heap;
use spin::Mutex;
use paging::ActivePageTable;
use crate::paging::ActivePageTable;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
@@ -32,10 +32,10 @@ unsafe impl GlobalAlloc for Allocator {
panic!("__rust_allocate: heap not initialized");
};
super::map_heap(&mut ActivePageTable::new(), ::KERNEL_HEAP_OFFSET + size, ::KERNEL_HEAP_SIZE);
super::map_heap(&mut ActivePageTable::new(), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE);
if let Some(ref mut heap) = *HEAP.lock() {
heap.extend(::KERNEL_HEAP_SIZE);
heap.extend(crate::KERNEL_HEAP_SIZE);
} else {
panic!("__rust_allocate: heap not initialized");
}

View File

@@ -1,6 +1,6 @@
use paging::{ActivePageTable, Page, VirtualAddress};
use paging::entry::EntryFlags;
use paging::mapper::MapperFlushAll;
use crate::paging::{ActivePageTable, Page, VirtualAddress};
use crate::paging::entry::EntryFlags;
use crate::paging::mapper::MapperFlushAll;
#[cfg(not(feature="slab"))]
pub use self::linked_list::Allocator;
@@ -28,8 +28,8 @@ unsafe fn map_heap(active_table: &mut ActivePageTable, offset: usize, size: usiz
}
pub unsafe fn init(active_table: &mut ActivePageTable) {
let offset = ::KERNEL_HEAP_OFFSET;
let size = ::KERNEL_HEAP_SIZE;
let offset = crate::KERNEL_HEAP_OFFSET;
let size = crate::KERNEL_HEAP_SIZE;
// Map heap pages
map_heap(active_table, offset, size);

View File

@@ -3,12 +3,12 @@ use core::fmt;
use spin::Mutex;
use spin::MutexGuard;
use log::{LOG, Log};
use crate::log::{LOG, Log};
#[cfg(feature = "qemu_debug")]
use syscall::io::Io;
use syscall::io::Pio;
use crate::syscall::io::Pio;
#[cfg(feature = "serial_debug")]
use devices::uart_16550::SerialPort;
use crate::devices::uart_16550::SerialPort;
#[cfg(feature = "graphical_debug")]
use super::graphical_debug::{DEBUG_DISPLAY, DebugDisplay};

View File

@@ -2,9 +2,9 @@ use core::intrinsics::{volatile_load, volatile_store};
use x86::shared::cpuid::CpuId;
use x86::shared::msr::*;
use memory::Frame;
use paging::{ActivePageTable, PhysicalAddress, Page, VirtualAddress};
use paging::entry::EntryFlags;
use crate::memory::Frame;
use crate::paging::{ActivePageTable, PhysicalAddress, Page, VirtualAddress};
use crate::paging::entry::EntryFlags;
pub static mut LOCAL_APIC: LocalApic = LocalApic {
address: 0,
@@ -27,12 +27,12 @@ pub struct LocalApic {
impl LocalApic {
unsafe fn init(&mut self, active_table: &mut ActivePageTable) {
self.address = (rdmsr(IA32_APIC_BASE) as usize & 0xFFFF_0000) + ::KERNEL_OFFSET;
self.address = (rdmsr(IA32_APIC_BASE) as usize & 0xFFFF_0000) + crate::KERNEL_OFFSET;
self.x2 = CpuId::new().get_feature_info().unwrap().has_x2apic();
if ! self.x2 {
let page = Page::containing_address(VirtualAddress::new(self.address));
let frame = Frame::containing_address(PhysicalAddress::new(self.address - ::KERNEL_OFFSET));
let frame = Frame::containing_address(PhysicalAddress::new(self.address - crate::KERNEL_OFFSET));
let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
result.flush(active_table);
}

View File

@@ -1,4 +1,4 @@
use paging::ActivePageTable;
use crate::paging::ActivePageTable;
pub mod cpu;
pub mod local_apic;

View File

@@ -1,4 +1,4 @@
use syscall::io::{Io, Pio};
use crate::syscall::io::{Io, Pio};
pub static mut MASTER: Pic = Pic::new(0x20);
pub static mut SLAVE: Pic = Pic::new(0xA0);

View File

@@ -1,4 +1,4 @@
use syscall::io::{Io, Pio};
use crate::syscall::io::{Io, Pio};
pub static mut CHAN0: Pio<u8> = Pio::new(0x40);
pub static mut CHAN1: Pio<u8> = Pio::new(0x41);

View File

@@ -1,5 +1,5 @@
use syscall::io::{Io, Pio};
use time;
use crate::syscall::io::{Io, Pio};
use crate::time;
pub fn init() {
let mut rtc = Rtc::new();

View File

@@ -1,5 +1,5 @@
use devices::uart_16550::SerialPort;
use syscall::io::Pio;
use crate::devices::uart_16550::SerialPort;
use crate::syscall::io::Pio;
use spin::Mutex;
pub static COM1: Mutex<SerialPort<Pio<u8>>> = Mutex::new(SerialPort::<Pio<u8>>::new(0x3F8));

View File

@@ -8,7 +8,7 @@ use x86::shared::dtables::{self, DescriptorTablePointer};
use x86::shared::segmentation::{self, SegmentDescriptor, SegmentSelector};
use x86::shared::task;
use paging::PAGE_SIZE;
use crate::paging::PAGE_SIZE;
pub const GDT_NULL: usize = 0;
pub const GDT_KERNEL_CODE: usize = 1;
@@ -94,7 +94,7 @@ pub static mut TSS: TaskStateSegment = TaskStateSegment {
};
pub unsafe fn set_tcb(pid: usize) {
GDT[GDT_USER_TLS].set_offset((::USER_TCB_OFFSET + pid * PAGE_SIZE) as u32);
GDT[GDT_USER_TLS].set_offset((crate::USER_TCB_OFFSET + pid * PAGE_SIZE) as u32);
}
#[cfg(feature = "pti")]

View File

@@ -2,8 +2,8 @@ use core::mem;
use x86::current::irq::IdtEntry as X86IdtEntry;
use x86::shared::dtables::{self, DescriptorTablePointer};
use interrupt::*;
use ipi::IpiKind;
use crate::interrupt::*;
use crate::ipi::IpiKind;
pub static mut INIT_IDTR: DescriptorTablePointer<X86IdtEntry> = DescriptorTablePointer {
limit: 0,

View File

@@ -1,5 +1,5 @@
use interrupt::stack_trace;
use syscall::flag::*;
use crate::interrupt::stack_trace;
use crate::syscall::flag::*;
extern {
fn ksignal(signal: usize);

View File

@@ -1,8 +1,8 @@
use core::sync::atomic::Ordering;
use x86::shared::tlb;
use context;
use device::local_apic::LOCAL_APIC;
use crate::context;
use crate::device::local_apic::LOCAL_APIC;
use super::irq::PIT_TICKS;
interrupt!(wakeup, {

View File

@@ -1,12 +1,12 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use context;
use context::timeout;
use device::pic;
use device::serial::{COM1, COM2};
use ipi::{ipi, IpiKind, IpiTarget};
use scheme::debug::debug_input;
use time;
use crate::context;
use crate::context::timeout;
use crate::device::pic;
use crate::device::serial::{COM1, COM2};
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::scheme::debug::debug_input;
use crate::time;
//resets to 0 in context::switch()
pub static PIT_TICKS: AtomicUsize = AtomicUsize::new(0);

View File

@@ -1,5 +1,5 @@
use arch::{gdt, pti};
use syscall;
use crate::arch::{gdt, pti};
use crate::syscall;
use x86::shared::msr;
pub unsafe fn init() {

View File

@@ -2,7 +2,7 @@ use core::{mem, str};
use goblin::elf::sym;
use rustc_demangle::demangle;
use paging::{ActivePageTable, VirtualAddress};
use crate::paging::{ActivePageTable, VirtualAddress};
/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
@@ -43,10 +43,10 @@ pub unsafe fn symbol_trace(addr: usize) {
use core::slice;
use core::sync::atomic::Ordering;
use elf::Elf;
use start::{KERNEL_BASE, KERNEL_SIZE};
use crate::elf::Elf;
use crate::start::{KERNEL_BASE, KERNEL_SIZE};
let kernel_ptr = (KERNEL_BASE.load(Ordering::SeqCst) + ::KERNEL_OFFSET) as *const u8;
let kernel_ptr = (KERNEL_BASE.load(Ordering::SeqCst) + crate::KERNEL_OFFSET) as *const u8;
let kernel_slice = slice::from_raw_parts(kernel_ptr, KERNEL_SIZE.load(Ordering::SeqCst));
if let Ok(elf) = Elf::from(kernel_slice) {
let mut strtab_opt = None;

View File

@@ -1,7 +1,7 @@
//! # Page table entry
//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use memory::Frame;
use crate::memory::Frame;
use super::PhysicalAddress;

View File

@@ -1,7 +1,7 @@
use core::mem;
use core::ptr::Unique;
use memory::{allocate_frames, deallocate_frames, Frame};
use crate::memory::{allocate_frames, deallocate_frames, Frame};
use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
use super::entry::EntryFlags;

View File

@@ -5,7 +5,7 @@ use core::{mem, ptr};
use core::ops::{Deref, DerefMut};
use x86::shared::{control_regs, msr, tlb};
use memory::{allocate_frames, Frame};
use crate::memory::{allocate_frames, Frame};
use self::entry::EntryFlags;
use self::mapper::Mapper;
@@ -63,7 +63,7 @@ unsafe fn init_tcb(cpu_id: usize) -> usize {
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let tbss_offset = & __tbss_start as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
tcb_offset = end - mem::size_of::<usize>();
@@ -110,7 +110,7 @@ pub unsafe fn init(cpu_id: usize, kernel_start: usize, kernel_end: usize, stack_
let mut active_table = ActivePageTable::new();
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_MISC_OFFSET)));
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)));
let mut new_table = {
let frame = allocate_frames(1).expect("no more frames in paging::init new_table");
@@ -120,10 +120,10 @@ pub unsafe fn init(cpu_id: usize, kernel_start: usize, kernel_end: usize, stack_
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
// Remap stack writable, no execute
{
let start_frame = Frame::containing_address(PhysicalAddress::new(stack_start - ::KERNEL_OFFSET));
let end_frame = Frame::containing_address(PhysicalAddress::new(stack_end - ::KERNEL_OFFSET - 1));
let start_frame = Frame::containing_address(PhysicalAddress::new(stack_start - crate::KERNEL_OFFSET));
let end_frame = Frame::containing_address(PhysicalAddress::new(stack_end - crate::KERNEL_OFFSET - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + crate::KERNEL_OFFSET));
let result = mapper.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE);
// The flush can be ignored as this is not the active table. See later active_table.switch
/* unsafe */ { result.ignore(); }
@@ -136,7 +136,7 @@ pub unsafe fn init(cpu_id: usize, kernel_start: usize, kernel_end: usize, stack_
let end_frame = Frame::containing_address(PhysicalAddress::new(kernel_end - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let phys_addr = frame.start_address().get();
let virt_addr = phys_addr + ::KERNEL_OFFSET;
let virt_addr = phys_addr + crate::KERNEL_OFFSET;
macro_rules! in_section {
($n: ident) => (
@@ -176,7 +176,7 @@ pub unsafe fn init(cpu_id: usize, kernel_start: usize, kernel_end: usize, stack_
{
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
@@ -214,14 +214,14 @@ pub unsafe fn init_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack
let mut new_table = InactivePageTable::from_address(bsp_table);
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_MISC_OFFSET)));
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)));
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
// Map tdata and tbss
{
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
@@ -238,7 +238,7 @@ pub unsafe fn init_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack
let start_frame = Frame::containing_address(PhysicalAddress::new(start));
let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + crate::KERNEL_OFFSET));
let result = mapper.map_to(page, frame, flags);
// The flush can be ignored as this is not the active table. See later active_table.switch
result.ignore();
@@ -247,7 +247,7 @@ pub unsafe fn init_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack
};
// Remap stack writable, no execute
remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE);
remap(stack_start - crate::KERNEL_OFFSET, stack_end - crate::KERNEL_OFFSET, EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE);
});
// This switches the active table, which is setup by the bootloader, to a correct table
@@ -312,14 +312,14 @@ impl ActivePageTable {
let p4_table = temporary_page.map_table_frame(backup.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE, self);
// overwrite recursive mapping
self.p4_mut()[::RECURSIVE_PAGE_PML4].set(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
self.p4_mut()[crate::RECURSIVE_PAGE_PML4].set(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
self.flush_all();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[::RECURSIVE_PAGE_PML4].set(backup, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
p4_table[crate::RECURSIVE_PAGE_PML4].set(backup, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
self.flush_all();
}
@@ -342,7 +342,7 @@ impl InactivePageTable {
// now we are able to zero the table
table.zero();
// set up recursive mapping for the table
table[::RECURSIVE_PAGE_PML4].set(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
table[crate::RECURSIVE_PAGE_PML4].set(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
}
temporary_page.unmap(active_table);

View File

@@ -4,12 +4,12 @@
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use memory::allocate_frames;
use crate::memory::allocate_frames;
use super::entry::{EntryFlags, Entry};
use super::ENTRY_COUNT;
pub const P4: *mut Table<Level4> = (::RECURSIVE_PAGE_OFFSET | 0x7ffffff000) as *mut _;
pub const P4: *mut Table<Level4> = (crate::RECURSIVE_PAGE_OFFSET | 0x7ffffff000) as *mut _;
pub trait TableLevel {}

View File

@@ -1,7 +1,7 @@
//! Temporarily map a page
//! From [Phil Opp's Blog](http://os.phil-opp.com/remap-the-kernel.html)
use memory::Frame;
use crate::memory::Frame;
use super::{ActivePageTable, Page, VirtualAddress};
use super::entry::EntryFlags;

View File

@@ -6,19 +6,19 @@
use core::slice;
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use allocator;
use crate::allocator;
#[cfg(feature = "acpi")]
use acpi;
#[cfg(feature = "graphical_debug")]
use arch::x86_64::graphical_debug;
use arch::x86_64::pti;
use device;
use gdt;
use idt;
use interrupt;
use log;
use memory;
use paging;
use crate::arch::x86_64::pti;
use crate::device;
use crate::gdt;
use crate::idt;
use crate::interrupt;
use crate::log;
use crate::memory;
use crate::paging;
/// Test of zero values in BSS.
static BSS_TEST_ZERO: usize = 0;
@@ -141,7 +141,7 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
slice::from_raw_parts(env_base as *const u8, env_size)
};
::kmain(CPU_COUNT.load(Ordering::SeqCst), env);
crate::kmain(CPU_COUNT.load(Ordering::SeqCst), env);
}
#[repr(packed)]
@@ -204,7 +204,7 @@ pub unsafe extern fn kstart_ap(args_ptr: *const KernelArgsAp) -> ! {
interrupt::pause();
}
::kmain_ap(cpu_id);
crate::kmain_ap(cpu_id);
}
#[naked]

View File

@@ -1,6 +1,6 @@
#[cfg(feature = "acpi")]
use acpi;
use syscall::io::{Io, Pio};
use crate::syscall::io::{Io, Pio};
#[no_mangle]
pub unsafe extern fn kreset() -> ! {

View File

@@ -29,9 +29,11 @@ macro_rules! int_like {
pub struct $new_type_name($backing_type);
impl $new_type_name {
#[allow(dead_code)]
pub const fn into(self) -> $backing_type {
self.0
}
#[allow(dead_code)]
pub const fn from(x: $backing_type) -> Self {
$new_type_name(x)
}
@@ -48,17 +50,21 @@ macro_rules! int_like {
}
impl $new_atomic_type_name {
#[allow(dead_code)]
pub const fn new(x: $new_type_name) -> Self {
$new_atomic_type_name {
container: $backing_atomic_type::new(x.into())
}
}
#[allow(dead_code)]
pub const fn default() -> Self {
Self::new($new_type_name::from(0))
}
#[allow(dead_code)]
pub fn load(&self, order: ::core::sync::atomic::Ordering) -> $new_type_name {
$new_type_name::from(self.container.load(order))
}
#[allow(dead_code)]
pub fn store(&self, val: $new_type_name, order: ::core::sync::atomic::Ordering) {
self.container.store(val.into(), order)
}
@@ -88,14 +94,13 @@ macro_rules! int_like {
}
}
#[cfg(test)]
#[test]
fn test() {
use core::mem::size_of;
use ::core::sync::atomic::AtomicUsize;
// Generate type `usize_like`.
int_like!(UsizeLike, usize);
const ZERO: UsizeLike = UsizeLike::from(0);
assert_eq!(size_of::<UsizeLike>(), size_of::<usize>());

View File

@@ -7,15 +7,15 @@ use core::cmp::Ordering;
use core::mem;
use spin::Mutex;
use arch::paging::PAGE_SIZE;
use context::arch;
use context::file::FileDescriptor;
use context::memory::{Grant, Memory, SharedMemory, Tls};
use ipi::{ipi, IpiKind, IpiTarget};
use scheme::{SchemeNamespace, FileHandle};
use syscall::data::SigAction;
use syscall::flag::SIG_DFL;
use sync::WaitMap;
use crate::arch::paging::PAGE_SIZE;
use crate::context::arch;
use crate::context::file::FileDescriptor;
use crate::context::memory::{Grant, Memory, SharedMemory, Tls};
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::scheme::{SchemeNamespace, FileHandle};
use crate::syscall::data::SigAction;
use crate::syscall::flag::SIG_DFL;
use crate::sync::WaitMap;
/// Unique identifier for a context (i.e. `pid`).
use ::core::sync::atomic::AtomicUsize;
@@ -169,8 +169,8 @@ pub struct Context {
impl Context {
pub fn new(id: ContextId) -> Context {
let syscall_head = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(PAGE_SIZE, PAGE_SIZE)) as *mut [u8; PAGE_SIZE]) };
let syscall_tail = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(PAGE_SIZE, PAGE_SIZE)) as *mut [u8; PAGE_SIZE]) };
let syscall_head = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(PAGE_SIZE, PAGE_SIZE)) as *mut [u8; PAGE_SIZE]) };
let syscall_tail = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(PAGE_SIZE, PAGE_SIZE)) as *mut [u8; PAGE_SIZE]) };
Context {
id: id,
@@ -304,7 +304,7 @@ impl Context {
self.status = Status::Runnable;
if let Some(cpu_id) = self.cpu_id {
if cpu_id != ::cpu_id() {
if cpu_id != crate::cpu_id() {
// Send IPI if not on current CPU
ipi(IpiKind::Wakeup, IpiTarget::Other);
}

View File

@@ -1,10 +1,10 @@
//! File structs
use alloc::sync::Arc;
use event;
use crate::event;
use spin::RwLock;
use scheme::{self, SchemeId};
use syscall::error::{Result, Error, EBADF};
use crate::scheme::{self, SchemeId};
use crate::syscall::error::{Result, Error, EBADF};
/// A file description
#[derive(Debug)]

View File

@@ -4,10 +4,10 @@ use alloc::collections::BTreeMap;
use core::alloc::{GlobalAlloc, Layout};
use core::mem;
use core::sync::atomic::Ordering;
use paging;
use crate::paging;
use spin::RwLock;
use syscall::error::{Result, Error, EAGAIN};
use crate::syscall::error::{Result, Error, EAGAIN};
use super::context::{Context, ContextId};
/// Context list type
@@ -66,7 +66,7 @@ impl ContextList {
let context_lock = self.new_context()?;
{
let mut context = context_lock.write();
let mut fx = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
let mut fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
for b in fx.iter_mut() {
*b = 0;
}

View File

@@ -3,14 +3,14 @@ use alloc::collections::VecDeque;
use core::intrinsics;
use spin::Mutex;
use arch::paging::PAGE_SIZE;
use context::file::FileDescriptor;
use ipi::{ipi, IpiKind, IpiTarget};
use memory::Frame;
use paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress};
use paging::entry::EntryFlags;
use paging::mapper::MapperFlushAll;
use paging::temporary_page::TemporaryPage;
use crate::arch::paging::PAGE_SIZE;
use crate::context::file::FileDescriptor;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::Frame;
use crate::paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress};
use crate::paging::entry::EntryFlags;
use crate::paging::mapper::MapperFlushAll;
use crate::paging::temporary_page::TemporaryPage;
#[derive(Debug)]
pub struct Grant {

View File

@@ -50,7 +50,7 @@ pub fn init() {
let mut contexts = contexts_mut();
let context_lock = contexts.new_context().expect("could not initialize first context");
let mut context = context_lock.write();
let mut fx = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
let mut fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
for b in fx.iter_mut() {
*b = 0;
}
@@ -59,7 +59,7 @@ pub fn init() {
context.kfx = Some(fx);
context.status = Status::Runnable;
context.running = true;
context.cpu_id = Some(::cpu_id());
context.cpu_id = Some(crate::cpu_id());
CONTEXT_ID.store(context.id, Ordering::SeqCst);
}

View File

@@ -1,10 +1,10 @@
use alloc::sync::Arc;
use core::mem;
use context::{contexts, switch, Status, WaitpidKey};
use start::usermode;
use syscall;
use syscall::flag::{SIG_DFL, SIG_IGN, SIGCHLD, SIGCONT, SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU};
use crate::context::{contexts, switch, Status, WaitpidKey};
use crate::start::usermode;
use crate::syscall;
use crate::syscall::flag::{SIG_DFL, SIG_IGN, SIGCHLD, SIGCONT, SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU};
pub extern "C" fn signal_handler(sig: usize) {
let (action, restorer) = {
@@ -36,7 +36,7 @@ pub extern "C" fn signal_handler(sig: usize) {
if let Some(parent_lock) = contexts.get(ppid) {
let waitpid = {
let mut parent = parent_lock.write();
let parent = parent_lock.write();
Arc::clone(&parent.waitpid)
};
@@ -64,7 +64,7 @@ pub extern "C" fn signal_handler(sig: usize) {
if let Some(parent_lock) = contexts.get(ppid) {
let waitpid = {
let mut parent = parent_lock.write();
let parent = parent_lock.write();
Arc::clone(&parent.waitpid)
};
@@ -90,7 +90,7 @@ pub extern "C" fn signal_handler(sig: usize) {
// println!("Call {:X}", handler);
unsafe {
let mut sp = ::USER_SIGSTACK_OFFSET + ::USER_SIGSTACK_SIZE - 256;
let mut sp = crate::USER_SIGSTACK_OFFSET + crate::USER_SIGSTACK_SIZE - 256;
sp = (sp / 16) * 16;

View File

@@ -1,11 +1,11 @@
use core::sync::atomic::Ordering;
use context::{arch, contexts, Context, Status, CONTEXT_ID};
use context::signal::signal_handler;
use gdt;
use interrupt;
use interrupt::irq::PIT_TICKS;
use time;
use crate::context::{arch, contexts, Context, Status, CONTEXT_ID};
use crate::context::signal::signal_handler;
use crate::gdt;
use crate::interrupt;
use crate::interrupt::irq::PIT_TICKS;
use crate::time;
unsafe fn update(context: &mut Context, cpu_id: usize) {
// Take ownership if not already owned
@@ -74,7 +74,7 @@ pub unsafe fn switch() -> bool {
interrupt::pause();
}
let cpu_id = ::cpu_id();
let cpu_id = crate::cpu_id();
let from_ptr;
let mut to_ptr = 0 as *mut Context;

View File

@@ -1,11 +1,11 @@
use alloc::collections::VecDeque;
use spin::{Once, Mutex, MutexGuard};
use event;
use scheme::SchemeId;
use syscall::data::TimeSpec;
use syscall::flag::{CLOCK_MONOTONIC, CLOCK_REALTIME, EVENT_READ};
use time;
use crate::event;
use crate::scheme::SchemeId;
use crate::syscall::data::TimeSpec;
use crate::syscall::flag::{CLOCK_MONOTONIC, CLOCK_REALTIME, EVENT_READ};
use crate::time;
#[derive(Debug)]
struct Timeout {

View File

@@ -1,4 +1,4 @@
use syscall::io::{Io, Pio, Mmio, ReadOnly};
use crate::syscall::io::{Io, Pio, Mmio, ReadOnly};
bitflags! {
/// Interrupt enable flags

View File

@@ -3,11 +3,11 @@ use alloc::collections::BTreeMap;
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use context;
use scheme::{self, SchemeId};
use sync::WaitQueue;
use syscall::data::Event;
use syscall::error::{Error, Result, EBADF, EINTR, ESRCH};
use crate::context;
use crate::scheme::{self, SchemeId};
use crate::sync::WaitQueue;
use crate::syscall::data::Event;
use crate::syscall::error::{Error, Result, EBADF, EINTR, ESRCH};
int_like!(EventQueueId, AtomicEventQueueId, usize, AtomicUsize);
@@ -34,7 +34,7 @@ impl EventQueue {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut files = context.files.lock();
let files = context.files.lock();
match files.get(event.id).ok_or(Error::new(EBADF))? {
Some(file) => file.clone(),
None => return Err(Error::new(EBADF))

View File

@@ -44,9 +44,9 @@ extern crate slab_allocator;
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use scheme::{FileHandle, SchemeNamespace};
use crate::scheme::{FileHandle, SchemeNamespace};
pub use consts::*;
pub use crate::consts::*;
#[macro_use]
/// Shared data structures
@@ -55,7 +55,7 @@ pub mod common;
/// Architecture-dependent stuff
#[macro_use]
pub mod arch;
pub use arch::*;
pub use crate::arch::*;
/// Constants like memory locations
pub mod consts;

View File

@@ -1,7 +1,7 @@
//! # Bump frame allocator
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
use paging::PhysicalAddress;
use crate::paging::PhysicalAddress;
use super::{Frame, FrameAllocator, MemoryArea, MemoryAreaIter};

View File

@@ -1,7 +1,7 @@
//! # Memory management
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
pub use paging::{PAGE_SIZE, PhysicalAddress};
pub use crate::paging::{PAGE_SIZE, PhysicalAddress};
use self::bump::BumpAllocator;
use self::recycle::RecycleAllocator;

View File

@@ -3,7 +3,7 @@
use alloc::vec::Vec;
use paging::PhysicalAddress;
use crate::paging::PhysicalAddress;
use super::{Frame, FrameAllocator};

View File

@@ -3,7 +3,7 @@
use core::alloc::Layout;
use core::panic::PanicInfo;
use interrupt;
use crate::interrupt;
#[lang = "eh_personality"]
#[no_mangle]

View File

@@ -1,12 +1,12 @@
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use arch::debug::Writer;
use event;
use scheme::*;
use sync::WaitQueue;
use syscall::flag::{EVENT_READ, F_GETFL, F_SETFL, O_ACCMODE, O_NONBLOCK};
use syscall::scheme::Scheme;
use crate::arch::debug::Writer;
use crate::event;
use crate::scheme::*;
use crate::sync::WaitQueue;
use crate::syscall::flag::{EVENT_READ, F_GETFL, F_SETFL, O_ACCMODE, O_NONBLOCK};
use crate::syscall::scheme::Scheme;
pub static DEBUG_SCHEME_ID: AtomicSchemeId = ATOMIC_SCHEMEID_INIT;

View File

@@ -1,10 +1,10 @@
use alloc::sync::Arc;
use core::{mem, slice};
use event::{EventQueue, EventQueueId, next_queue_id, queues, queues_mut};
use syscall::data::Event;
use syscall::error::*;
use syscall::scheme::Scheme;
use crate::event::{EventQueue, EventQueueId, next_queue_id, queues, queues_mut};
use crate::syscall::data::Event;
use crate::syscall::error::*;
use crate::syscall::scheme::Scheme;
pub struct EventScheme;

View File

@@ -3,10 +3,10 @@ use core::{cmp, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use syscall::data::Stat;
use syscall::error::*;
use syscall::flag::{MODE_DIR, MODE_FILE, SEEK_SET, SEEK_CUR, SEEK_END};
use syscall::scheme::Scheme;
use crate::syscall::data::Stat;
use crate::syscall::error::*;
use crate::syscall::flag::{MODE_DIR, MODE_FILE, SEEK_SET, SEEK_CUR, SEEK_END};
use crate::syscall::scheme::Scheme;
#[cfg(test)]
mod gen {

View File

@@ -2,12 +2,12 @@ use core::{mem, str};
use core::sync::atomic::Ordering;
use spin::Mutex;
use event;
use interrupt::irq::acknowledge;
use scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use syscall::error::*;
use syscall::flag::EVENT_READ;
use syscall::scheme::Scheme;
use crate::event;
use crate::interrupt::irq::acknowledge;
use crate::scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use crate::syscall::error::*;
use crate::syscall::flag::EVENT_READ;
use crate::syscall::scheme::Scheme;
pub static IRQ_SCHEME_ID: AtomicSchemeId = ATOMIC_SCHEMEID_INIT;

View File

@@ -3,10 +3,10 @@ use core::{mem, slice, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use syscall::data::ITimerSpec;
use syscall::error::*;
use syscall::flag::{CLOCK_REALTIME, CLOCK_MONOTONIC};
use syscall::scheme::Scheme;
use crate::syscall::data::ITimerSpec;
use crate::syscall::error::*;
use crate::syscall::flag::{CLOCK_REALTIME, CLOCK_MONOTONIC};
use crate::syscall::scheme::Scheme;
pub struct ITimerScheme {
next_id: AtomicUsize,
@@ -41,7 +41,7 @@ impl Scheme for ITimerScheme {
}
fn read(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let clock = {
let _clock = {
let handles = self.handles.read();
*handles.get(&id).ok_or(Error::new(EBADF))?
};
@@ -58,7 +58,7 @@ impl Scheme for ITimerScheme {
}
fn write(&self, id: usize, buf: &[u8]) -> Result<usize> {
let clock = {
let _clock = {
let handles = self.handles.read();
*handles.get(&id).ok_or(Error::new(EBADF))?
};

View File

@@ -1,12 +1,12 @@
use context;
use context::memory::Grant;
use memory::{free_frames, used_frames};
use paging::VirtualAddress;
use paging::entry::EntryFlags;
use syscall::data::{Map, StatVfs};
use syscall::error::*;
use syscall::flag::{PROT_EXEC, PROT_READ, PROT_WRITE};
use syscall::scheme::Scheme;
use crate::context;
use crate::context::memory::Grant;
use crate::memory::{free_frames, used_frames};
use crate::paging::VirtualAddress;
use crate::paging::entry::EntryFlags;
use crate::syscall::data::{Map, StatVfs};
use crate::syscall::error::*;
use crate::syscall::flag::{PROT_EXEC, PROT_READ, PROT_WRITE};
use crate::syscall::scheme::Scheme;
pub struct MemoryScheme;
@@ -44,7 +44,7 @@ impl Scheme for MemoryScheme {
let mut grants = context.grants.lock();
let full_size = ((map.size + 4095)/4096) * 4096;
let mut to_address = ::USER_GRANT_OFFSET;
let mut to_address = crate::USER_GRANT_OFFSET;
let mut entry_flags = EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE;
if map.flags & PROT_EXEC == 0 {

View File

@@ -12,8 +12,8 @@ use alloc::collections::BTreeMap;
use core::sync::atomic::AtomicUsize;
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use syscall::error::*;
use syscall::scheme::Scheme;
use crate::syscall::error::*;
use crate::syscall::scheme::Scheme;
use self::debug::DebugScheme;
use self::event::EventScheme;
@@ -66,15 +66,15 @@ pub mod user;
/// Limit on number of schemes
pub const SCHEME_MAX_SCHEMES: usize = 65_536;
/// Unique identifier for a scheme namespace.
// Unique identifier for a scheme namespace.
int_like!(SchemeNamespace, AtomicSchemeNamespace, usize, AtomicUsize);
/// Unique identifier for a scheme.
// Unique identifier for a scheme.
int_like!(SchemeId, AtomicSchemeId, usize, AtomicUsize);
pub const ATOMIC_SCHEMEID_INIT: AtomicSchemeId = AtomicSchemeId::default();
/// Unique identifier for a file descriptor.
// Unique identifier for a file descriptor.
int_like!(FileHandle, AtomicFileHandle, usize, AtomicUsize);
pub struct SchemeIter<'a> {

View File

@@ -3,13 +3,13 @@ use alloc::collections::{BTreeMap, VecDeque};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use event;
use scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use sync::WaitCondition;
use syscall::error::{Error, Result, EAGAIN, EBADF, EINTR, EINVAL, EPIPE, ESPIPE};
use syscall::flag::{EVENT_READ, EVENT_WRITE, F_GETFL, F_SETFL, O_ACCMODE, O_NONBLOCK, MODE_FIFO};
use syscall::scheme::Scheme;
use syscall::data::Stat;
use crate::event;
use crate::scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use crate::sync::WaitCondition;
use crate::syscall::error::{Error, Result, EAGAIN, EBADF, EINTR, EINVAL, EPIPE, ESPIPE};
use crate::syscall::flag::{EVENT_READ, EVENT_WRITE, F_GETFL, F_SETFL, O_ACCMODE, O_NONBLOCK, MODE_FIFO};
use crate::syscall::scheme::Scheme;
use crate::syscall::data::Stat;
/// Pipes list
pub static PIPE_SCHEME_ID: AtomicSchemeId = ATOMIC_SCHEMEID_INIT;
@@ -101,7 +101,7 @@ impl Scheme for PipeScheme {
}
}
if let Some(pipe) = pipes.1.get(&id) {
if let Some(_pipe) = pipes.1.get(&id) {
if flags == EVENT_WRITE {
return Ok(EVENT_WRITE);
}

View File

@@ -6,13 +6,13 @@ use core::{cmp, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::{Mutex, RwLock};
use context;
use syscall::data::Stat;
use syscall::error::*;
use syscall::flag::{O_CREAT, MODE_FILE, MODE_DIR, SEEK_SET, SEEK_CUR, SEEK_END};
use syscall::scheme::Scheme;
use scheme::{self, SchemeNamespace, SchemeId};
use scheme::user::{UserInner, UserScheme};
use crate::context;
use crate::syscall::data::Stat;
use crate::syscall::error::*;
use crate::syscall::flag::{O_CREAT, MODE_FILE, MODE_DIR, SEEK_SET, SEEK_CUR, SEEK_END};
use crate::syscall::scheme::Scheme;
use crate::scheme::{self, SchemeNamespace, SchemeId};
use crate::scheme::user::{UserInner, UserScheme};
struct FolderInner {
data: Box<[u8]>,

View File

@@ -2,8 +2,8 @@ use alloc::string::String;
use alloc::vec::Vec;
use core::str;
use context;
use syscall::error::Result;
use crate::context;
use crate::syscall::error::Result;
pub fn resource() -> Result<Vec<u8>> {
let mut string = format!("{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<8}{}\n",

View File

@@ -1,10 +1,10 @@
use alloc::vec::Vec;
use device::cpu::cpu_info;
use syscall::error::{Error, EIO, Result};
use crate::device::cpu::cpu_info;
use crate::syscall::error::{Error, EIO, Result};
pub fn resource() -> Result<Vec<u8>> {
let mut string = format!("CPUs: {}\n", ::cpu_count());
let mut string = format!("CPUs: {}\n", crate::cpu_count());
match cpu_info(&mut string) {
Ok(()) => Ok(string.into_bytes()),

View File

@@ -1,7 +1,7 @@
use alloc::vec::Vec;
use context;
use syscall::error::{Error, ESRCH, Result};
use crate::context;
use crate::syscall::error::{Error, ESRCH, Result};
pub fn resource() -> Result<Vec<u8>> {
let mut name = {

View File

@@ -3,9 +3,9 @@ use alloc::vec::Vec;
use core::fmt::Write;
use core::str;
use context;
use scheme;
use syscall::error::Result;
use crate::context;
use crate::scheme;
use crate::syscall::error::Result;
pub fn resource() -> Result<Vec<u8>> {
let mut string = String::new();

View File

@@ -1,7 +1,7 @@
use alloc::vec::Vec;
use log::LOG;
use syscall::error::Result;
use crate::log::LOG;
use crate::syscall::error::Result;
pub fn resource() -> Result<Vec<u8>> {
let mut vec = Vec::new();

View File

@@ -5,10 +5,10 @@ use core::{cmp, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use syscall::data::Stat;
use syscall::error::{Error, EBADF, EINVAL, ENOENT, Result};
use syscall::flag::{MODE_DIR, MODE_FILE, SEEK_CUR, SEEK_END, SEEK_SET};
use syscall::scheme::Scheme;
use crate::syscall::data::Stat;
use crate::syscall::error::{Error, EBADF, EINVAL, ENOENT, Result};
use crate::syscall::flag::{MODE_DIR, MODE_FILE, SEEK_CUR, SEEK_END, SEEK_SET};
use crate::syscall::scheme::Scheme;
mod context;
mod cpu;

View File

@@ -1,8 +1,8 @@
use alloc::vec::Vec;
use context;
use scheme;
use syscall::error::{Error, ESRCH, Result};
use crate::context;
use crate::scheme;
use crate::syscall::error::{Error, ESRCH, Result};
pub fn resource() -> Result<Vec<u8>> {
let scheme_ns = {

View File

@@ -1,8 +1,8 @@
use alloc::vec::Vec;
use context;
use scheme;
use syscall::error::{Error, ESRCH, Result};
use crate::context;
use crate::scheme;
use crate::syscall::error::{Error, ESRCH, Result};
pub fn resource() -> Result<Vec<u8>> {
let scheme_ns = {

View File

@@ -3,9 +3,9 @@ use alloc::vec::Vec;
use core::fmt::Write;
use core::str;
use context;
use syscall;
use syscall::error::Result;
use crate::context;
use crate::syscall;
use crate::syscall::error::Result;
pub fn resource() -> Result<Vec<u8>> {
let mut string = String::new();

View File

@@ -1,5 +1,5 @@
use alloc::vec::Vec;
use syscall::error::Result;
use crate::syscall::error::Result;
pub fn resource() -> Result<Vec<u8>> {
Ok(format!("Redox\n\n{}\n\n{}\n",

View File

@@ -3,13 +3,13 @@ use core::{mem, slice, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use context::timeout;
use scheme::SchemeId;
use syscall::data::TimeSpec;
use syscall::error::*;
use syscall::flag::{CLOCK_REALTIME, CLOCK_MONOTONIC};
use syscall::scheme::Scheme;
use time;
use crate::context::timeout;
use crate::scheme::SchemeId;
use crate::syscall::data::TimeSpec;
use crate::syscall::error::*;
use crate::syscall::flag::{CLOCK_REALTIME, CLOCK_MONOTONIC};
use crate::syscall::scheme::Scheme;
use crate::time;
pub struct TimeScheme {
scheme_id: SchemeId,

View File

@@ -5,20 +5,20 @@ use core::sync::atomic::{AtomicU64, Ordering};
use core::{mem, slice, usize};
use spin::{Mutex, RwLock};
use context::{self, Context};
use context::file::FileDescriptor;
use context::memory::Grant;
use event;
use paging::{InactivePageTable, Page, VirtualAddress};
use paging::entry::EntryFlags;
use paging::temporary_page::TemporaryPage;
use scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use sync::{WaitQueue, WaitMap};
use syscall::data::{Map, Packet, Stat, StatVfs, TimeSpec};
use syscall::error::*;
use syscall::flag::{EVENT_READ, O_NONBLOCK, PROT_EXEC, PROT_READ, PROT_WRITE};
use syscall::number::*;
use syscall::scheme::Scheme;
use crate::context::{self, Context};
use crate::context::file::FileDescriptor;
use crate::context::memory::Grant;
use crate::event;
use crate::paging::{InactivePageTable, Page, VirtualAddress};
use crate::paging::entry::EntryFlags;
use crate::paging::temporary_page::TemporaryPage;
use crate::scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use crate::sync::{WaitQueue, WaitMap};
use crate::syscall::data::{Map, Packet, Stat, StatVfs, TimeSpec};
use crate::syscall::error::*;
use crate::syscall::flag::{EVENT_READ, O_NONBLOCK, PROT_EXEC, PROT_READ, PROT_WRITE};
use crate::syscall::number::*;
use crate::syscall::scheme::Scheme;
pub struct UserInner {
root_id: SchemeId,
@@ -99,12 +99,12 @@ impl UserInner {
let mut grants = context.grants.lock();
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_GRANT_OFFSET)));
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
let from_address = (address/4096) * 4096;
let offset = address - from_address;
let full_size = ((offset + size + 4095)/4096) * 4096;
let mut to_address = ::USER_GRANT_OFFSET;
let mut to_address = crate::USER_GRANT_OFFSET;
let mut entry_flags = EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE;
if flags & PROT_EXEC == 0 {
@@ -155,7 +155,7 @@ impl UserInner {
let mut grants = context.grants.lock();
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_GRANT_OFFSET)));
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
for i in 0 .. grants.len() {
let start = grants[i].start_address().get();

View File

@@ -2,7 +2,7 @@ use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::{Mutex, RwLock};
use context::{self, Context};
use crate::context::{self, Context};
#[derive(Debug)]
pub struct WaitCondition {

View File

@@ -2,7 +2,7 @@ use alloc::collections::BTreeMap;
use core::mem;
use spin::Mutex;
use sync::WaitCondition;
use crate::sync::WaitCondition;
#[derive(Debug)]
pub struct WaitMap<K, V> {

View File

@@ -1,7 +1,7 @@
use alloc::collections::VecDeque;
use spin::Mutex;
use sync::WaitCondition;
use crate::sync::WaitCondition;
#[derive(Debug)]
pub struct WaitQueue<T> {

View File

@@ -1,11 +1,11 @@
use interrupt::syscall::SyscallStack;
use memory::{allocate_frames, deallocate_frames, Frame};
use paging::{ActivePageTable, PhysicalAddress, VirtualAddress};
use paging::entry::EntryFlags;
use context;
use context::memory::Grant;
use syscall::error::{Error, EFAULT, EINVAL, ENOMEM, EPERM, ESRCH, Result};
use syscall::flag::{PHYSMAP_WRITE, PHYSMAP_WRITE_COMBINE, PHYSMAP_NO_CACHE};
use crate::interrupt::syscall::SyscallStack;
use crate::memory::{allocate_frames, deallocate_frames, Frame};
use crate::paging::{ActivePageTable, PhysicalAddress, VirtualAddress};
use crate::paging::entry::EntryFlags;
use crate::context;
use crate::context::memory::Grant;
use crate::syscall::error::{Error, EFAULT, EINVAL, ENOMEM, EPERM, ESRCH, Result};
use crate::syscall::flag::{PHYSMAP_WRITE, PHYSMAP_WRITE_COMBINE, PHYSMAP_NO_CACHE};
fn enforce_root() -> Result<()> {
let contexts = context::contexts();
@@ -64,7 +64,7 @@ pub fn inner_physmap(physical_address: usize, size: usize, flags: usize) -> Resu
let from_address = (physical_address/4096) * 4096;
let offset = physical_address - from_address;
let full_size = ((offset + size + 4095)/4096) * 4096;
let mut to_address = ::USER_GRANT_OFFSET;
let mut to_address = crate::USER_GRANT_OFFSET;
let mut entry_flags = EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::USER_ACCESSIBLE;
if flags & PHYSMAP_WRITE == PHYSMAP_WRITE {

View File

@@ -3,13 +3,13 @@ use core::sync::atomic::Ordering;
use alloc::sync::Arc;
use spin::RwLock;
use context;
use scheme::{self, FileHandle};
use syscall;
use syscall::data::{Packet, Stat};
use syscall::error::*;
use syscall::flag::{F_GETFD, F_SETFD, F_GETFL, F_SETFL, F_DUPFD, O_ACCMODE, O_DIRECTORY, O_RDONLY, O_WRONLY, MODE_DIR, MODE_FILE, O_CLOEXEC};
use context::file::{FileDescriptor, FileDescription};
use crate::context;
use crate::scheme::{self, FileHandle};
use crate::syscall;
use crate::syscall::data::{Packet, Stat};
use crate::syscall::error::*;
use crate::syscall::flag::{F_GETFD, F_SETFD, F_GETFL, F_SETFL, F_DUPFD, O_ACCMODE, O_DIRECTORY, O_RDONLY, O_WRONLY, MODE_DIR, MODE_FILE, O_CLOEXEC};
use crate::context::file::{FileDescriptor, FileDescription};
pub fn file_op(a: usize, fd: FileHandle, c: usize, d: usize) -> Result<usize> {
let (file, pid, uid, gid) = {
@@ -126,8 +126,8 @@ pub fn open(path: &[u8], flags: usize) -> Result<FileHandle> {
pub fn pipe2(fds: &mut [usize], flags: usize) -> Result<usize> {
if fds.len() >= 2 {
let scheme_id = ::scheme::pipe::PIPE_SCHEME_ID.load(Ordering::SeqCst);
let (read_id, write_id) = ::scheme::pipe::pipe(flags);
let scheme_id = crate::scheme::pipe::PIPE_SCHEME_ID.load(Ordering::SeqCst);
let (read_id, write_id) = crate::scheme::pipe::pipe(flags);
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;

View File

@@ -7,12 +7,12 @@ use alloc::collections::VecDeque;
use core::intrinsics;
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use context::{self, Context};
use time;
use syscall::data::TimeSpec;
use syscall::error::{Error, Result, ESRCH, EAGAIN, EINVAL};
use syscall::flag::{FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE};
use syscall::validate::{validate_slice, validate_slice_mut};
use crate::context::{self, Context};
use crate::time;
use crate::syscall::data::TimeSpec;
use crate::syscall::error::{Error, Result, ESRCH, EAGAIN, EINVAL};
use crate::syscall::flag::{FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE};
use crate::syscall::validate::{validate_slice, validate_slice_mut};
type FutexList = VecDeque<(usize, Arc<RwLock<Context>>)>;

View File

@@ -18,9 +18,9 @@ use self::data::{SigAction, TimeSpec};
use self::error::{Error, Result, ENOSYS};
use self::number::*;
use context::ContextId;
use interrupt::syscall::SyscallStack;
use scheme::{FileHandle, SchemeNamespace};
use crate::context::ContextId;
use crate::interrupt::syscall::SyscallStack;
use crate::scheme::{FileHandle, SchemeNamespace};
/// Debug
pub mod debug;
@@ -162,11 +162,11 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
/*
let debug = {
let contexts = ::context::contexts();
let contexts = crate::context::contexts();
if let Some(context_lock) = contexts.current() {
let context = context_lock.read();
let name_raw = context.name.lock();
let name = unsafe { ::core::str::from_utf8_unchecked(&name_raw) };
let name = unsafe { core::str::from_utf8_unchecked(&name_raw) };
if name == "file:/bin/cargo" || name == "file:/bin/rustc" {
if a == SYS_CLOCK_GETTIME {
false
@@ -184,10 +184,10 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
};
if debug {
let contexts = ::context::contexts();
let contexts = crate::context::contexts();
if let Some(context_lock) = contexts.current() {
let context = context_lock.read();
print!("{} ({}): ", unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, context.id.into());
print!("{} ({}): ", unsafe { core::str::from_utf8_unchecked(&context.name.lock()) }, context.id.into());
}
println!("{}", debug::format_call(a, b, c, d, e, f));
@@ -200,7 +200,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
// When the code below falls out of scope it will release the lock
// see the spin crate for details
{
let contexts = ::context::contexts();
let contexts = crate::context::contexts();
if let Some(context_lock) = contexts.current() {
let mut context = context_lock.write();
context.syscall = Some((a, b, c, d, e, f));
@@ -210,7 +210,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
let result = inner(a, b, c, d, e, f, bp, stack);
{
let contexts = ::context::contexts();
let contexts = crate::context::contexts();
if let Some(context_lock) = contexts.current() {
let mut context = context_lock.write();
context.syscall = None;
@@ -219,10 +219,10 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
/*
if debug {
let contexts = ::context::contexts();
let contexts = crate::context::contexts();
if let Some(context_lock) = contexts.current() {
let context = context_lock.read();
print!("{} ({}): ", unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, context.id.into());
print!("{} ({}): ", unsafe { core::str::from_utf8_unchecked(&context.name.lock()) }, context.id.into());
}
print!("{} = ", debug::format_call(a, b, c, d, e, f));

View File

@@ -1,9 +1,9 @@
use alloc::vec::Vec;
use context;
use scheme::{self, SchemeNamespace};
use syscall::error::*;
use syscall::validate::validate_slice;
use crate::context;
use crate::scheme::{self, SchemeNamespace};
use crate::syscall::error::*;
use crate::syscall::validate::validate_slice;
pub fn getegid() -> Result<usize> {
let contexts = context::contexts();

View File

@@ -6,28 +6,28 @@ use core::{intrinsics, mem};
use core::ops::DerefMut;
use spin::Mutex;
use memory::allocate_frames;
use paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, PAGE_SIZE};
use paging::entry::EntryFlags;
use paging::mapper::MapperFlushAll;
use paging::temporary_page::TemporaryPage;
use start::usermode;
use interrupt;
use context;
use context::{ContextId, WaitpidKey};
use context::file::FileDescriptor;
use crate::memory::allocate_frames;
use crate::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, PAGE_SIZE};
use crate::paging::entry::EntryFlags;
use crate::paging::mapper::MapperFlushAll;
use crate::paging::temporary_page::TemporaryPage;
use crate::start::usermode;
use crate::interrupt;
use crate::context;
use crate::context::{ContextId, WaitpidKey};
use crate::context::file::FileDescriptor;
#[cfg(not(feature="doc"))]
use elf::{self, program_header};
use ipi::{ipi, IpiKind, IpiTarget};
use scheme::FileHandle;
use syscall;
use syscall::data::{SigAction, Stat};
use syscall::error::*;
use syscall::flag::{CLONE_VFORK, CLONE_VM, CLONE_FS, CLONE_FILES, CLONE_SIGHAND, CLONE_STACK,
use crate::elf::{self, program_header};
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::scheme::FileHandle;
use crate::syscall;
use crate::syscall::data::{SigAction, Stat};
use crate::syscall::error::*;
use crate::syscall::flag::{CLONE_VFORK, CLONE_VM, CLONE_FS, CLONE_FILES, CLONE_SIGHAND, CLONE_STACK,
PROT_EXEC, PROT_READ, PROT_WRITE,
SIG_DFL, SIG_BLOCK, SIG_UNBLOCK, SIG_SETMASK, SIGCONT, SIGTERM,
WCONTINUED, WNOHANG, WUNTRACED, wifcontinued, wifstopped};
use syscall::validate::{validate_slice, validate_slice_mut};
use crate::syscall::validate::{validate_slice, validate_slice_mut};
pub fn brk(address: usize) -> Result<usize> {
let contexts = context::contexts();
@@ -48,11 +48,11 @@ pub fn brk(address: usize) -> Result<usize> {
if address == 0 {
//println!("Brk query {:X}", current);
Ok(current)
} else if address >= ::USER_HEAP_OFFSET {
} else if address >= crate::USER_HEAP_OFFSET {
//TODO: out of memory errors
if let Some(ref heap_shared) = context.heap {
heap_shared.with(|heap| {
heap.resize(address - ::USER_HEAP_OFFSET, true);
heap.resize(address - crate::USER_HEAP_OFFSET, true);
});
} else {
panic!("user heap not initialized");
@@ -120,7 +120,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
arch = context.arch.clone();
if let Some(ref fx) = context.kfx {
let mut new_fx = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
let mut new_fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
for (new_b, b) in new_fx.iter_mut().zip(fx.iter()) {
*new_b = *b;
}
@@ -151,7 +151,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
for memory_shared in context.image.iter() {
memory_shared.with(|memory| {
let mut new_memory = context::memory::Memory::new(
VirtualAddress::new(memory.start_address().get() + ::USER_TMP_OFFSET),
VirtualAddress::new(memory.start_address().get() + crate::USER_TMP_OFFSET),
memory.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
false
@@ -171,7 +171,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
if let Some(ref heap_shared) = context.heap {
heap_shared.with(|heap| {
let mut new_heap = context::memory::Memory::new(
VirtualAddress::new(::USER_TMP_HEAP_OFFSET),
VirtualAddress::new(crate::USER_TMP_HEAP_OFFSET),
heap.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
false
@@ -195,7 +195,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
} else {
stack_shared.with(|stack| {
let mut new_stack = context::memory::Memory::new(
VirtualAddress::new(::USER_TMP_STACK_OFFSET),
VirtualAddress::new(crate::USER_TMP_STACK_OFFSET),
stack.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
false
@@ -215,7 +215,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
if let Some(ref sigstack) = context.sigstack {
let mut new_sigstack = context::memory::Memory::new(
VirtualAddress::new(::USER_TMP_SIGSTACK_OFFSET),
VirtualAddress::new(crate::USER_TMP_SIGSTACK_OFFSET),
sigstack.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
false
@@ -236,7 +236,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
master: tls.master,
file_size: tls.file_size,
mem: context::memory::Memory::new(
VirtualAddress::new(::USER_TMP_TLS_OFFSET),
VirtualAddress::new(crate::USER_TMP_TLS_OFFSET),
tls.mem.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
true
@@ -266,7 +266,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
} else {
let mut grants_vec = Vec::new();
for grant in context.grants.lock().iter() {
let start = VirtualAddress::new(grant.start_address().get() + ::USER_TMP_GRANT_OFFSET - ::USER_GRANT_OFFSET);
let start = VirtualAddress::new(grant.start_address().get() + crate::USER_TMP_GRANT_OFFSET - crate::USER_GRANT_OFFSET);
grants_vec.push(grant.secret_clone(start));
}
grants = Arc::new(Mutex::new(grants_vec));
@@ -318,7 +318,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
if flags & CLONE_VM == 0 {
let mut i = 0;
while i < grants.lock().len() {
let mut remove = false;
let remove = false;
if let Some(grant) = grants.lock().get(i) {
if let Some(ref _desc) = grant.desc_opt {
println!("todo: clone grant {} using fmap: {:?}", i, grant);
@@ -373,7 +373,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
let mut active_table = unsafe { ActivePageTable::new() };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_MISC_OFFSET)));
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)));
let mut new_table = {
let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table");
@@ -384,19 +384,19 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
// Copy kernel image mapping
{
let frame = active_table.p4()[::KERNEL_PML4].pointed_frame().expect("kernel image not mapped");
let flags = active_table.p4()[::KERNEL_PML4].flags();
let frame = active_table.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped");
let flags = active_table.p4()[crate::KERNEL_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[::KERNEL_PML4].set(frame, flags);
mapper.p4_mut()[crate::KERNEL_PML4].set(frame, flags);
});
}
// Copy kernel heap mapping
{
let frame = active_table.p4()[::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped");
let flags = active_table.p4()[::KERNEL_HEAP_PML4].flags();
let frame = active_table.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped");
let flags = active_table.p4()[crate::KERNEL_HEAP_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[::KERNEL_HEAP_PML4].set(frame, flags);
mapper.p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags);
});
}
@@ -417,36 +417,36 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
if flags & CLONE_VM == CLONE_VM {
// Copy user image mapping, if found
if ! image.is_empty() {
let frame = active_table.p4()[::USER_PML4].pointed_frame().expect("user image not mapped");
let flags = active_table.p4()[::USER_PML4].flags();
let frame = active_table.p4()[crate::USER_PML4].pointed_frame().expect("user image not mapped");
let flags = active_table.p4()[crate::USER_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[::USER_PML4].set(frame, flags);
mapper.p4_mut()[crate::USER_PML4].set(frame, flags);
});
}
context.image = image;
// Copy user heap mapping, if found
if let Some(heap_shared) = heap_option {
let frame = active_table.p4()[::USER_HEAP_PML4].pointed_frame().expect("user heap not mapped");
let flags = active_table.p4()[::USER_HEAP_PML4].flags();
let frame = active_table.p4()[crate::USER_HEAP_PML4].pointed_frame().expect("user heap not mapped");
let flags = active_table.p4()[crate::USER_HEAP_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[::USER_HEAP_PML4].set(frame, flags);
mapper.p4_mut()[crate::USER_HEAP_PML4].set(frame, flags);
});
context.heap = Some(heap_shared);
}
// Copy grant mapping
if ! grants.lock().is_empty() {
let frame = active_table.p4()[::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped");
let flags = active_table.p4()[::USER_GRANT_PML4].flags();
let frame = active_table.p4()[crate::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped");
let flags = active_table.p4()[crate::USER_GRANT_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[::USER_GRANT_PML4].set(frame, flags);
mapper.p4_mut()[crate::USER_GRANT_PML4].set(frame, flags);
});
}
context.grants = grants;
} else {
// Copy percpu mapping
for cpu_id in 0..::cpu_count() {
for cpu_id in 0..crate::cpu_count() {
extern {
// The starting byte of the thread data segment
static mut __tdata_start: u8;
@@ -456,7 +456,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
let size = unsafe { & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize };
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
@@ -474,7 +474,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
// Move copy of image
for memory_shared in image.iter_mut() {
memory_shared.with(|memory| {
let start = VirtualAddress::new(memory.start_address().get() - ::USER_TMP_OFFSET + ::USER_OFFSET);
let start = VirtualAddress::new(memory.start_address().get() - crate::USER_TMP_OFFSET + crate::USER_OFFSET);
memory.move_to(start, &mut new_table, &mut temporary_page);
});
}
@@ -483,14 +483,14 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
// Move copy of heap
if let Some(heap_shared) = heap_option {
heap_shared.with(|heap| {
heap.move_to(VirtualAddress::new(::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page);
heap.move_to(VirtualAddress::new(crate::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page);
});
context.heap = Some(heap_shared);
}
// Move grants
for grant in grants.lock().iter_mut() {
let start = VirtualAddress::new(grant.start_address().get() + ::USER_GRANT_OFFSET - ::USER_TMP_GRANT_OFFSET);
let start = VirtualAddress::new(grant.start_address().get() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET);
grant.move_to(start, &mut new_table, &mut temporary_page);
}
context.grants = grants;
@@ -499,14 +499,14 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
// Setup user stack
if let Some(stack_shared) = stack_option {
if flags & CLONE_STACK == CLONE_STACK {
let frame = active_table.p4()[::USER_STACK_PML4].pointed_frame().expect("user stack not mapped");
let flags = active_table.p4()[::USER_STACK_PML4].flags();
let frame = active_table.p4()[crate::USER_STACK_PML4].pointed_frame().expect("user stack not mapped");
let flags = active_table.p4()[crate::USER_STACK_PML4].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[::USER_STACK_PML4].set(frame, flags);
mapper.p4_mut()[crate::USER_STACK_PML4].set(frame, flags);
});
} else {
stack_shared.with(|stack| {
stack.move_to(VirtualAddress::new(::USER_STACK_OFFSET), &mut new_table, &mut temporary_page);
stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_table, &mut temporary_page);
});
}
context.stack = Some(stack_shared);
@@ -514,12 +514,12 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
// Setup user sigstack
if let Some(mut sigstack) = sigstack_option {
sigstack.move_to(VirtualAddress::new(::USER_SIGSTACK_OFFSET), &mut new_table, &mut temporary_page);
sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_table, &mut temporary_page);
context.sigstack = Some(sigstack);
}
// Set up TCB
let tcb_addr = ::USER_TCB_OFFSET + context.id.into() * PAGE_SIZE;
let tcb_addr = crate::USER_TCB_OFFSET + context.id.into() * PAGE_SIZE;
let mut tcb = context::memory::Memory::new(
VirtualAddress::new(tcb_addr),
PAGE_SIZE,
@@ -529,13 +529,13 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
// Setup user TLS
if let Some(mut tls) = tls_option {
tls.mem.move_to(VirtualAddress::new(::USER_TLS_OFFSET), &mut new_table, &mut temporary_page);
tls.mem.move_to(VirtualAddress::new(crate::USER_TLS_OFFSET), &mut new_table, &mut temporary_page);
unsafe {
*(tcb_addr as *mut usize) = tls.mem.start_address().get() + tls.mem.size();
}
context.tls = Some(tls);
} else {
let parent_tcb_addr = ::USER_TCB_OFFSET + ppid.into() * PAGE_SIZE;
let parent_tcb_addr = crate::USER_TCB_OFFSET + ppid.into() * PAGE_SIZE;
unsafe {
intrinsics::copy(parent_tcb_addr as *const u8,
tcb_addr as *mut u8,
@@ -588,7 +588,7 @@ fn empty(context: &mut context::Context, reaping: bool) {
println!("{}: {}: Grant should not exist: {:?}", context.id.into(), unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, grant);
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_GRANT_OFFSET)));
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
grant.unmap_inactive(&mut new_table, &mut temporary_page);
} else {
@@ -615,7 +615,7 @@ fn fexec_noreturn(
vars: Box<[Box<[u8]>]>
) -> ! {
let entry;
let mut sp = ::USER_STACK_OFFSET + ::USER_STACK_SIZE - 256;
let mut sp = crate::USER_STACK_OFFSET + crate::USER_STACK_SIZE - 256;
{
let (vfork, ppid, files) = {
@@ -642,7 +642,7 @@ fn fexec_noreturn(
entry = elf.entry();
// Always map TCB
let tcb_addr = ::USER_TCB_OFFSET + context.id.into() * PAGE_SIZE;
let tcb_addr = crate::USER_TCB_OFFSET + context.id.into() * PAGE_SIZE;
let tcb_mem = context::memory::Memory::new(
VirtualAddress::new(tcb_addr),
PAGE_SIZE,
@@ -700,7 +700,7 @@ fn fexec_noreturn(
master: VirtualAddress::new(segment.p_vaddr as usize),
file_size: segment.p_filesz as usize,
mem: context::memory::Memory::new(
VirtualAddress::new(::USER_TLS_OFFSET),
VirtualAddress::new(crate::USER_TLS_OFFSET),
rounded_size as usize,
EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
true
@@ -709,7 +709,7 @@ fn fexec_noreturn(
};
unsafe {
*(tcb_addr as *mut usize) = ::USER_TLS_OFFSET + tls.mem.size();
*(tcb_addr as *mut usize) = crate::USER_TLS_OFFSET + tls.mem.size();
}
tls_option = Some(tls);
@@ -726,7 +726,7 @@ fn fexec_noreturn(
// Map heap
context.heap = Some(context::memory::Memory::new(
VirtualAddress::new(::USER_HEAP_OFFSET),
VirtualAddress::new(crate::USER_HEAP_OFFSET),
0,
EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
true
@@ -734,16 +734,16 @@ fn fexec_noreturn(
// Map stack
context.stack = Some(context::memory::Memory::new(
VirtualAddress::new(::USER_STACK_OFFSET),
::USER_STACK_SIZE,
VirtualAddress::new(crate::USER_STACK_OFFSET),
crate::USER_STACK_SIZE,
EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
true
).to_shared());
// Map stack
context.sigstack = Some(context::memory::Memory::new(
VirtualAddress::new(::USER_SIGSTACK_OFFSET),
::USER_SIGSTACK_SIZE,
VirtualAddress::new(crate::USER_SIGSTACK_OFFSET),
crate::USER_SIGSTACK_SIZE,
EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
true
));
@@ -768,7 +768,7 @@ fn fexec_noreturn(
// Push content
for arg in iter.iter().rev() {
sp -= mem::size_of::<usize>();
unsafe { *(sp as *mut usize) = ::USER_ARG_OFFSET + arg_size; }
unsafe { *(sp as *mut usize) = crate::USER_ARG_OFFSET + arg_size; }
arg_size += arg.len() + 1;
}
@@ -780,7 +780,7 @@ fn fexec_noreturn(
if arg_size > 0 {
let mut memory = context::memory::Memory::new(
VirtualAddress::new(::USER_ARG_OFFSET),
VirtualAddress::new(crate::USER_ARG_OFFSET),
arg_size,
EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
true
@@ -790,13 +790,13 @@ fn fexec_noreturn(
for arg in vars.iter().rev().chain(args.iter().rev()) {
unsafe {
intrinsics::copy(arg.as_ptr(),
(::USER_ARG_OFFSET + arg_offset) as *mut u8,
(crate::USER_ARG_OFFSET + arg_offset) as *mut u8,
arg.len());
}
arg_offset += arg.len();
unsafe {
*((::USER_ARG_OFFSET + arg_offset) as *mut u8) = 0;
*((crate::USER_ARG_OFFSET + arg_offset) as *mut u8) = 0;
}
arg_offset += 1;
}

View File

@@ -1,8 +1,8 @@
use time;
use context;
use syscall::data::TimeSpec;
use syscall::error::*;
use syscall::flag::{CLOCK_REALTIME, CLOCK_MONOTONIC};
use crate::time;
use crate::context;
use crate::syscall::data::TimeSpec;
use crate::syscall::error::*;
use crate::syscall::flag::{CLOCK_REALTIME, CLOCK_MONOTONIC};
pub fn clock_gettime(clock: usize, time: &mut TimeSpec) -> Result<usize> {
let arch_time = match clock {

View File

@@ -1,8 +1,8 @@
use core::{mem, slice};
use paging::{ActivePageTable, Page, VirtualAddress};
use paging::entry::EntryFlags;
use syscall::error::*;
use crate::paging::{ActivePageTable, Page, VirtualAddress};
use crate::paging::entry::EntryFlags;
use crate::syscall::error::*;
fn validate(address: usize, size: usize, flags: EntryFlags) -> Result<()> {
let end_offset = size.checked_sub(1).ok_or(Error::new(EFAULT))?;

View File

@@ -24,6 +24,6 @@ fn stdio() {
/// Test that invalid reads/writes cause errors
#[test]
fn invalid_path() {
assert_eq!(syscall::read(999, &mut []), Err(Error::new(EBADF)));
assert_eq!(syscall::write(999, &[]), Err(Error::new(EBADF)));
assert_eq!(syscall::read(999, &mut []), Err(Error::new(syscall::EBADF)));
assert_eq!(syscall::write(999, &[]), Err(Error::new(syscall::EBADF)));
}