From 02c37d3fae3e60a4e2fa27485d990d6d3a830e97 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Tue, 12 Jan 2021 20:36:33 -0700 Subject: [PATCH 01/55] WIP: aarch64 rebase --- .gitmodules | 3 +- Cargo.lock | 32 ++ Cargo.toml | 11 +- build.rs | 10 + linkers/aarch64.ld | 60 +++ rmm | 2 +- src/arch/aarch64/consts.rs | 112 ++++ src/arch/aarch64/debug.rs | 48 ++ src/arch/aarch64/device/cpu/mod.rs | 205 ++++++++ .../device/cpu/registers/control_regs.rs | 85 +++ src/arch/aarch64/device/cpu/registers/mod.rs | 2 + src/arch/aarch64/device/cpu/registers/tlb.rs | 9 + src/arch/aarch64/device/generic_timer.rs | 80 +++ src/arch/aarch64/device/gic.rs | 182 +++++++ src/arch/aarch64/device/mod.rs | 25 + src/arch/aarch64/device/rtc.rs | 59 +++ src/arch/aarch64/device/serial.rs | 38 ++ src/arch/aarch64/device/uart_pl011.rs | 170 ++++++ src/arch/aarch64/init/device_tree/mod.rs | 127 +++++ src/arch/aarch64/init/mod.rs | 1 + src/arch/aarch64/init/pre_kstart/early_init.S | 51 ++ .../pre_kstart/helpers/build_page_tables.S | 249 +++++++++ .../aarch64/init/pre_kstart/helpers/consts.h | 26 + .../pre_kstart/helpers/post_mmu_enabled.S | 95 ++++ .../init/pre_kstart/helpers/pre_mmu_enabled.S | 66 +++ .../aarch64/init/pre_kstart/helpers/vectors.S | 101 ++++ src/arch/aarch64/interrupt/handler.rs | 116 +++++ src/arch/aarch64/interrupt/irq.rs | 175 +++++++ src/arch/aarch64/interrupt/mod.rs | 73 +++ src/arch/aarch64/interrupt/syscall.rs | 197 +++++++ src/arch/aarch64/interrupt/trace.rs | 144 ++++++ .../aarch64/interrupt/unhandled_exceptions.rs | 145 ++++++ src/arch/aarch64/ipi.rs | 24 + src/arch/aarch64/macros.rs | 16 + src/arch/aarch64/mod.rs | 31 ++ src/arch/aarch64/paging/entry.rs | 163 ++++++ src/arch/aarch64/paging/mapper.rs | 350 +++++++++++++ src/arch/aarch64/paging/mod.rs | 483 ++++++++++++++++++ src/arch/aarch64/paging/table.rs | 161 ++++++ src/arch/aarch64/paging/temporary_page.rs | 45 ++ src/arch/aarch64/rmm.rs | 292 +++++++++++ src/arch/aarch64/start.rs | 191 +++++++ 
src/arch/aarch64/stop.rs | 21 + src/arch/mod.rs | 8 +- src/arch/x86_64/paging/mod.rs | 4 +- src/context/arch/aarch64.rs | 359 +++++++++++++ src/context/mod.rs | 5 + src/context/switch.rs | 10 +- src/devices/mod.rs | 2 +- src/elf.rs | 5 +- src/lib.rs | 3 + src/scheme/proc.rs | 14 + src/scheme/sys/mod.rs | 1 + src/syscall/driver.rs | 7 + src/syscall/mod.rs | 22 +- src/syscall/process.rs | 1 + syscall | 2 +- 57 files changed, 4901 insertions(+), 18 deletions(-) create mode 100644 linkers/aarch64.ld create mode 100644 src/arch/aarch64/consts.rs create mode 100644 src/arch/aarch64/debug.rs create mode 100644 src/arch/aarch64/device/cpu/mod.rs create mode 100644 src/arch/aarch64/device/cpu/registers/control_regs.rs create mode 100644 src/arch/aarch64/device/cpu/registers/mod.rs create mode 100644 src/arch/aarch64/device/cpu/registers/tlb.rs create mode 100644 src/arch/aarch64/device/generic_timer.rs create mode 100644 src/arch/aarch64/device/gic.rs create mode 100644 src/arch/aarch64/device/mod.rs create mode 100644 src/arch/aarch64/device/rtc.rs create mode 100644 src/arch/aarch64/device/serial.rs create mode 100644 src/arch/aarch64/device/uart_pl011.rs create mode 100644 src/arch/aarch64/init/device_tree/mod.rs create mode 100644 src/arch/aarch64/init/mod.rs create mode 100644 src/arch/aarch64/init/pre_kstart/early_init.S create mode 100644 src/arch/aarch64/init/pre_kstart/helpers/build_page_tables.S create mode 100644 src/arch/aarch64/init/pre_kstart/helpers/consts.h create mode 100644 src/arch/aarch64/init/pre_kstart/helpers/post_mmu_enabled.S create mode 100644 src/arch/aarch64/init/pre_kstart/helpers/pre_mmu_enabled.S create mode 100644 src/arch/aarch64/init/pre_kstart/helpers/vectors.S create mode 100644 src/arch/aarch64/interrupt/handler.rs create mode 100644 src/arch/aarch64/interrupt/irq.rs create mode 100644 src/arch/aarch64/interrupt/mod.rs create mode 100644 src/arch/aarch64/interrupt/syscall.rs create mode 100644 src/arch/aarch64/interrupt/trace.rs create 
mode 100644 src/arch/aarch64/interrupt/unhandled_exceptions.rs create mode 100644 src/arch/aarch64/ipi.rs create mode 100644 src/arch/aarch64/macros.rs create mode 100644 src/arch/aarch64/mod.rs create mode 100644 src/arch/aarch64/paging/entry.rs create mode 100644 src/arch/aarch64/paging/mapper.rs create mode 100644 src/arch/aarch64/paging/mod.rs create mode 100644 src/arch/aarch64/paging/table.rs create mode 100644 src/arch/aarch64/paging/temporary_page.rs create mode 100644 src/arch/aarch64/rmm.rs create mode 100644 src/arch/aarch64/start.rs create mode 100644 src/arch/aarch64/stop.rs create mode 100644 src/context/arch/aarch64.rs diff --git a/.gitmodules b/.gitmodules index e9b7384..afeb335 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,10 +1,11 @@ [submodule "syscall"] path = syscall url = https://gitlab.redox-os.org/redox-os/syscall.git + branch = aarch64-rebase [submodule "slab_allocator"] path = slab_allocator url = https://gitlab.redox-os.org/redox-os/slab_allocator [submodule "rmm"] path = rmm url = https://gitlab.redox-os.org/redox-os/rmm.git - branch = master + branch = aarch64-rebase diff --git a/Cargo.lock b/Cargo.lock index 7db8632..d887aa2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5,11 +5,21 @@ name = "bit_field" version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "bitfield" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "bitflags" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "byteorder" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "cc" version = "1.0.66" @@ -20,6 +30,14 @@ name = "cfg-if" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "fdt" +version = "0.1.0" +source = 
"git+https://gitlab.redox-os.org/thomhuds/fdt.git#baca9b0070c281dc99521ee901efcb10e5f84218" +dependencies = [ + "byteorder 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "goblin" version = "0.2.3" @@ -33,7 +51,11 @@ dependencies = [ name = "kernel" version = "0.1.54" dependencies = [ + "bitfield 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)", + "fdt 0.1.0 (git+https://gitlab.redox-os.org/thomhuds/fdt.git)", "goblin 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "linked_list_allocator 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -41,6 +63,7 @@ dependencies = [ "raw-cpuid 8.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.2.4", "rmm 0.1.0", + "rustc-cfg 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-demangle 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", "slab_allocator 0.3.1", "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -137,6 +160,11 @@ dependencies = [ name = "rmm" version = "0.1.0" +[[package]] +name = "rustc-cfg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "rustc-demangle" version = "0.1.18" @@ -211,9 +239,12 @@ dependencies = [ [metadata] "checksum bit_field 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4" +"checksum bitfield 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46afbd2983a5d5a7bd740ccb198caf5b82f45c40c09c0eed36052d91cb92e719" "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" 
= "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +"checksum byteorder 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" "checksum cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)" = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +"checksum fdt 0.1.0 (git+https://gitlab.redox-os.org/thomhuds/fdt.git)" = "" "checksum goblin 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d20fd25aa456527ce4f544271ae4fea65d2eda4a6561ea56f39fb3ee4f7e3884" "checksum linked_list_allocator 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "47de1a43fad0250ee197e9e124e5b5deab3d7b39d4428ae8a6d741ceb340c362" "checksum linked_list_allocator 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)" = "822add9edb1860698b79522510da17bef885171f75aa395cff099d770c609c24" @@ -225,6 +256,7 @@ dependencies = [ "checksum proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)" = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" "checksum raw-cpuid 7.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "beb71f708fe39b2c5e98076204c3cc094ee5a4c12c4cdb119a2b72dc34164f41" "checksum raw-cpuid 8.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1fdf7d9dbd43f3d81d94a49c1c3df73cc2b3827995147e6cf7f89d4ec5483e73" +"checksum rustc-cfg 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "56a596b5718bf5e059d59a30af12f7f462a152de147aa462b70892849ee18704" "checksum rustc-demangle 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" "checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" diff --git a/Cargo.toml b/Cargo.toml index eb4673e..77990ef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,12 @@ name = "kernel" path = "src/lib.rs" crate-type = ["staticlib"] +[build-dependencies] +cc = "1.0.3" +rustc-cfg = "0.3.0" + [dependencies] +bitfield = "0.13.1" bitflags = "1.2.1" linked_list_allocator = "0.8.4" log = { version = "0.4" } @@ -28,12 +33,16 @@ features = ["elf32", "elf64"] version = "0.1.16" default-features = false +[target.'cfg(target_arch = "aarch64")'.dependencies] +byteorder = { version = "1", default-features = false } +fdt = { git = "https://gitlab.redox-os.org/thomhuds/fdt.git", default-features = false } + [target.'cfg(target_arch = "x86_64")'.dependencies] raw-cpuid = "8.0.0" x86 = { version = "0.32.0", default-features = false } [features] -default = ["acpi", "multi_core", "serial_debug"] +default = ["serial_debug"] acpi = [] doc = [] graphical_debug = [] diff --git a/build.rs b/build.rs index a0f1d4d..8ca068f 100644 --- a/build.rs +++ b/build.rs @@ -1,3 +1,4 @@ +use rustc_cfg::Cfg; use std::collections::HashMap; use std::env; use std::fs; @@ -158,4 +159,13 @@ mod gen { ", ) .unwrap(); + + // Build pre kstart init asm code for aarch64 + let cfg = Cfg::new(env::var_os("TARGET").unwrap()).unwrap(); + if cfg.target_arch == "aarch64" { + println!("cargo:rerun-if-changed=src/arch/aarch64/init/pre_kstart/early_init.S"); + cc::Build::new() + .file("src/arch/aarch64/init/pre_kstart/early_init.S") + .compile("early_init"); + } } diff --git a/linkers/aarch64.ld b/linkers/aarch64.ld new file mode 100644 index 0000000..bb23c80 --- /dev/null +++ b/linkers/aarch64.ld @@ -0,0 +1,60 @@ +ENTRY(early_init) +OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64") + +KERNEL_OFFSET = 0xffffff0000000000; + 
+SECTIONS { + . = KERNEL_OFFSET; + + . += SIZEOF_HEADERS; + . = ALIGN(4096); + + .text : AT(ADDR(.text) - KERNEL_OFFSET) { + __text_start = .; + *(.early_init.text*) + . = ALIGN(4096); + *(.text*) + . = ALIGN(4096); + __text_end = .; + } + + .rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) { + __rodata_start = .; + *(.rodata*) + . = ALIGN(4096); + __rodata_end = .; + } + + .data : AT(ADDR(.data) - KERNEL_OFFSET) { + __data_start = .; + *(.data*) + . = ALIGN(4096); + __data_end = .; + __bss_start = .; + *(.bss*) + . = ALIGN(4096); + __bss_end = .; + } + + .tdata : AT(ADDR(.tdata) - KERNEL_OFFSET) { + __tdata_start = .; + *(.tdata*) + . = ALIGN(4096); + __tdata_end = .; + __tbss_start = .; + *(.tbss*) + . += 8; + . = ALIGN(4096); + __tbss_end = .; + } + + __end = .; + + /DISCARD/ : { + *(.comment*) + *(.eh_frame*) + *(.gcc_except_table*) + *(.note*) + *(.rel.eh_frame*) + } +} diff --git a/rmm b/rmm index cdbeecf..132d91d 160000 --- a/rmm +++ b/rmm @@ -1 +1 @@ -Subproject commit cdbeecfffedf802a6fd61d93b767ff273c055d80 +Subproject commit 132d91d3aaa624d1bc8709555a64ff289f7d5e4f diff --git a/src/arch/aarch64/consts.rs b/src/arch/aarch64/consts.rs new file mode 100644 index 0000000..59c5f0c --- /dev/null +++ b/src/arch/aarch64/consts.rs @@ -0,0 +1,112 @@ +// Because the memory map is so important to not be aliased, it is defined here, in one place +// The lower 256 PML4 entries are reserved for userspace +// Each PML4 entry references up to 512 GB of memory +// The top (511) PML4 is reserved for recursive mapping +// The second from the top (510) PML4 is reserved for the kernel + /// The size of a single PML4 + pub const PML4_SIZE: usize = 0x0000_0080_0000_0000; + pub const PML4_MASK: usize = 0x0000_ff80_0000_0000; + + /// Size of a page and frame + pub const PAGE_SIZE: usize = 4096; + + /// Offset of recursive paging + pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize; + pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & 
PML4_MASK)/PML4_SIZE; + + /// Offset of kernel + pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE; + pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK)/PML4_SIZE; + + /// Kernel stack size - must be kept in sync with early_init.S. Used by memory::init + pub const KERNEL_STACK_SIZE: usize = PAGE_SIZE; + + /// Offset to kernel heap + pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE; + pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK)/PML4_SIZE; + + /// Size of kernel heap + pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB + + /// Offset of device map region + pub const KERNEL_DEVMAP_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE; + + /// Offset of environment region + pub const KERNEL_ENV_OFFSET: usize = KERNEL_DEVMAP_OFFSET - PML4_SIZE; + + /// Offset of temporary mapping for misc kernel bring-up actions + pub const KERNEL_TMP_MISC_OFFSET: usize = KERNEL_ENV_OFFSET - PML4_SIZE; + + /// Offset of FDT DTB image + pub const KERNEL_DTB_OFFSET: usize = KERNEL_TMP_MISC_OFFSET - PML4_SIZE; + pub const KERNEL_DTB_MAX_SIZE: usize = 2 * 1024 * 1024; // 2 MB + + /// Offset to kernel percpu variables + //TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE; + pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_DTB_OFFSET - PML4_SIZE; + + /// Size of kernel percpu variables + pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB + + /// Offset to user image + pub const USER_OFFSET: usize = 0; + pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK)/PML4_SIZE; + + /// Offset to user TCB + pub const USER_TCB_OFFSET: usize = 0xB000_0000; + + /// Offset to user arguments + pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE/2; + + /// Offset to user heap + pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE; + pub const USER_HEAP_PML4: usize = (USER_HEAP_OFFSET & PML4_MASK)/PML4_SIZE; + + /// Offset to user grants + pub const 
USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE; + pub const USER_GRANT_PML4: usize = (USER_GRANT_OFFSET & PML4_MASK)/PML4_SIZE; + + /// Offset to user stack + pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE; + pub const USER_STACK_PML4: usize = (USER_STACK_OFFSET & PML4_MASK)/PML4_SIZE; + /// Size of user stack + pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB + + /// Offset to user sigstack + pub const USER_SIGSTACK_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE; + pub const USER_SIGSTACK_PML4: usize = (USER_SIGSTACK_OFFSET & PML4_MASK)/PML4_SIZE; + /// Size of user sigstack + pub const USER_SIGSTACK_SIZE: usize = 256 * 1024; // 256 KB + + /// Offset to user TLS + pub const USER_TLS_OFFSET: usize = USER_SIGSTACK_OFFSET + PML4_SIZE; + pub const USER_TLS_PML4: usize = (USER_TLS_OFFSET & PML4_MASK)/PML4_SIZE; + pub const USER_TLS_SIZE: usize = 64 * 1024; + + /// Offset to user temporary image (used when cloning) + pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE; + pub const USER_TMP_PML4: usize = (USER_TMP_OFFSET & PML4_MASK)/PML4_SIZE; + + /// Offset to user temporary heap (used when cloning) + pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE; + pub const USER_TMP_HEAP_PML4: usize = (USER_TMP_HEAP_OFFSET & PML4_MASK)/PML4_SIZE; + + /// Offset to user temporary page for grants + pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE; + pub const USER_TMP_GRANT_PML4: usize = (USER_TMP_GRANT_OFFSET & PML4_MASK)/PML4_SIZE; + + /// Offset to user temporary stack (used when cloning) + pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE; + pub const USER_TMP_STACK_PML4: usize = (USER_TMP_STACK_OFFSET & PML4_MASK)/PML4_SIZE; + + /// Offset to user temporary sigstack (used when cloning) + pub const USER_TMP_SIGSTACK_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE; + pub const USER_TMP_SIGSTACK_PML4: usize = (USER_TMP_SIGSTACK_OFFSET & PML4_MASK)/PML4_SIZE; + + /// 
Offset to user temporary tls (used when cloning) + pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_SIGSTACK_OFFSET + PML4_SIZE; + pub const USER_TMP_TLS_PML4: usize = (USER_TMP_TLS_OFFSET & PML4_MASK)/PML4_SIZE; + + /// Offset for usage in other temporary pages + pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE; + pub const USER_TMP_MISC_PML4: usize = (USER_TMP_MISC_OFFSET & PML4_MASK)/PML4_SIZE; diff --git a/src/arch/aarch64/debug.rs b/src/arch/aarch64/debug.rs new file mode 100644 index 0000000..24ff375 --- /dev/null +++ b/src/arch/aarch64/debug.rs @@ -0,0 +1,48 @@ +use core::fmt; +use spin::MutexGuard; + +use crate::log::{LOG, Log}; + +#[cfg(feature = "serial_debug")] +use super::device::{ + serial::COM1, + uart_pl011::SerialPort, +}; + +pub struct Writer<'a> { + log: MutexGuard<'a, Option>, + #[cfg(feature = "serial_debug")] + serial: MutexGuard<'a, Option>, +} + +impl<'a> Writer<'a> { + pub fn new() -> Writer<'a> { + Writer { + log: LOG.lock(), + #[cfg(feature = "serial_debug")] + serial: COM1.lock(), + } + } + + pub fn write(&mut self, buf: &[u8]) { + { + if let Some(ref mut log) = *self.log { + log.write(buf); + } + } + + #[cfg(feature = "serial_debug")] + { + if let Some(ref mut serial) = *self.serial { + serial.write(buf); + } + } + } +} + +impl<'a> fmt::Write for Writer<'a> { + fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { + self.write(s.as_bytes()); + Ok(()) + } +} diff --git a/src/arch/aarch64/device/cpu/mod.rs b/src/arch/aarch64/device/cpu/mod.rs new file mode 100644 index 0000000..d13e46a --- /dev/null +++ b/src/arch/aarch64/device/cpu/mod.rs @@ -0,0 +1,205 @@ +use core::fmt::{Result, Write}; + +use crate::device::cpu::registers::{control_regs}; + +pub mod registers; + +bitfield! 
{ + pub struct MachineId(u32); + get_implementer, _: 31, 24; + get_variant, _: 23, 20; + get_architecture, _: 19, 16; + get_part_number, _: 15, 4; + get_revision, _: 3, 0; +} + +enum ImplementerID { + Unknown, + Arm, + Broadcom, + Cavium, + Digital, + Infineon, + Motorola, + Nvidia, + AMCC, + Qualcomm, + Marvell, + Intel, +} + +const IMPLEMENTERS: [&'static str; 12] = [ + "Unknown", + "Arm", + "Broadcom", + "Cavium", + "Digital", + "Infineon", + "Motorola", + "Nvidia", + "AMCC", + "Qualcomm", + "Marvell", + "Intel", +]; + + +enum VariantID { + Unknown, +} + +const VARIANTS: [&'static str; 1] = [ + "Unknown", +]; + +enum ArchitectureID { + Unknown, + V4, + V4T, + V5, + V5T, + V5TE, + V5TEJ, + V6, +} + +const ARCHITECTURES: [&'static str; 8] = [ + "Unknown", + "v4", + "v4T", + "v5", + "v5T", + "v5TE", + "v5TEJ", + "v6", +]; + +enum PartNumberID { + Unknown, + Thunder, + Foundation, + CortexA35, + CortexA53, + CortexA55, + CortexA57, + CortexA72, + CortexA73, + CortexA75, +} + +const PART_NUMBERS: [&'static str; 10] = [ + "Unknown", + "Thunder", + "Foundation", + "Cortex-A35", + "Cortex-A53", + "Cortex-A55", + "Cortex-A57", + "Cortex-A72", + "Cortex-A73", + "Cortex-A75", +]; + +enum RevisionID { + Unknown, + Thunder1_0, + Thunder1_1, +} + +const REVISIONS: [&'static str; 3] = [ + "Unknown", + "Thunder-1.0", + "Thunder-1.1", +]; + +struct CpuInfo { + implementer: &'static str, + variant: &'static str, + architecture: &'static str, + part_number: &'static str, + revision: &'static str, +} + +impl CpuInfo { + fn new() -> CpuInfo { + let midr = unsafe { control_regs::midr() }; + println!("MIDR: 0x{:x}", midr); + let midr = MachineId(midr); + + let implementer = match midr.get_implementer() { + 0x41 => IMPLEMENTERS[ImplementerID::Arm as usize], + 0x42 => IMPLEMENTERS[ImplementerID::Broadcom as usize], + 0x43 => IMPLEMENTERS[ImplementerID::Cavium as usize], + 0x44 => IMPLEMENTERS[ImplementerID::Digital as usize], + 0x49 => IMPLEMENTERS[ImplementerID::Infineon as usize], + 
0x4d => IMPLEMENTERS[ImplementerID::Motorola as usize], + 0x4e => IMPLEMENTERS[ImplementerID::Nvidia as usize], + 0x50 => IMPLEMENTERS[ImplementerID::AMCC as usize], + 0x51 => IMPLEMENTERS[ImplementerID::Qualcomm as usize], + 0x56 => IMPLEMENTERS[ImplementerID::Marvell as usize], + 0x69 => IMPLEMENTERS[ImplementerID::Intel as usize], + _ => IMPLEMENTERS[ImplementerID::Unknown as usize], + }; + + let variant = match midr.get_variant() { + _ => VARIANTS[VariantID::Unknown as usize], + }; + + let architecture = match midr.get_architecture() { + 0b0001 => ARCHITECTURES[ArchitectureID::V4 as usize], + 0b0010 => ARCHITECTURES[ArchitectureID::V4T as usize], + 0b0011 => ARCHITECTURES[ArchitectureID::V5 as usize], + 0b0100 => ARCHITECTURES[ArchitectureID::V5T as usize], + 0b0101 => ARCHITECTURES[ArchitectureID::V5TE as usize], + 0b0110 => ARCHITECTURES[ArchitectureID::V5TEJ as usize], + 0b0111 => ARCHITECTURES[ArchitectureID::V6 as usize], + _ => ARCHITECTURES[ArchitectureID::Unknown as usize], + }; + + let part_number = match midr.get_part_number() { + 0x0a1 => PART_NUMBERS[PartNumberID::Thunder as usize], + 0xd00 => PART_NUMBERS[PartNumberID::Foundation as usize], + 0xd04 => PART_NUMBERS[PartNumberID::CortexA35 as usize], + 0xd03 => PART_NUMBERS[PartNumberID::CortexA53 as usize], + 0xd05 => PART_NUMBERS[PartNumberID::CortexA55 as usize], + 0xd07 => PART_NUMBERS[PartNumberID::CortexA57 as usize], + 0xd08 => PART_NUMBERS[PartNumberID::CortexA72 as usize], + 0xd09 => PART_NUMBERS[PartNumberID::CortexA73 as usize], + 0xd0a => PART_NUMBERS[PartNumberID::CortexA75 as usize], + _ => PART_NUMBERS[PartNumberID::Unknown as usize], + }; + + let revision = match part_number { + "Thunder" => { + let val = match midr.get_revision() { + 0x00 => REVISIONS[RevisionID::Thunder1_0 as usize], + 0x01 => REVISIONS[RevisionID::Thunder1_1 as usize], + _ => REVISIONS[RevisionID::Unknown as usize], + }; + val + }, + _ => REVISIONS[RevisionID::Unknown as usize], + }; + + CpuInfo { + implementer, + 
variant, + architecture, + part_number, + revision, + } + } +} + +pub fn cpu_info(w: &mut W) -> Result { + let cpuinfo = CpuInfo::new(); + + write!(w, "Implementer: {}\n", cpuinfo.implementer)?; + write!(w, "Variant: {}\n", cpuinfo.variant)?; + write!(w, "Architecture version: {}\n", cpuinfo.architecture)?; + write!(w, "Part Number: {}\n", cpuinfo.part_number)?; + write!(w, "Revision: {}\n", cpuinfo.revision)?; + write!(w, "\n")?; + + Ok(()) +} diff --git a/src/arch/aarch64/device/cpu/registers/control_regs.rs b/src/arch/aarch64/device/cpu/registers/control_regs.rs new file mode 100644 index 0000000..5c02d43 --- /dev/null +++ b/src/arch/aarch64/device/cpu/registers/control_regs.rs @@ -0,0 +1,85 @@ +//! Functions to read and write control registers. + +bitflags! { + pub struct MairEl1: u64 { + const DEVICE_MEMORY = 0x00; + const NORMAL_UNCACHED_MEMORY = 0x44 << 8; + const NORMAL_WRITEBACK_MEMORY = 0xff << 16; + } +} + +pub unsafe fn ttbr0_el1() -> u64 { + let ret: u64; + llvm_asm!("mrs $0, ttbr0_el1" : "=r" (ret)); + ret +} + +pub unsafe fn ttbr0_el1_write(val: u64) { + llvm_asm!("msr ttbr0_el1, $0" :: "r" (val) : "memory"); +} + +pub unsafe fn ttbr1_el1() -> u64 { + let ret: u64; + llvm_asm!("mrs $0, ttbr1_el1" : "=r" (ret)); + ret +} + +pub unsafe fn ttbr1_el1_write(val: u64) { + llvm_asm!("msr ttbr1_el1, $0" :: "r" (val) : "memory"); +} + +pub unsafe fn mair_el1() -> MairEl1 { + let ret: u64; + llvm_asm!("mrs $0, mair_el1" : "=r" (ret)); + MairEl1::from_bits_truncate(ret) +} + +pub unsafe fn mair_el1_write(val: MairEl1) { + llvm_asm!("msr mair_el1, $0" :: "r" (val.bits()) : "memory"); +} + +pub unsafe fn tpidr_el0_write(val: u64) { + llvm_asm!("msr tpidr_el0, $0" :: "r" (val) : "memory"); +} + +pub unsafe fn tpidr_el1_write(val: u64) { + llvm_asm!("msr tpidr_el1, $0" :: "r" (val) : "memory"); +} + +pub unsafe fn esr_el1() -> u32 { + let ret: u32; + llvm_asm!("mrs $0, esr_el1" : "=r" (ret)); + ret +} + +pub unsafe fn cntfreq_el0() -> u32 { + let ret: u32; + 
llvm_asm!("mrs $0, cntfrq_el0" : "=r" (ret)); + ret +} + +pub unsafe fn tmr_ctrl() -> u32 { + let ret: u32; + llvm_asm!("mrs $0, cntp_ctl_el0" : "=r" (ret)); + ret +} + +pub unsafe fn tmr_ctrl_write(val: u32) { + llvm_asm!("msr cntp_ctl_el0, $0" :: "r" (val) : "memory"); +} + +pub unsafe fn tmr_tval() -> u32 { + let ret: u32; + llvm_asm!("mrs $0, cntp_tval_el0" : "=r" (ret)); + ret +} + +pub unsafe fn tmr_tval_write(val: u32) { + llvm_asm!("msr cntp_tval_el0, $0" :: "r" (val) : "memory"); +} + +pub unsafe fn midr() -> u32 { + let ret: u32; + llvm_asm!("mrs $0, midr_el1" : "=r" (ret)); + ret +} diff --git a/src/arch/aarch64/device/cpu/registers/mod.rs b/src/arch/aarch64/device/cpu/registers/mod.rs new file mode 100644 index 0000000..9d09aab --- /dev/null +++ b/src/arch/aarch64/device/cpu/registers/mod.rs @@ -0,0 +1,2 @@ +pub mod control_regs; +pub mod tlb; diff --git a/src/arch/aarch64/device/cpu/registers/tlb.rs b/src/arch/aarch64/device/cpu/registers/tlb.rs new file mode 100644 index 0000000..8630574 --- /dev/null +++ b/src/arch/aarch64/device/cpu/registers/tlb.rs @@ -0,0 +1,9 @@ +//! Functions to flush the translation lookaside buffer (TLB). + +pub unsafe fn flush(_addr: usize) { + llvm_asm!("tlbi vmalle1is"); +} + +pub unsafe fn flush_all() { + llvm_asm!("tlbi vmalle1is"); +} diff --git a/src/arch/aarch64/device/generic_timer.rs b/src/arch/aarch64/device/generic_timer.rs new file mode 100644 index 0000000..d8e6b6b --- /dev/null +++ b/src/arch/aarch64/device/generic_timer.rs @@ -0,0 +1,80 @@ +use crate::arch::device::gic; +use crate::device::cpu::registers::{control_regs}; + +bitflags! 
{ + struct TimerCtrlFlags: u32 { + const ENABLE = 1 << 0; + const IMASK = 1 << 1; + const ISTATUS = 1 << 2; + } +} + +pub static mut GENTIMER: GenericTimer = GenericTimer { + clk_freq: 0, + reload_count: 0, +}; + +pub unsafe fn init() { + GENTIMER.init(); +} + +/* +pub unsafe fn clear_irq() { + GENTIMER.clear_irq(); +} + +pub unsafe fn reload() { + GENTIMER.reload_count(); +} +*/ + +pub struct GenericTimer { + pub clk_freq: u32, + pub reload_count: u32, +} + +impl GenericTimer { + pub fn init(&mut self) { + let clk_freq = unsafe { control_regs::cntfreq_el0() }; + self.clk_freq = clk_freq; + self.reload_count = clk_freq / 100; + + unsafe { control_regs::tmr_tval_write(self.reload_count) }; + + let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() }); + ctrl.insert(TimerCtrlFlags::ENABLE); + ctrl.remove(TimerCtrlFlags::IMASK); + unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) }; + + gic::irq_enable(30); + } + + fn disable() { + let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() }); + ctrl.remove(TimerCtrlFlags::ENABLE); + unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) }; + } + + pub fn set_irq(&mut self) { + let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() }); + ctrl.remove(TimerCtrlFlags::IMASK); + unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) }; + } + + pub fn clear_irq(&mut self) { + let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() }); + + if ctrl.contains(TimerCtrlFlags::ISTATUS) { + ctrl.insert(TimerCtrlFlags::IMASK); + unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) }; + } + } + + pub fn reload_count(&mut self) { + let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() }); + ctrl.insert(TimerCtrlFlags::ENABLE); + ctrl.remove(TimerCtrlFlags::IMASK); + unsafe { control_regs::tmr_tval_write(self.reload_count) }; + unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) }; + } +} diff --git 
a/src/arch/aarch64/device/gic.rs b/src/arch/aarch64/device/gic.rs new file mode 100644 index 0000000..582854b --- /dev/null +++ b/src/arch/aarch64/device/gic.rs @@ -0,0 +1,182 @@ +use core::intrinsics::{volatile_load, volatile_store}; + +use crate::memory::Frame; +use crate::paging::{ActivePageTable, PhysicalAddress, Page, PageTableType, VirtualAddress}; +use crate::paging::entry::EntryFlags; + +static GICD_CTLR: u32 = 0x000; +static GICD_TYPER: u32 = 0x004; +static GICD_ISENABLER: u32 = 0x100; +static GICD_ICENABLER: u32 = 0x180; +static GICD_IPRIORITY: u32 = 0x400; +static GICD_ITARGETSR: u32 = 0x800; +static GICD_ICFGR: u32 = 0xc00; + +static GICC_EOIR: u32 = 0x0010; +static GICC_IAR: u32 = 0x000c; +static GICC_CTLR: u32 = 0x0000; +static GICC_PMR: u32 = 0x0004; + +static mut GIC_DIST_IF: GicDistIf = GicDistIf { + address: 0, + ncpus: 0, + nirqs: 0, +}; + +static mut GIC_CPU_IF: GicCpuIf = GicCpuIf { + address: 0, +}; + +pub unsafe fn init() { + GIC_DIST_IF.init(); + GIC_CPU_IF.init(); +} + +pub fn irq_enable(irq_num: u32) { + unsafe { GIC_DIST_IF.irq_enable(irq_num) }; +} + +pub fn irq_disable(irq_num: u32) { + unsafe { GIC_DIST_IF.irq_disable(irq_num) }; +} + +pub unsafe fn irq_ack() -> u32 { + GIC_CPU_IF.irq_ack() +} + +pub unsafe fn irq_eoi(irq_num: u32) { + GIC_CPU_IF.irq_eoi(irq_num); +} + +pub struct GicDistIf { + pub address: usize, + pub ncpus: u32, + pub nirqs: u32, +} + +impl GicDistIf { + unsafe fn init(&mut self) { + // Map in the Distributor interface + let mut active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + + let start_frame = Frame::containing_address(PhysicalAddress::new(0x08000000)); + let end_frame = Frame::containing_address(PhysicalAddress::new(0x08000000 + 0x10000 - 1)); + for frame in Frame::range_inclusive(start_frame, end_frame) { + let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::KERNEL_DEVMAP_OFFSET)); + let result = active_table.map_to(page, frame, 
EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE); + result.flush(&mut active_table); + } + + self.address = crate::KERNEL_DEVMAP_OFFSET + 0x08000000; + + // Map in CPU0's interface + let start_frame = Frame::containing_address(PhysicalAddress::new(0x08010000)); + let end_frame = Frame::containing_address(PhysicalAddress::new(0x08010000 + 0x10000 - 1)); + for frame in Frame::range_inclusive(start_frame, end_frame) { + let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::KERNEL_DEVMAP_OFFSET)); + let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE); + result.flush(&mut active_table); + } + + GIC_CPU_IF.address = crate::KERNEL_DEVMAP_OFFSET + 0x08010000; + + // Disable IRQ Distribution + self.write(GICD_CTLR, 0); + + let typer = self.read(GICD_TYPER); + self.ncpus = ((typer & (0x7 << 5)) >> 5) + 1; + self.nirqs = ((typer & 0x1f) + 1) * 32; + println!("gic: Distributor supports {:?} CPUs and {:?} IRQs", self.ncpus, self.nirqs); + + // Set all SPIs to level triggered + for irq in (32..self.nirqs).step_by(16) { + self.write(GICD_ICFGR + ((irq / 16) * 4), 0); + } + + // Disable all SPIs + for irq in (32..self.nirqs).step_by(32) { + self.write(GICD_ICENABLER + ((irq / 32) * 4), 0xffff_ffff); + } + + // Affine all SPIs to CPU0 and set priorities for all IRQs + for irq in 0..self.nirqs { + if irq > 31 { + let ext_offset = GICD_ITARGETSR + (4 * (irq / 4)); + let int_offset = irq % 4; + let mut val = self.read(ext_offset); + val |= 0b0000_0001 << (8 * int_offset); + self.write(ext_offset, val); + } + + let ext_offset = GICD_IPRIORITY + (4 * (irq / 4)); + let int_offset = irq % 4; + let mut val = self.read(ext_offset); + val |= 0b0000_0000 << (8 * int_offset); + self.write(ext_offset, val); + } + + // Enable CPU0's GIC interface + GIC_CPU_IF.write(GICC_CTLR, 1); + + // Set CPU0's Interrupt Priority Mask + GIC_CPU_IF.write(GICC_PMR, 0xff); + + // Enable IRQ 
distribution + self.write(GICD_CTLR, 0x1); + } + + unsafe fn irq_enable(&mut self, irq: u32) { + let offset = GICD_ISENABLER + (4 * (irq / 32)); + let shift = 1 << (irq % 32); + let mut val = self.read(offset); + val |= shift; + self.write(offset, val); + } + + unsafe fn irq_disable(&mut self, irq: u32) { + let offset = GICD_ICENABLER + (4 * (irq / 32)); + let shift = 1 << (irq % 32); + let mut val = self.read(offset); + val |= shift; + self.write(offset, val); + } + + unsafe fn read(&self, reg: u32) -> u32 { + let val = volatile_load((self.address + reg as usize) as *const u32); + val + } + + unsafe fn write(&mut self, reg: u32, value: u32) { + volatile_store((self.address + reg as usize) as *mut u32, value); + } +} + +pub struct GicCpuIf { + pub address: usize, +} + +impl GicCpuIf { + unsafe fn init(&mut self) { + } + + unsafe fn irq_ack(&mut self) -> u32 { + let irq = self.read(GICC_IAR) & 0x1ff; + if irq == 1023 { + panic!("irq_ack: got ID 1023!!!"); + } + irq + } + + unsafe fn irq_eoi(&mut self, irq: u32) { + self.write(GICC_EOIR, irq); + } + + unsafe fn read(&self, reg: u32) -> u32 { + let val = volatile_load((self.address + reg as usize) as *const u32); + val + } + + unsafe fn write(&mut self, reg: u32, value: u32) { + volatile_store((self.address + reg as usize) as *mut u32, value); + } +} diff --git a/src/arch/aarch64/device/mod.rs b/src/arch/aarch64/device/mod.rs new file mode 100644 index 0000000..bdd3169 --- /dev/null +++ b/src/arch/aarch64/device/mod.rs @@ -0,0 +1,25 @@ +use crate::paging::ActivePageTable; + +pub mod cpu; +pub mod gic; +pub mod generic_timer; +pub mod serial; +pub mod rtc; +pub mod uart_pl011; + +pub unsafe fn init(_active_table: &mut ActivePageTable) { + println!("GIC INIT"); + gic::init(); + println!("GIT INIT"); + generic_timer::init(); +} + +pub unsafe fn init_noncore() { + println!("SERIAL INIT"); + serial::init(); + println!("RTC INIT"); + rtc::init(); +} + +pub unsafe fn init_ap() { +} diff --git a/src/arch/aarch64/device/rtc.rs 
b/src/arch/aarch64/device/rtc.rs new file mode 100644 index 0000000..de95ab4 --- /dev/null +++ b/src/arch/aarch64/device/rtc.rs @@ -0,0 +1,59 @@ +use core::intrinsics::{volatile_load, volatile_store}; + +use crate::memory::Frame; +use crate::paging::{ActivePageTable, PhysicalAddress, Page, PageTableType, VirtualAddress}; +use crate::paging::entry::EntryFlags; +use crate::time; + +static RTC_DR: u32 = 0x000; +static RTC_MR: u32 = 0x004; +static RTC_LR: u32 = 0x008; +static RTC_CR: u32 = 0x00c; +static RTC_IMSC: u32 = 0x010; +static RTC_RIS: u32 = 0x014; +static RTC_MIS: u32 = 0x018; +static RTC_ICR: u32 = 0x01c; + +static mut PL031_RTC: Pl031rtc = Pl031rtc { + address: 0, +}; + +pub unsafe fn init() { + PL031_RTC.init(); + time::START.lock().0 = PL031_RTC.time(); +} + +struct Pl031rtc { + pub address: usize, +} + +impl Pl031rtc { + unsafe fn init(&mut self) { + let mut active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + + let start_frame = Frame::containing_address(PhysicalAddress::new(0x09010000)); + let end_frame = Frame::containing_address(PhysicalAddress::new(0x09010000 + 0x1000 - 1)); + + for frame in Frame::range_inclusive(start_frame, end_frame) { + let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::KERNEL_DEVMAP_OFFSET)); + let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE); + result.flush(&mut active_table); + } + + self.address = crate::KERNEL_DEVMAP_OFFSET + 0x09010000; + } + + unsafe fn read(&self, reg: u32) -> u32 { + let val = volatile_load((self.address + reg as usize) as *const u32); + val + } + + unsafe fn write(&mut self, reg: u32, value: u32) { + volatile_store((self.address + reg as usize) as *mut u32, value); + } + + pub fn time(&mut self) -> u64 { + let seconds = unsafe { self.read(RTC_DR) } as u64; + seconds + } +} diff --git a/src/arch/aarch64/device/serial.rs b/src/arch/aarch64/device/serial.rs new file mode 100644 
index 0000000..a852fde --- /dev/null +++ b/src/arch/aarch64/device/serial.rs @@ -0,0 +1,38 @@ +use core::sync::atomic::{Ordering}; +use spin::Mutex; + +use crate::device::uart_pl011::SerialPort; +use crate::init::device_tree; +use crate::memory::Frame; +use crate::paging::mapper::{MapperFlushAll, MapperType}; +use crate::paging::{ActivePageTable, Page, PageTableType, PhysicalAddress, VirtualAddress}; +use crate::paging::entry::EntryFlags; + +pub static COM1: Mutex> = Mutex::new(None); + +pub unsafe fn init() { + if let Some(ref mut serial_port) = *COM1.lock() { + return; + } + let (base, size) = device_tree::diag_uart_range(crate::KERNEL_DTB_OFFSET, crate::KERNEL_DTB_MAX_SIZE).unwrap(); + + let mut active_ktable = unsafe { ActivePageTable::new(/* TODO PageTableType::Kernel */) }; + let mut flush_all = MapperFlushAll::new(); + + let start_frame = Frame::containing_address(PhysicalAddress::new(base)); + let end_frame = Frame::containing_address(PhysicalAddress::new(base + size - 1)); + for frame in Frame::range_inclusive(start_frame, end_frame) { + let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::KERNEL_DEVMAP_OFFSET)); + let result = active_ktable.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE); + flush_all.consume(result); + }; + flush_all.flush(&mut active_ktable); + + let start_frame = Frame::containing_address(PhysicalAddress::new(base)); + let vaddr = start_frame.start_address().data() + crate::KERNEL_DEVMAP_OFFSET; + + *COM1.lock() = Some(SerialPort::new(vaddr)); + if let Some(ref mut serial_port) = *COM1.lock() { + serial_port.init(true); + } +} diff --git a/src/arch/aarch64/device/uart_pl011.rs b/src/arch/aarch64/device/uart_pl011.rs new file mode 100644 index 0000000..ef9f778 --- /dev/null +++ b/src/arch/aarch64/device/uart_pl011.rs @@ -0,0 +1,170 @@ +use core::fmt::{self, Write}; +use core::ptr; + +use crate::device::gic; +use crate::scheme::debug::debug_input; + 
+bitflags! { + /// UARTFR + struct UartFrFlags: u16 { + const TXFE = 1 << 7; + const RXFF = 1 << 6; + const TXFF = 1 << 5; + const RXFE = 1 << 4; + const BUSY = 1 << 3; + } +} + +bitflags! { + /// UARTCR + struct UartCrFlags: u16 { + const RXE = 1 << 9; + const TXE = 1 << 8; + const UARTEN = 1 << 0; + } +} + +bitflags! { + // UARTIMSC + struct UartImscFlags: u16 { + const RTIM = 1 << 6; + const TXIM = 1 << 5; + const RXIM = 1 << 4; + } +} + +bitflags! { + // UARTICR + struct UartIcrFlags: u16 { + const RTIC = 1 << 6; + const TXIC = 1 << 5; + const RXIC = 1 << 4; + } +} + +bitflags! { + //UARTMIS + struct UartMisFlags: u16 { + const TXMIS = 1 << 5; + const RXMIS = 1 << 4; + } +} + +bitflags! { + //UARTLCR_H + struct UartLcrhFlags: u16 { + const FEN = 1 << 4; + } +} + +#[allow(dead_code)] +pub struct SerialPort { + base: usize, + data_reg: u8, + rcv_stat_reg: u8, + flag_reg: u8, + int_baud_reg: u8, + frac_baud_reg: u8, + line_ctrl_reg: u8, + ctrl_reg: u8, + intr_fifo_ls_reg: u8, + intr_mask_setclr_reg: u8, + raw_intr_stat_reg: u8, + masked_intr_stat_reg: u8, + intr_clr_reg: u8, + dma_ctrl_reg: u8 +} + +impl SerialPort { + pub const fn new(base: usize) -> SerialPort { + SerialPort { + base: base, + data_reg: 0x00, + rcv_stat_reg: 0x04, + flag_reg: 0x18, + int_baud_reg: 0x24, + frac_baud_reg: 0x28, + line_ctrl_reg: 0x2c, + ctrl_reg: 0x30, + intr_fifo_ls_reg: 0x34, + intr_mask_setclr_reg: 0x38, + raw_intr_stat_reg: 0x3c, + masked_intr_stat_reg: 0x40, + intr_clr_reg: 0x44, + dma_ctrl_reg: 0x48, + } + } + + pub fn read_reg(&self, register: u8) -> u16 { + unsafe { ptr::read_volatile((self.base + register as usize) as *mut u16) } + } + + pub fn write_reg(&self, register: u8, data: u16) { + unsafe { ptr::write_volatile((self.base + register as usize) as *mut u16, data); } + } + + pub fn init(&mut self, with_irq: bool) { + // Enable RX, TX, UART + let flags = UartCrFlags::RXE | UartCrFlags::TXE | UartCrFlags::UARTEN; + self.write_reg(self.ctrl_reg, flags.bits()); + + // 
Disable FIFOs (use character mode instead) + let mut flags = UartLcrhFlags::from_bits_truncate(self.read_reg(self.line_ctrl_reg)); + flags.remove(UartLcrhFlags::FEN); + self.write_reg(self.line_ctrl_reg, flags.bits()); + + if with_irq { + // Enable IRQs + let flags = UartImscFlags::RXIM; + self.write_reg(self.intr_mask_setclr_reg, flags.bits); + + // Clear pending interrupts + self.write_reg(self.intr_clr_reg, 0x7ff); + + // Enable interrupt at GIC distributor + gic::irq_enable(33); + } + } + + fn line_sts(&self) -> UartFrFlags { + UartFrFlags::from_bits_truncate(self.read_reg(self.flag_reg)) + } + + pub fn receive(&mut self) { + while self.line_sts().contains(UartFrFlags::RXFF) { + debug_input(self.read_reg(self.data_reg) as u8); + } + } + + pub fn send(&mut self, data: u8) { + while ! self.line_sts().contains(UartFrFlags::TXFE) {} + self.write_reg(self.data_reg, data as u16); + } + + pub fn send_dbg(&mut self, data: u16) { + if self.base != 0 { + self.write_reg(self.data_reg, data); + } + } + + pub fn clear_all_irqs(&mut self) { + let flags = UartIcrFlags::RXIC; + self.write_reg(self.intr_clr_reg, flags.bits()); + } + + pub fn disable_irq(&mut self) { + self.write_reg(self.intr_mask_setclr_reg, 0); + } + + pub fn enable_irq(&mut self) { + let flags = UartImscFlags::RXIM; + self.write_reg(self.intr_mask_setclr_reg, flags.bits()); + } + + pub fn write(&mut self, buf: &[u8]) { + //TODO: some character conversion like in uart_16550.rs + for &b in buf { + self.send_dbg(b as u16); + } + } +} diff --git a/src/arch/aarch64/init/device_tree/mod.rs b/src/arch/aarch64/init/device_tree/mod.rs new file mode 100644 index 0000000..cff4047 --- /dev/null +++ b/src/arch/aarch64/init/device_tree/mod.rs @@ -0,0 +1,127 @@ +extern crate fdt; +extern crate byteorder; + +use alloc::vec::Vec; +use core::slice; +use crate::memory::MemoryArea; +use self::byteorder::{ByteOrder, BE}; + +pub static mut MEMORY_MAP: [MemoryArea; 512] = [MemoryArea { + base_addr: 0, + length: 0, + _type: 0, + 
acpi: 0, +}; 512]; + +fn root_cell_sz(dt: &fdt::DeviceTree) -> Option<(u32, u32)> { + let root_node = dt.nodes().nth(0).unwrap(); + let address_cells = root_node.properties().find(|p| p.name.contains("#address-cells")).unwrap(); + let size_cells = root_node.properties().find(|p| p.name.contains("#size-cells")).unwrap(); + + Some((BE::read_u32(&size_cells.data), BE::read_u32(&size_cells.data))) +} + +fn memory_ranges(dt: &fdt::DeviceTree, address_cells: usize, size_cells: usize, ranges: &mut [(usize, usize); 10]) -> usize { + + let memory_node = dt.find_node("/memory").unwrap(); + let reg = memory_node.properties().find(|p| p.name.contains("reg")).unwrap(); + let chunk_sz = (address_cells + size_cells) * 4; + let chunk_count = (reg.data.len() / chunk_sz); + let mut index = 0; + for chunk in reg.data.chunks(chunk_sz as usize) { + if index == chunk_count { + return index; + } + let (base, size) = chunk.split_at((address_cells * 4) as usize); + let mut b = 0; + for base_chunk in base.rchunks(4) { + b += BE::read_u32(base_chunk); + } + let mut s = 0; + for sz_chunk in size.rchunks(4) { + s += BE::read_u32(sz_chunk); + } + ranges[index] = (b as usize, s as usize); + index += 1; + } + index +} + +pub fn diag_uart_range(dtb_base: usize, dtb_size: usize) -> Option<(usize, usize)> { + let data = unsafe { slice::from_raw_parts(dtb_base as *const u8, dtb_size) }; + let dt = fdt::DeviceTree::new(data).unwrap(); + + let chosen_node = dt.find_node("/chosen").unwrap(); + let stdout_path = chosen_node.properties().find(|p| p.name.contains("stdout-path")).unwrap(); + let uart_node_name = core::str::from_utf8(stdout_path.data).unwrap() + .split('/') + .collect::>()[1].trim_end(); + let len = uart_node_name.len(); + let uart_node_name = &uart_node_name[0..len-1]; + let uart_node = dt.nodes().find(|n| n.name.contains(uart_node_name)).unwrap(); + let reg = uart_node.properties().find(|p| p.name.contains("reg")).unwrap(); + + let (address_cells, size_cells) = root_cell_sz(&dt).unwrap(); 
+ let chunk_sz = (address_cells + size_cells) * 4; + let (base, size) = reg.data.split_at((address_cells * 4) as usize); + let mut b = 0; + for base_chunk in base.rchunks(4) { + b += BE::read_u32(base_chunk); + } + let mut s = 0; + for sz_chunk in size.rchunks(4) { + s += BE::read_u32(sz_chunk); + } + Some((b as usize, s as usize)) +} + +fn compatible_node_present<'a>(dt: &fdt::DeviceTree<'a>, compat_string: &str) -> bool { + for node in dt.nodes() { + if let Some(compatible) = node.properties().find(|p| p.name.contains("compatible")) { + let s = core::str::from_utf8(compatible.data).unwrap(); + if s.contains(compat_string) { + return true; + } + } + } + false +} + +pub fn fill_env_data(dtb_base: usize, dtb_size: usize, env_base: usize) -> usize { + let data = unsafe { slice::from_raw_parts(dtb_base as *const u8, dtb_size) }; + let dt = fdt::DeviceTree::new(data).unwrap(); + + let chosen_node = dt.find_node("/chosen").unwrap(); + if let Some(bootargs) = chosen_node.properties().find(|p| p.name.contains("bootargs")) { + let bootargs_len = bootargs.data.len(); + + let env_base_slice = unsafe { slice::from_raw_parts_mut(env_base as *mut u8, bootargs_len) }; + env_base_slice[..bootargs_len].clone_from_slice(bootargs.data); + + bootargs_len + } else { + 0 + } +} + +pub fn fill_memory_map(dtb_base: usize, dtb_size: usize) { + let data = unsafe { slice::from_raw_parts(dtb_base as *const u8, dtb_size) }; + let dt = fdt::DeviceTree::new(data).unwrap(); + + let (address_cells, size_cells) = root_cell_sz(&dt).unwrap(); + let mut ranges: [(usize, usize); 10] = [(0,0); 10]; + + let nranges = memory_ranges(&dt, address_cells as usize, size_cells as usize, &mut ranges); + + for index in (0..nranges) { + let (base, size) = ranges[index]; + unsafe { + MEMORY_MAP[index] = MemoryArea { + base_addr: base as u64, + length: size as u64, + _type: 1, + acpi: 0, + }; + } + } +} diff --git a/src/arch/aarch64/init/mod.rs b/src/arch/aarch64/init/mod.rs new file mode 100644 index 
0000000..16a16cd --- /dev/null +++ b/src/arch/aarch64/init/mod.rs @@ -0,0 +1 @@ +pub mod device_tree; diff --git a/src/arch/aarch64/init/pre_kstart/early_init.S b/src/arch/aarch64/init/pre_kstart/early_init.S new file mode 100644 index 0000000..0718d16 --- /dev/null +++ b/src/arch/aarch64/init/pre_kstart/early_init.S @@ -0,0 +1,51 @@ +// Early initialisation for AArch64 systems. +// +// This code is responsible for taking over control of the boot CPU from +// the bootloader and setting up enough of the CPU so Rust code can take +// over (in kstart). +// +// Readers are recommended to refer to the Arm Architecture Reference Manual +// when studying this code. The latest version of the Arm Arm can be found at: +// +// https://developer.arm.com/products/architecture/cpu-architecture/a-profile/docs +// +// The code is structured such that different phases/functionality are +// in separate files included by this central one. +// +// This is hopefully easier to grok and study than one gigantic file. +// +// The emphasis is on clarity and not optimisation. Clarity is hard without +// a decent understanding of the Arm architecture. +// +// Optimisation is not too much of a concern given that this is boot code. +// That said, future revisions will aim to optimise. + +#include "helpers/consts.h" + +#include "helpers/pre_mmu_enabled.S" +#include "helpers/build_page_tables.S" +#include "helpers/post_mmu_enabled.S" +#include "helpers/vectors.S" + +// Entry point for the boot CPU. We assume that x0 contains the physical address of a DTB image +// passed in by the bootloader. +// +// Note that the kernel linker script arranges for this code to lie at the start of the kernel +// image. 
+ + .text + .align 2 + .pushsection ".early_init.text", "ax" + .globl early_init +early_init: + bl early_setup + bl disable_mmu + bl create_page_tables + bl enable_mmu + b mmu_on_trampoline // With the mmu now on, this returns below to + // mmu_on using Virtual Addressing + +mmu_on: + bl setup_kstart_context // Setup environment for kstart + b kstart // Let the show begin! :) + .popsection diff --git a/src/arch/aarch64/init/pre_kstart/helpers/build_page_tables.S b/src/arch/aarch64/init/pre_kstart/helpers/build_page_tables.S new file mode 100644 index 0000000..5e141ed --- /dev/null +++ b/src/arch/aarch64/init/pre_kstart/helpers/build_page_tables.S @@ -0,0 +1,249 @@ + // Creates the following MMU mappings: + // + // 1. Identity mapping for the kernel (VA == PA) to be able to switch on the MMU + // 2. Mapping for the kernel with high VAs from KERNEL_OFFSET onwards + // 3. Mapping for the kernel stack + // 4. Mapping for the DTB Image + // 5. Optional Mapping for a diagnostic UART + +create_page_tables: + mov x22, x30 + adr x0, addr_marker // x0: Physical address of addr_marker + ldr x1, [x0] // x1: Virtual address of addr_marker + ldr x2, =KERNEL_OFFSET // x2: Virtual address of kernel base + sub x3, x1, x2 // x3: 'Distance' of addr_marker from kernel base + sub x0, x0, x3 // x0: Physical address of kernel base + mov x11,x0 // x11: Stash away the Physical address of the kernel image base + + ldr x1, =KERNEL_OFFSET // x1: Virtual address of kernel start addr + ldr x2, =__end // x2: Virtual address of kernel end addr + sub x12, x2, x1 // x12: Size of the kernel image + add x12, x12, #(0x200000) // x12: Align to 2MB (Add 2MB, then clear low bits if any) + and x3, x12, #0xffffffffffe00000 + cmp x12, #0x200, lsl #12 + csel x12, x3, x12, hi + add x13, x1, x12 // x13: Stack top vaddr (kbase.vaddr + ksize) + mov x14, #(EARLY_KSTACK_SIZE) // x14: Stack size + ldr x15, =KERNEL_OFFSET // x15: Kernel base vaddr + + // From this point on, the following registers are not to be 
modified for convenience: + // x11: PA of kernel image base + // x12: Kernel image size (2MB aligned) + // x13: VA of stack top + // x14: Stack size + // x15: VA of kernel Base + + // Zero out all the tables +zero_tables: + adr x0, identkmap_l0_ptable + mov x1, #(PAGE_SIZE) + mov x2, #(NUM_TABLES) // There are normally 12 tables to clear (2 L0, 5 L1, 5 L2, 1 env) + mul x1, x1, x2 + lsr x1, x1, #3 + mov x2, xzr +zero_loop: + str xzr, [x0, x2] + add x2, x2, #8 + cmp x1, x2 + b.ne zero_loop + + // Identity map the kernel + mov x0, x11 // x0: Paddr of kernel image base + mov x1, x11 // x1: Paddr of kernel image base + mov x2, x12 // x2: Kernel image size + mov x3, #(NORMAL_UNCACHED_MEM) // x3: Attributes to apply + adr x4, identkmap_l0_ptable // x5: Ptr to L0 table for identity mapping the kernel + adr x5, identkmap_l1_ptable // x6: Ptr to L1 table for identity mapping the kernel + adr x6, identkmap_l2_ptable // x7: Ptr to L2 table for identity mapping the kernel + bl build_map + + // Map the kernel + ldr x0, =KERNEL_OFFSET // x0: Vaddr of kernel base + mov x1, x11 // x1: Paddr of kernel base + mov x2, x12 // x2: Kernel image size + mov x3, #(NORMAL_CACHED_MEM) // x3: Attributes to apply + adr x4, kernmap_l0_ptable // x5: Ptr to L0 table for mapping the kernel + adr x5, kernmap_l1_ptable // x6: Ptr to L1 table for mapping the kernel + adr x6, kernmap_l2_ptable // x7: Ptr to L2 table for mapping the kernel + bl build_map + + // Map the kernel stack + ldr x0, =KERNEL_OFFSET // x0: Vaddr of kernel stack top + add x0, x0, x12 + sub x1, x11, x14 // x1: Paddr of kernel stack top (kbase.paddr - kstack size) + mov x2, #(EARLY_KSTACK_SIZE) // x2: Size of kernel stack + mov x3, #(NORMAL_CACHED_MEM) // x3: Attributes to apply + adr x4, kernmap_l0_ptable // x5: Ptr to the kernel L0 table + adr x5, kstack_l1_ptable // x6: Ptr to L1 table for mapping the kernel stack + adr x6, kstack_l2_ptable // x7: Ptr to L2 table for mapping the kernel stack + bl build_map + + // Map first 
GIGABYTE at PHYS_OFFSET + mov x1, #0 // x1: Physical address + adr x6, physmap_1gb_l2_ptable // x7: Ptr to L2 table + bl build_physmap + + // Map second GIGABYTE at PHYS_OFFSET + GIGABYTE + mov x1, #(GIGABYTE) // x1: Physical address + adr x6, physmap_2gb_l2_ptable // x7: Ptr to L2 table + bl build_physmap + + // Map third GIGABYTE at PHYS_OFFSET + 2*GIGABYTE + mov x1, #(2*GIGABYTE) // x1: Physical address + adr x6, physmap_3gb_l2_ptable // x7: Ptr to L2 table + bl build_physmap + + // Map fourth GIGABYTE at PHYS_OFFSET + 3*GIGABYTE + mov x1, #(3*GIGABYTE) // x1: Physical address + adr x6, physmap_4gb_l2_ptable // x7: Ptr to L2 table + bl build_physmap + + // Set up recursive paging for TTBR1 + + adr x0, kernmap_l0_ptable + add x1, x0, #(511 * 8) + orr x0, x0, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT)) + orr x0, x0, #(ACCESS_FLAG_BIT) + str x0, [x1] + + // Set up recursive paging for TTBR0 + + adr x0, identkmap_l0_ptable + add x1, x0, #(511 * 8) + orr x0, x0, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT)) + orr x0, x0, #(ACCESS_FLAG_BIT) + str x0, [x1] + + mov x30, x22 + + ret + +// Add a physmap entry +// x1: physical address, a multiple of GIGABYTE +// x6: address of l2 page table +build_physmap: + ldr x0, =DEVMAP_VBASE // x0: Virtual address + add x0, x0, x1 + mov x2, #(GIGABYTE - 1) // x2: Size (minus one to work around errors) + mov x3, #(DEVICE_MEM) // x3: Attributes to apply + adr x4, kernmap_l0_ptable // x5: Ptr to L0 table + adr x5, physmap_l1_ptable // x6: Ptr to L1 table + b build_map + + // Generic routine to build mappings. Requires the following inputs: + // + // x0: Vaddr to map to Paddr + // x1: Paddr to map Vaddr to + // x2: Length (in bytes) of region to map + // x3: Region attributes + // x4: Paddr of L0 table to use for mapping + // x5: Paddr of L1 table to use for mapping + // x6: Paddr of L2 table to use for mapping + // + // To keep things simple everything is mapped using 2MB blocks. 
This implies that the length + // is explicitly aligned to 2MB to prevent any translation aliases. Since block translations + // at L2 cover 2MB blocks, that suits us nicely so everything uses 2MB L2 blocks. Wasteful + // perhaps but at this stage it's convenient and in any case will get ripped out and + // reprogrammed in kstart. + +build_map: + lsr x8, x0, #39 // First group of 9 bits of VA + and x8, x8, #0x1ff + lsl x8, x8, #3 // x8: Index into L0 table + ldr x9, [x4, x8] + cbnz x9, l1_idx_prefilled + + mov x9, x5 // Get L1 base + bfm w9, wzr, #0, #11 + orr x9, x9, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT)) + orr x9, x9, #(ACCESS_FLAG_BIT) + str x9, [x4, x8] // L0[Index]: L1 + +l1_idx_prefilled: + lsr x8, x0, #30 // Second group of 9 bits of VA + and x8, x8, #0x1ff + lsl x8, x8, #3 // x8: Index into L1 table + ldr x9, [x5, x8] + cbnz x9, l2_idx_prefilled + +build_map_l2: + mov x9, x6 // Get L2 base + bfm w9, wzr, #0, #11 + orr x9, x9, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT)) + orr x9, x9, #(ACCESS_FLAG_BIT) + lsl x4, x3, #2 + orr x9, x9, x4 + str x9, [x5, x8] // L1[Index]: Base of L2 table + +l2_idx_prefilled: + lsr x2, x2, #21 // Number of 2MB blocks needed */ + add x2, x2, #1 //TODO: remove this and remove workarounds + + lsr x8, x0, #21 // Third group of 9 bits of VA + and x8, x8, #0x1ff + lsl x8, x8, #3 // x8: Index into L2 table + ldr x9, [x6, x8] + cbnz x9, build_map_error + +build_map_l2_loop: + mov x9, x1 + bfm w9, wzr, #0, #11 + orr x9, x9, #((DESC_TYPE_BLOCK << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT)) + orr x9, x9, #(ACCESS_FLAG_BIT) + lsl x4, x3, #2 + orr x9, x9, x4 + ldr x10, [x6, x8] + mov x7, #(DESC_VALID << DESC_VALID_BIT) + and x10, x10, x7 + cmp x10, x7 + b.eq build_map_error + str x9, [x6, x8] // L2[Index]: PA of 2MB region to map to + + mov x9, #1 + add x1, x1, x9, lsl #21 + add x8, x8, #8 + sub x2, x2, #1 + cbnz x2, build_map_l2_loop + + ret + +build_map_error: + wfi + b 
build_map_error + + // Statically allocated tables consumed by build_map. + + .align 12 +identkmap_l0_ptable: + .space PAGE_SIZE +identkmap_l1_ptable: + .space PAGE_SIZE +identkmap_l2_ptable: + .space PAGE_SIZE +kernmap_l0_ptable: + .space PAGE_SIZE +kernmap_l1_ptable: + .space PAGE_SIZE +kernmap_l2_ptable: + .space PAGE_SIZE +kstack_l1_ptable: + .space PAGE_SIZE +kstack_l2_ptable: + .space PAGE_SIZE +physmap_l1_ptable: + .space PAGE_SIZE +physmap_1gb_l2_ptable: + .space PAGE_SIZE +physmap_2gb_l2_ptable: + .space PAGE_SIZE +physmap_3gb_l2_ptable: + .space PAGE_SIZE +physmap_4gb_l2_ptable: + .space PAGE_SIZE +env_region: + .space PAGE_SIZE + + // Misc scratch memory used by this file + +addr_marker: + .quad addr_marker diff --git a/src/arch/aarch64/init/pre_kstart/helpers/consts.h b/src/arch/aarch64/init/pre_kstart/helpers/consts.h new file mode 100644 index 0000000..d64ac8c --- /dev/null +++ b/src/arch/aarch64/init/pre_kstart/helpers/consts.h @@ -0,0 +1,26 @@ +#define PAGE_SIZE 4096 +#define GIGABYTE 0x40000000 +#define VIRT_BITS 48 +#define NUM_TABLES 14 + +#define EARLY_KSTACK_SIZE (PAGE_SIZE) // Initial stack + +#define DEVMAP_VBASE 0xfffffe0000000000 + +#define SCTLR_M 0x00000001 // SCTLR_M bit used to control MMU on/off + +#define DEVICE_MEM 0 // Memory type specifiers +#define NORMAL_UNCACHED_MEM 1 +#define NORMAL_CACHED_MEM 2 + +#define DESC_VALID_BIT 0 // Descriptor validity setting +#define DESC_VALID 1 +#define DESC_INVALID 0 + +#define DESC_TYPE_BIT 1 // Descriptor type +#define DESC_TYPE_TABLE 1 +#define DESC_TYPE_PAGE 1 +#define DESC_TYPE_BLOCK 0 + +#define BLOCK_DESC_MASK (~((0xffff << 48) | (0xffff))) // Convenience mask for block desciptors +#define ACCESS_FLAG_BIT (1 << 10) diff --git a/src/arch/aarch64/init/pre_kstart/helpers/post_mmu_enabled.S b/src/arch/aarch64/init/pre_kstart/helpers/post_mmu_enabled.S new file mode 100644 index 0000000..cc81b9e --- /dev/null +++ b/src/arch/aarch64/init/pre_kstart/helpers/post_mmu_enabled.S @@ -0,0 +1,95 @@ + 
// Populates misc arguments, sets up the stack, clears all other registers. + +setup_kstart_context: + adr x0, args.kernel_base // Physical address of kernel base + str x11, [x0] + + adr x0, args.kernel_size // Size of kernel image + str x12, [x0] + + adr x0, args.stack_base // Virtual address of kernel stack base + ldr x1, =KERNEL_OFFSET + add x1, x1, x12 + str x1, [x0] + + adr x0, args.stack_size // Size of kernel stack + mov x1, #(EARLY_KSTACK_SIZE) + str x1, [x0] + + adr x0, args.env_base // Virtual address of environment base + adr x1, env_region_marker + ldr x1, [x1] + str x1, [x0] + + adr x0, args.env_size // Size of environment (populated later in kstart) + ldr x1, =PAGE_SIZE + str x1, [x0] + + adr x0, args.dtb_base // Physical address of DTB Image's base + str x19, [x0] + + adr x0, args.dtb_size // Size of DTB image + mov w1, w21 + str w1, [x0] + + add x1, x15, x12 // Initialize the stack pointer, everything is 2MB aligned + add x1, x1, x14 // sp = (kbase.vaddr + ksize + stksize) - sizeof(word) + sub x1, x1, #16 + mov sp, x1 + + adr x0, tmp_zero // Store a zero at tmp_zero + str xzr, [x0] // Note: x0 points to addr_marker so we use it below as-is + + ldp x2, x3, [x0, #0]! // Zero x1:x31 + ldp x4, x5, [x0, #0]! + ldp x6, x7, [x0, #0]! + ldp x8, x9, [x0, #0]! + ldp x10, x11, [x0, #0]! + ldp x12, x13, [x0, #0]! + ldp x14, x15, [x0, #0]! + ldp x16, x17, [x0, #0]! + ldp x18, x19, [x0, #0]! + ldp x20, x21, [x0, #0]! + ldp x22, x23, [x0, #0]! + ldp x24, x25, [x0, #0]! + ldp x26, x27, [x0, #0]! + ldp x28, x29, [x0, #0]! + + ldr x0, =args.kernel_base // x0 = Start of argument block + mov x1, #0 + + ret + +mmu_on_trampoline: + adr x0, mmu_on_marker // x0: paddr of mmu_on_marker + ldr x0, [x0] // x0: vaddr of mmu_on + br x0 // MMU now On. Jump to mmu_on using it's vaddr + + // Statically allocated space to hold misc arguments for kstart. 
+ + .align 3 +args.kernel_base: + .space 8 +args.kernel_size: + .space 8 +args.stack_base: + .space 8 +args.stack_size: + .space 8 +args.env_base: + .space 8 +args.env_size: + .space 8 +args.dtb_base: + .space 8 +args.dtb_size: + .space 8 + + // Misc scratch memory used by this file + +env_region_marker: + .quad env_region +mmu_on_marker: + .quad mmu_on +tmp_zero: + .quad tmp_zero diff --git a/src/arch/aarch64/init/pre_kstart/helpers/pre_mmu_enabled.S b/src/arch/aarch64/init/pre_kstart/helpers/pre_mmu_enabled.S new file mode 100644 index 0000000..4fabb48 --- /dev/null +++ b/src/arch/aarch64/init/pre_kstart/helpers/pre_mmu_enabled.S @@ -0,0 +1,66 @@ + // Stashes DTB size for use later + // Sets up the exception vectors +early_setup: + mov x19, x0 // Store paddr of DTB in x19 + ldr w21, [x0, #4] // x0[4] has the DTB size in Big Endian Format + rev w21, w21 // Swizzle to little endian + + msr contextidr_el1, xzr // Set contextID reg + dsb sy + + ldr x0, =exception_vector_base + msr vbar_el1, x0 + + ret + +disable_mmu: + mrs x0, sctlr_el1 + bic x0, x0, SCTLR_M + msr sctlr_el1, x0 + isb + + ret + + + // Programs the TTBR registers, MAIR registers, TCR and SCTLR registers. 
+enable_mmu: + dsb sy + + adr x0, identkmap_l0_ptable // Setup TTBRx_EL1 + msr ttbr0_el1, x0 // ttbr0_el1: Lower vaddrs + adr x1, kernmap_l0_ptable + msr ttbr1_el1, x1 // ttbr1_el1: Higher vaddrs + isb + + tlbi vmalle1is // Invalidate the TLB + + ldr x2, mair // Setup MAIR + msr mair_el1, x2 + + ldr x2, tcr // Setup TCR ()ID_AA64MMFR0_EL1) + mrs x3, id_aa64mmfr0_el1 + bfi x2, x3, #32, #3 + msr tcr_el1, x2 + isb + + ldr x2, sctlr_set_bits // Setup SCTLR + ldr x3, sctlr_clr_bits + mrs x1, sctlr_el1 + bic x1, x1, x3 + orr x1, x1, x2 + msr sctlr_el1, x1 + isb + mrs x1, sctlr_el1 + + ret + + // Magic config runes (Too much detail to enumerate here: grep the ARM ARM for details) + .align 3 +mair: + .quad 0xff4400 // MAIR: Arrange for Device, Normal Non-Cache, Normal Write-Back access types +tcr: + .quad 0x1085100510 // Setup TCR: (TxSZ, ASID_16, TG1_4K, Cache Attrs, SMP Attrs) +sctlr_set_bits: + .quad 0x3485d13d // Set SCTLR bits: (LSMAOE, nTLSMD, UCI, SPAN, nTWW, nTWI, UCT, DZE, I, SED, SA0, SA, C, M, CP15BEN) +sctlr_clr_bits: + .quad 0x32802c2 // Clear SCTLR bits: (EE, EOE, IESB, WXN, UMA, ITD, THEE, A) diff --git a/src/arch/aarch64/init/pre_kstart/helpers/vectors.S b/src/arch/aarch64/init/pre_kstart/helpers/vectors.S new file mode 100644 index 0000000..c368bae --- /dev/null +++ b/src/arch/aarch64/init/pre_kstart/helpers/vectors.S @@ -0,0 +1,101 @@ + // Exception vector stubs + // + // The hex values in x18 are to aid debugging + // Unhandled exceptions spin in a wfi loop for the moment + // This can be macro-ified + + .align 11 +exception_vector_base: + + .align 7 +__vec_00: + mov x18, #0xb0b0 + wfi + b __vec_00 + + .align 7 +__vec_01: + mov x18, #0xb0b1 + wfi + b __vec_01 + + .align 7 +__vec_02: + mov x18, #0xb0b2 + wfi + b __vec_02 + + .align 7 +__vec_03: + mov x18, #0xb0b3 + wfi + b __vec_03 + + .align 7 +__vec_04: + b do_report_exception + wfi + b __vec_04 + + .align 7 +__vec_05: + b do_irq // First level interrupt handler + wfi + b __vec_05 + + .align 7 
+__vec_06: + mov x18, #0xb0b6 + wfi + b __vec_06 + + .align 7 +__vec_07: + mov x18, #0xb0b7 + wfi + b __vec_07 + + .align 7 +__vec_08: + b do_syscall // Syscall handler + wfi + b __vec_08 + + .align 7 +__vec_09: + b do_irq // First level interrupt handler + wfi + b __vec_09 + + .align 7 +__vec_10: + mov x18, #0xb0bb + wfi + b __vec_10 + + .align 7 +__vec_11: + mov x18, #0xb0bc + wfi + b __vec_11 + + .align 7 +__vec_12: + mov x18, #0xb0bd + wfi + b __vec_12 + + .align 7 +__vec_13: + mov x18, #0xb0be + wfi + b __vec_13 + + .align 7 +__vec_14: + mov x18, #0xb0bf + wfi + b __vec_14 + + .align 7 +exception_vector_end: diff --git a/src/arch/aarch64/interrupt/handler.rs b/src/arch/aarch64/interrupt/handler.rs new file mode 100644 index 0000000..17f48ae --- /dev/null +++ b/src/arch/aarch64/interrupt/handler.rs @@ -0,0 +1,116 @@ +#[derive(Default)] +#[repr(packed)] +pub struct ScratchRegisters { + pub x18: usize, + pub x17: usize, + pub x16: usize, + pub x15: usize, + pub x14: usize, + pub x13: usize, + pub x12: usize, + pub x11: usize, + pub x10: usize, + pub x9: usize, + pub x8: usize, + pub x7: usize, + pub x6: usize, + pub x5: usize, + pub x4: usize, + pub x3: usize, + pub x2: usize, + pub x1: usize, + pub x0: usize, +} + +impl ScratchRegisters { + pub fn dump(&self) { + println!("X0: {:>016X}", { self.x0 }); + println!("X1: {:>016X}", { self.x1 }); + println!("X2: {:>016X}", { self.x2 }); + println!("X3: {:>016X}", { self.x3 }); + println!("X4: {:>016X}", { self.x4 }); + println!("X5: {:>016X}", { self.x5 }); + println!("X6: {:>016X}", { self.x6 }); + println!("X7: {:>016X}", { self.x7 }); + println!("X8: {:>016X}", { self.x8 }); + println!("X9: {:>016X}", { self.x9 }); + println!("X10: {:>016X}", { self.x10 }); + println!("X11: {:>016X}", { self.x11 }); + println!("X12: {:>016X}", { self.x12 }); + println!("X13: {:>016X}", { self.x13 }); + println!("X14: {:>016X}", { self.x14 }); + println!("X15: {:>016X}", { self.x15 }); + println!("X16: {:>016X}", { self.x16 }); + 
println!("X17: {:>016X}", { self.x17 }); + println!("X18: {:>016X}", { self.x18 }); + } +} + +#[derive(Default)] +#[repr(packed)] +pub struct PreservedRegisters { + //TODO: is X30 a preserved register? + pub x30: usize, + pub x29: usize, + pub x28: usize, + pub x27: usize, + pub x26: usize, + pub x25: usize, + pub x24: usize, + pub x23: usize, + pub x22: usize, + pub x21: usize, + pub x20: usize, + pub x19: usize, +} + +impl PreservedRegisters { + pub fn dump(&self) { + println!("X19: {:>016X}", { self.x19 }); + println!("X20: {:>016X}", { self.x20 }); + println!("X21: {:>016X}", { self.x21 }); + println!("X22: {:>016X}", { self.x22 }); + println!("X23: {:>016X}", { self.x23 }); + println!("X24: {:>016X}", { self.x24 }); + println!("X25: {:>016X}", { self.x25 }); + println!("X26: {:>016X}", { self.x26 }); + println!("X27: {:>016X}", { self.x27 }); + println!("X28: {:>016X}", { self.x28 }); + println!("X29: {:>016X}", { self.x29 }); + println!("X30: {:>016X}", { self.x30 }); + } +} + +#[derive(Default)] +#[repr(packed)] +pub struct InterruptStack { + pub elr_el1: usize, + //TODO: should this push be removed? 
+ pub unkknown: usize, + pub tpidr_el0: usize, + pub tpidrro_el0: usize, + pub spsr_el1: usize, + pub esr_el1: usize, + pub sp_el0: usize, + pub preserved: PreservedRegisters, + pub scratch: ScratchRegisters, + //TODO: eret registers +} + +impl InterruptStack { + pub fn dump(&self) { + self.scratch.dump(); + self.preserved.dump(); + println!("SP_EL0: {:>016X}", { self.sp_el0 }); + println!("ESR_EL1: {:>016X}", { self.esr_el1 }); + println!("SPSR_EL1: {:>016X}", { self.spsr_el1 }); + println!("TPIDRRO_EL0: {:>016X}", { self.tpidrro_el0 }); + println!("TPIDR_EL0: {:>016X}", { self.tpidr_el0 }); + println!("UNKNOWN: {:>016X}", { self.unkknown }); + println!("ELR_EL1: {:>016X}", { self.elr_el1 }); + } + + //TODO + pub fn is_singlestep(&self) -> bool { false } + pub fn set_singlestep(&mut self, singlestep: bool) {} +} diff --git a/src/arch/aarch64/interrupt/irq.rs b/src/arch/aarch64/interrupt/irq.rs new file mode 100644 index 0000000..72de8c1 --- /dev/null +++ b/src/arch/aarch64/interrupt/irq.rs @@ -0,0 +1,175 @@ +use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; + +use crate::context; +use crate::context::timeout; +use crate::device::generic_timer::{GENTIMER}; +use crate::device::{gic}; +use crate::device::serial::{COM1}; +use crate::time; + +//resets to 0 in context::switch() +pub static PIT_TICKS: AtomicUsize = ATOMIC_USIZE_INIT; + +#[naked] +#[no_mangle] +pub unsafe extern fn do_irq() { + #[inline(never)] + unsafe fn inner() { + irq_demux(); + } + + llvm_asm!("str x0, [sp, #-8]! + str x1, [sp, #-8]! + str x2, [sp, #-8]! + str x3, [sp, #-8]! + str x4, [sp, #-8]! + str x5, [sp, #-8]! + str x6, [sp, #-8]! + str x7, [sp, #-8]! + str x8, [sp, #-8]! + str x9, [sp, #-8]! + str x10, [sp, #-8]! + str x11, [sp, #-8]! + str x12, [sp, #-8]! + str x13, [sp, #-8]! + str x14, [sp, #-8]! + str x15, [sp, #-8]! + str x16, [sp, #-8]! + str x17, [sp, #-8]! + str x18, [sp, #-8]! + str x19, [sp, #-8]! + str x20, [sp, #-8]! + str x21, [sp, #-8]! + str x22, [sp, #-8]! 
+ str x23, [sp, #-8]! + str x24, [sp, #-8]! + str x25, [sp, #-8]! + str x26, [sp, #-8]! + str x27, [sp, #-8]! + str x28, [sp, #-8]! + str x29, [sp, #-8]! + str x30, [sp, #-8]! + + mrs x18, sp_el0 + str x18, [sp, #-8]! + + mrs x18, esr_el1 + str x18, [sp, #-8]! + + mrs x18, spsr_el1 + str x18, [sp, #-8]! + + mrs x18, tpidrro_el0 + str x18, [sp, #-8]! + + mrs x18, tpidr_el0 + str x18, [sp, #-8]! + + str x18, [sp, #-8]! + + mrs x18, elr_el1 + str x18, [sp, #-8]!" + : : : : "volatile"); + + inner(); + + llvm_asm!("ldr x18, [sp], #8 + msr elr_el1, x18 + + ldr x18, [sp], #8 + + ldr x18, [sp], #8 + msr tpidr_el0, x18 + + ldr x18, [sp], #8 + msr tpidrro_el0, x18 + + ldr x18, [sp], #8 + msr spsr_el1, x18 + + ldr x18, [sp], #8 + msr esr_el1, x18 + + ldr x18, [sp], #8 + msr sp_el0, x18 + + ldr x30, [sp], #8 + ldr x29, [sp], #8 + ldr x28, [sp], #8 + ldr x27, [sp], #8 + ldr x26, [sp], #8 + ldr x25, [sp], #8 + ldr x24, [sp], #8 + ldr x23, [sp], #8 + ldr x22, [sp], #8 + ldr x21, [sp], #8 + ldr x20, [sp], #8 + ldr x19, [sp], #8 + ldr x18, [sp], #8 + ldr x17, [sp], #8 + ldr x16, [sp], #8 + ldr x15, [sp], #8 + ldr x14, [sp], #8 + ldr x13, [sp], #8 + ldr x12, [sp], #8 + ldr x11, [sp], #8 + ldr x10, [sp], #8 + ldr x9, [sp], #8 + ldr x8, [sp], #8 + ldr x7, [sp], #8 + ldr x6, [sp], #8 + ldr x5, [sp], #8 + ldr x4, [sp], #8 + ldr x3, [sp], #8 + ldr x2, [sp], #8 + ldr x1, [sp], #8 + ldr x0, [sp], #8" + : : : : "volatile"); + + llvm_asm!("eret" :::: "volatile"); +} + +unsafe fn trigger(irq: u32) { + extern { + fn irq_trigger(irq: u32); + } + + irq_trigger(irq); + gic::irq_eoi(irq); +} + +pub unsafe fn acknowledge(_irq: usize) { +} + +pub unsafe fn irq_handler_com1(irq: u32) { + if let Some(ref mut serial_port) = *COM1.lock() { + serial_port.receive(); + }; + trigger(irq); +} + +pub unsafe fn irq_handler_gentimer(irq: u32) { + GENTIMER.clear_irq(); + { + let mut offset = time::OFFSET.lock(); + let sum = offset.1 + GENTIMER.clk_freq as u64; + offset.1 = sum % 1_000_000_000; + offset.0 += sum 
/ 1_000_000_000; + } + + timeout::trigger(); + + if PIT_TICKS.fetch_add(1, Ordering::SeqCst) >= 10 { + let _ = context::switch(); + } + trigger(irq); + GENTIMER.reload_count(); +} + +unsafe fn irq_demux() { + match gic::irq_ack() { + 30 => irq_handler_gentimer(30), + 33 => irq_handler_com1(33), + _ => panic!("irq_demux: unregistered IRQ"), + } +} diff --git a/src/arch/aarch64/interrupt/mod.rs b/src/arch/aarch64/interrupt/mod.rs new file mode 100644 index 0000000..b7451af --- /dev/null +++ b/src/arch/aarch64/interrupt/mod.rs @@ -0,0 +1,73 @@ +//! Interrupt instructions + +pub mod handler; +pub mod irq; +pub mod syscall; +pub mod trace; +pub mod unhandled_exceptions; + +pub use self::handler::InterruptStack; +pub use self::trace::stack_trace; + +/// Clear interrupts +#[inline(always)] +pub unsafe fn disable() { + llvm_asm!("msr daifset, #2"); +} + +/// Set interrupts +#[inline(always)] +pub unsafe fn enable() { + llvm_asm!("msr daifclr, #2"); +} + +/// Set interrupts and halt +/// This will atomically wait for the next interrupt +/// Performing enable followed by halt is not guaranteed to be atomic, use this instead! +#[inline(always)] +pub unsafe fn enable_and_halt() { + llvm_asm!("msr daifclr, #2"); + llvm_asm!("wfi"); +} + +/// Set interrupts and nop +/// This will enable interrupts and allow the IF flag to be processed +/// Simply enabling interrupts does not gurantee that they will trigger, use this instead! 
+#[inline(always)] +pub unsafe fn enable_and_nop() { + llvm_asm!("msr daifclr, #2"); + llvm_asm!("nop"); +} + +/// Halt instruction +#[inline(always)] +pub unsafe fn halt() { + llvm_asm!("wfi"); +} + +/// Pause instruction +/// Safe because it is similar to a NOP, and has no memory effects +#[inline(always)] +pub fn pause() { + unsafe { llvm_asm!("wfi") }; +} + +pub fn available_irqs_iter(cpu_id: usize) -> impl Iterator + 'static { + 0..0 +} + +pub fn bsp_apic_id() -> Option { + //TODO + None +} + +#[inline] +pub fn is_reserved(cpu_id: usize, index: u8) -> bool { + //TODO + true +} + +#[inline] +pub fn set_reserved(cpu_id: usize, index: u8, reserved: bool) { + //TODO +} diff --git a/src/arch/aarch64/interrupt/syscall.rs b/src/arch/aarch64/interrupt/syscall.rs new file mode 100644 index 0000000..26f47a0 --- /dev/null +++ b/src/arch/aarch64/interrupt/syscall.rs @@ -0,0 +1,197 @@ +use crate::interrupt::InterruptStack; +use crate::syscall; + +#[naked] +#[no_mangle] +pub unsafe extern fn do_syscall() { + #[inline(never)] + unsafe fn inner(stack: &mut InterruptStack) -> usize { + let fp; + llvm_asm!("" : "={fp}"(fp) : : : "volatile"); + + syscall::syscall( + stack.scratch.x8, + stack.scratch.x0, + stack.scratch.x1, + stack.scratch.x2, + stack.scratch.x3, + stack.scratch.x4, + fp, + stack + ) + } + + llvm_asm!("str x0, [sp, #-8]! + str x1, [sp, #-8]! + str x2, [sp, #-8]! + str x3, [sp, #-8]! + str x4, [sp, #-8]! + str x5, [sp, #-8]! + str x6, [sp, #-8]! + str x7, [sp, #-8]! + str x8, [sp, #-8]! + str x9, [sp, #-8]! + str x10, [sp, #-8]! + str x11, [sp, #-8]! + str x12, [sp, #-8]! + str x13, [sp, #-8]! + str x14, [sp, #-8]! + str x15, [sp, #-8]! + str x16, [sp, #-8]! + str x17, [sp, #-8]! + str x18, [sp, #-8]! + str x19, [sp, #-8]! + str x20, [sp, #-8]! + str x21, [sp, #-8]! + str x22, [sp, #-8]! + str x23, [sp, #-8]! + str x24, [sp, #-8]! + str x25, [sp, #-8]! + str x26, [sp, #-8]! + str x27, [sp, #-8]! + str x28, [sp, #-8]! + str x29, [sp, #-8]! + str x30, [sp, #-8]! 
+ + mrs x18, sp_el0 + str x18, [sp, #-8]! + + mrs x18, esr_el1 + str x18, [sp, #-8]! + + mrs x18, spsr_el1 + str x18, [sp, #-8]! + + mrs x18, tpidrro_el0 + str x18, [sp, #-8]! + + mrs x18, tpidr_el0 + str x18, [sp, #-8]! + + str x18, [sp, #-8]! + + mrs x18, elr_el1 + str x18, [sp, #-8]!" + : : : : "volatile"); + + let sp: usize; + llvm_asm!("" : "={sp}"(sp) : : : "volatile"); + llvm_asm!("mov x29, sp" : : : : "volatile"); + + let a = inner(&mut *(sp as *mut InterruptStack)); + + llvm_asm!("" : : "{x0}"(a) : : "volatile"); + + llvm_asm!("ldr x18, [sp], #8 + msr elr_el1, x18 + + ldr x18, [sp], #8 + + ldr x18, [sp], #8 + msr tpidr_el0, x18 + + ldr x18, [sp], #8 + msr tpidrro_el0, x18 + + ldr x18, [sp], #8 + msr spsr_el1, x18 + + ldr x18, [sp], #8 + msr esr_el1, x18 + + ldr x18, [sp], #8 + msr sp_el0, x18 + + ldr x30, [sp], #8 + ldr x29, [sp], #8 + ldr x28, [sp], #8 + ldr x27, [sp], #8 + ldr x26, [sp], #8 + ldr x25, [sp], #8 + ldr x24, [sp], #8 + ldr x23, [sp], #8 + ldr x22, [sp], #8 + ldr x21, [sp], #8 + ldr x20, [sp], #8 + ldr x19, [sp], #8 + ldr x18, [sp], #8 + ldr x17, [sp], #8 + ldr x16, [sp], #8 + ldr x15, [sp], #8 + ldr x14, [sp], #8 + ldr x13, [sp], #8 + ldr x12, [sp], #8 + ldr x11, [sp], #8 + ldr x10, [sp], #8 + ldr x9, [sp], #8 + ldr x8, [sp], #8 + ldr x7, [sp], #8 + ldr x6, [sp], #8 + ldr x5, [sp], #8 + ldr x4, [sp], #8 + ldr x3, [sp], #8 + ldr x2, [sp], #8 + ldr x1, [sp], #8 + add sp, sp, #8" /* Skip over x0 - it's got the retval of inner already */ + : : : : "volatile"); + + llvm_asm!("eret" :::: "volatile"); +} + +#[allow(dead_code)] +#[repr(packed)] +pub struct SyscallStack { + pub elr_el1: usize, + pub padding: usize, + pub tpidr: usize, + pub tpidrro: usize, + pub rflags: usize, + pub esr: usize, + pub sp: usize, + pub lr: usize, + pub fp: usize, + pub x28: usize, + pub x27: usize, + pub x26: usize, + pub x25: usize, + pub x24: usize, + pub x23: usize, + pub x22: usize, + pub x21: usize, + pub x20: usize, + pub x19: usize, + pub x18: usize, + pub x17: 
usize, + pub x16: usize, + pub x15: usize, + pub x14: usize, + pub x13: usize, + pub x12: usize, + pub x11: usize, + pub x10: usize, + pub x9: usize, + pub x8: usize, + pub x7: usize, + pub x6: usize, + pub x5: usize, + pub x4: usize, + pub x3: usize, + pub x2: usize, + pub x1: usize, + pub x0: usize, +} + +#[naked] +pub unsafe extern fn clone_ret() { + llvm_asm!("ldp x29, x30, [sp], #16"); + llvm_asm!("mov x0, 0"); +} + +/* +#[naked] +pub unsafe extern fn clone_ret() { + llvm_asm!("add sp, sp, #16"); + llvm_asm!("ldp x29, x30, [sp], #16"); + llvm_asm!("mov x0, 0"); +} +*/ diff --git a/src/arch/aarch64/interrupt/trace.rs b/src/arch/aarch64/interrupt/trace.rs new file mode 100644 index 0000000..7db7bf1 --- /dev/null +++ b/src/arch/aarch64/interrupt/trace.rs @@ -0,0 +1,144 @@ +use core::mem; +use goblin::elf::sym; + +use crate::paging::{ActivePageTable, PageTableType, VirtualAddress}; + +/// Get a stack trace +//TODO: Check for stack being mapped before dereferencing +#[inline(never)] +pub unsafe fn stack_trace() { + let mut fp: usize; + llvm_asm!("" : "={fp}"(fp) : : : "volatile"); + + println!("TRACE: {:>016x}", fp); + //Maximum 64 frames + let active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + for _frame in 0..64 { + if let Some(pc_fp) = fp.checked_add(mem::size_of::()) { + if active_table.translate(VirtualAddress::new(fp)).is_some() && active_table.translate(VirtualAddress::new(pc_fp)).is_some() { + let pc = *(pc_fp as *const usize); + if pc == 0 { + println!(" {:>016x}: EMPTY RETURN", fp); + break; + } + println!(" {:>016x}: {:>016x}", fp, pc); + fp = *(fp as *const usize); +// symbol_trace(pc); + } else { + println!(" {:>016x}: GUARD PAGE", fp); + break; + } + } else { + println!(" {:>016x}: fp OVERFLOW", fp); + } + } +} +/// +/// Get a symbol +//TODO: Do not create Elf object for every symbol lookup +#[inline(never)] +pub unsafe fn symbol_trace(addr: usize) { + use core::slice; + use core::sync::atomic::Ordering; + + use crate::elf::Elf; 
+ use crate::start::{KERNEL_BASE, KERNEL_SIZE}; + + let kernel_ptr = (KERNEL_BASE.load(Ordering::SeqCst) + crate::KERNEL_OFFSET) as *const u8; + let kernel_slice = slice::from_raw_parts(kernel_ptr, KERNEL_SIZE.load(Ordering::SeqCst)); + + println!("symbol_trace: 0, kernel_ptr = 0x{:x}", kernel_ptr as usize); + + match Elf::from(kernel_slice) { + Ok(elf) => { + println!("symbol_trace: 1"); + let mut strtab_opt = None; + for section in elf.sections() { + if section.sh_type == ::goblin::elf::section_header::SHT_STRTAB { + strtab_opt = Some(section); + break; + } + } + + println!("symbol_trace: 2"); + + if let Some(symbols) = elf.symbols() { + println!("symbol_trace: 3"); + for sym in symbols { + if sym::st_type(sym.st_info) == sym::STT_FUNC + && addr >= sym.st_value as usize + && addr < (sym.st_value + sym.st_size) as usize + { + println!(" {:>016X}+{:>04X}", sym.st_value, addr - sym.st_value as usize); + + if let Some(strtab) = strtab_opt { + let start = strtab.sh_offset as usize + sym.st_name as usize; + let mut end = start; + while end < elf.data.len() { + let b = elf.data[end]; + end += 1; + if b == 0 { + break; + } + } + + if end > start { + let sym_name = &elf.data[start .. 
end]; + + print!(" "); + + if sym_name.starts_with(b"_ZN") { + // Skip _ZN + let mut i = 3; + let mut first = true; + while i < sym_name.len() { + // E is the end character + if sym_name[i] == b'E' { + break; + } + + // Parse length string + let mut len = 0; + while i < sym_name.len() { + let b = sym_name[i]; + if b >= b'0' && b <= b'9' { + i += 1; + len *= 10; + len += (b - b'0') as usize; + } else { + break; + } + } + + // Print namespace seperator, if required + if first { + first = false; + } else { + print!("::"); + } + + // Print name string + let end = i + len; + while i < sym_name.len() && i < end { + print!("{}", sym_name[i] as char); + i += 1; + } + } + } else { + for &b in sym_name.iter() { + print!("{}", b as char); + } + } + + println!(""); + } + } + } + } + } + }, + Err(_e) => { + println!("WTF ?"); + } + } +} diff --git a/src/arch/aarch64/interrupt/unhandled_exceptions.rs b/src/arch/aarch64/interrupt/unhandled_exceptions.rs new file mode 100644 index 0000000..6897d89 --- /dev/null +++ b/src/arch/aarch64/interrupt/unhandled_exceptions.rs @@ -0,0 +1,145 @@ +use crate::{ + context, + cpu_id, + interrupt::{self, InterruptStack, stack_trace}, +}; + +bitflags! { + pub struct ExceptionClasses: u32 { + const SVC_INSN_IN_AARCH64_STATE = 0b10101 << 26; + const DATA_ABORT_FROM_LOWER_EL = 0b100100 << 26; + const BKPT_INSN_IN_AARCH64_STATE = 0b111100 << 26; + } +} + +#[inline(never)] +pub unsafe extern fn report_exception(stack: &InterruptStack) { + println!("Unhandled exception"); + + stack.dump(); + stack_trace(); + + println!("CPU {}, PID {:?}", cpu_id(), context::context_id()); + //WARNING: name cannot be grabed, it may deadlock + + println!("HALT"); + loop { + interrupt::halt(); + } +} + +#[naked] +#[no_mangle] +pub unsafe extern fn do_report_exception() { + llvm_asm!("str x0, [sp, #-8]! + str x1, [sp, #-8]! + str x2, [sp, #-8]! + str x3, [sp, #-8]! + str x4, [sp, #-8]! + str x5, [sp, #-8]! + str x6, [sp, #-8]! + str x7, [sp, #-8]! + str x8, [sp, #-8]! 
+ str x9, [sp, #-8]! + str x10, [sp, #-8]! + str x11, [sp, #-8]! + str x12, [sp, #-8]! + str x13, [sp, #-8]! + str x14, [sp, #-8]! + str x15, [sp, #-8]! + str x16, [sp, #-8]! + str x17, [sp, #-8]! + str x18, [sp, #-8]! + str x19, [sp, #-8]! + str x20, [sp, #-8]! + str x21, [sp, #-8]! + str x22, [sp, #-8]! + str x23, [sp, #-8]! + str x24, [sp, #-8]! + str x25, [sp, #-8]! + str x26, [sp, #-8]! + str x27, [sp, #-8]! + str x28, [sp, #-8]! + str x29, [sp, #-8]! + str x30, [sp, #-8]! + + mrs x18, sp_el0 + str x18, [sp, #-8]! + + mrs x18, esr_el1 + str x18, [sp, #-8]! + + mrs x18, spsr_el1 + str x18, [sp, #-8]! + + mrs x18, tpidrro_el0 + str x18, [sp, #-8]! + + mrs x18, tpidr_el0 + str x18, [sp, #-8]! + + str x18, [sp, #-8]! + + mrs x18, elr_el1 + str x18, [sp, #-8]!" + : : : : "volatile"); + + let sp: usize; + llvm_asm!("" : "={sp}"(sp) : : : "volatile"); + report_exception(&*(sp as *const InterruptStack)); + + llvm_asm!("ldr x18, [sp], #8 + msr elr_el1, x18 + + ldr x18, [sp], #8 + + ldr x18, [sp], #8 + msr tpidr_el0, x18 + + ldr x18, [sp], #8 + msr tpidrro_el0, x18 + + ldr x18, [sp], #8 + msr spsr_el1, x18 + + ldr x18, [sp], #8 + msr esr_el1, x18 + + ldr x18, [sp], #8 + msr sp_el0, x18 + + ldr x30, [sp], #8 + ldr x29, [sp], #8 + ldr x28, [sp], #8 + ldr x27, [sp], #8 + ldr x26, [sp], #8 + ldr x25, [sp], #8 + ldr x24, [sp], #8 + ldr x23, [sp], #8 + ldr x22, [sp], #8 + ldr x21, [sp], #8 + ldr x20, [sp], #8 + ldr x19, [sp], #8 + ldr x18, [sp], #8 + ldr x17, [sp], #8 + ldr x16, [sp], #8 + ldr x15, [sp], #8 + ldr x14, [sp], #8 + ldr x13, [sp], #8 + ldr x12, [sp], #8 + ldr x11, [sp], #8 + ldr x10, [sp], #8 + ldr x9, [sp], #8 + ldr x8, [sp], #8 + ldr x7, [sp], #8 + ldr x6, [sp], #8 + ldr x5, [sp], #8 + ldr x4, [sp], #8 + ldr x3, [sp], #8 + ldr x2, [sp], #8 + ldr x1, [sp], #8 + ldr x0, [sp], #8" + : : : : "volatile"); + + llvm_asm!("eret" :::: "volatile"); +} diff --git a/src/arch/aarch64/ipi.rs b/src/arch/aarch64/ipi.rs new file mode 100644 index 0000000..3f8e5cd --- /dev/null 
+++ b/src/arch/aarch64/ipi.rs @@ -0,0 +1,24 @@ +#[derive(Clone, Copy, Debug)] +#[repr(u8)] +pub enum IpiKind { + Wakeup = 0x40, + Tlb = 0x41, + Switch = 0x42, + Pit = 0x43, +} + +#[derive(Clone, Copy, Debug)] +#[repr(u8)] +pub enum IpiTarget { + Current = 1, + All = 2, + Other = 3, +} + +#[cfg(not(feature = "multi_core"))] +#[inline(always)] +pub fn ipi(_kind: IpiKind, _target: IpiTarget) {} + +#[cfg(feature = "multi_core")] +#[inline(always)] +pub fn ipi(kind: IpiKind, target: IpiTarget) {} diff --git a/src/arch/aarch64/macros.rs b/src/arch/aarch64/macros.rs new file mode 100644 index 0000000..4e3566f --- /dev/null +++ b/src/arch/aarch64/macros.rs @@ -0,0 +1,16 @@ +/// Print to console +#[macro_export] +macro_rules! print { + ($($arg:tt)*) => ({ + use core::fmt::Write; + let _ = write!($crate::arch::debug::Writer::new(), $($arg)*); + }); +} + +/// Print with new line to console +#[macro_export] +macro_rules! println { + () => (print!("\n")); + ($fmt:expr) => (print!(concat!($fmt, "\n"))); + ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*)); +} diff --git a/src/arch/aarch64/mod.rs b/src/arch/aarch64/mod.rs new file mode 100644 index 0000000..dd140e6 --- /dev/null +++ b/src/arch/aarch64/mod.rs @@ -0,0 +1,31 @@ +#[macro_use] +pub mod macros; + +/// Constants like memory locations +pub mod consts; + +/// Debugging support +pub mod debug; + +/// Devices +pub mod device; + +/// Interrupt instructions +pub mod interrupt; + +/// Inter-processor interrupts +pub mod ipi; + +/// Paging +pub mod paging; + +pub mod rmm; + +/// Initialization and start function +pub mod start; + +/// Stop function +pub mod stop; + +/// Early init support +pub mod init; diff --git a/src/arch/aarch64/paging/entry.rs b/src/arch/aarch64/paging/entry.rs new file mode 100644 index 0000000..cee4701 --- /dev/null +++ b/src/arch/aarch64/paging/entry.rs @@ -0,0 +1,163 @@ +//! # Page table entry +//! 
Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html) + +use crate::memory::Frame; + +use super::PhysicalAddress; + +/// A page table entry +#[derive(Debug)] +pub struct Entry(u64); + +/// A page descriptor +#[derive(Debug)] +pub struct PageDescriptor(u64); + +bitflags! { + pub struct TableDescriptorFlags: u64 { + const VALID = 1 << 0; + const TABLE = 1 << 1; + const AF = 1 << 10; /* NOTE: TableDescriptors don't actually have an AF bit! */ + const PRESENT = 1 << 58; /* ARM ARM says this is an IGNORED bit, so using it here should be OK */ + const PXNTABLE = 1 << 59; + const UXNTABLE = 1 << 60; + const APTABLE_0 = 1 << 61; + const APTABLE_1 = 1 << 62; + const SUBLEVEL_NO_EL0_ACCESS = (0 << 62) | (1 << 61); + const SUBLEVEL_NO_WANY_ACCESS = (1 << 62) | (0 << 61); + const SUBLEVEL_NO_WANY_NO_REL0 = (1 << 62) | (1 << 61); + const NSTABLE = 1 << 63; + } +} + +bitflags! { + pub struct PageDescriptorFlags: u64 { + const VALID = 1 << 0; + const PAGE = 1 << 1; + const ATTR_INDEX_0 = 1 << 2; + const ATTR_INDEX_1 = 1 << 3; + const ATTR_INDEX_2 = 1 << 4; + const NS = 1 << 5; + const AP_1 = 1 << 6; + const AP_2 = 1 << 7; + const SH_0 = 1 << 8; + const SH_1 = 1 << 9; + const AF = 1 << 10; + const NG = 1 << 11; + const DBM = 1 << 51; + const CONTIGUOUS = 1 << 52; + const PXN = 1 << 53; + const UXN = 1 << 54; + const PRESENT = 1 << 58; /* Assuming DBM can be overloaded as PRESENT */ + } +} + +// These are 'virtual' flags that are used to minimise changes to the generic paging code. +// These are translated to AArch64 specific Page and Table descriptors as and when needed. +bitflags! 
{ + #[derive(Default)] + pub struct EntryFlags: u64 { + const PRESENT = 1 << 0; + const HUGE_PAGE = 1 << 1; + const GLOBAL = 1 << 2; + const NO_EXECUTE = 1 << 3; + const USER_ACCESSIBLE = 1 << 4; + const WRITABLE = 1 << 5; + const TLS = 1 << 6; + const AF = 1 << 10; + } +} + +pub const ADDRESS_MASK: usize = 0x0000_ffff_ffff_f000; +pub const COUNTER_MASK: u64 = 0x0008_0000_0000_0000; + +impl Entry { + /// Clear entry + pub fn set_zero(&mut self) { + self.0 = 0; + } + + /// Is the entry unused? + pub fn is_unused(&self) -> bool { + self.0 == (self.0 & COUNTER_MASK) + } + + /// Make the entry unused + pub fn set_unused(&mut self) { + self.0 &= COUNTER_MASK; + } + + /// Get the address this page references + pub fn address(&self) -> PhysicalAddress { + PhysicalAddress::new(self.0 as usize & ADDRESS_MASK) + } + + /// Get the current entry flags + pub fn page_table_entry_flags(&self) -> TableDescriptorFlags { + TableDescriptorFlags::from_bits_truncate(self.0) + } + + pub fn page_descriptor_entry_flags(&self) -> PageDescriptorFlags { + PageDescriptorFlags::from_bits_truncate(self.0) + } + + /// Get the current entry flags + pub fn flags(&self) -> EntryFlags { + EntryFlags::from_bits_truncate(self.0) + } + + /// Get the associated frame, if available, for a level 4, 3, or 2 page + pub fn pointed_frame(&self) -> Option { + if self.page_table_entry_flags().contains(TableDescriptorFlags::PRESENT) { + Some(Frame::containing_address(self.address())) + } else { + None + } + } + + /// Get the associated frame, if available, for a level 1 page + pub fn pointed_frame_at_l1(&self) -> Option { + if self.page_descriptor_entry_flags().contains(PageDescriptorFlags::PRESENT) { + Some(Frame::containing_address(self.address())) + } else { + None + } + } + + pub fn page_table_entry_set(&mut self, frame: Frame, flags: TableDescriptorFlags) { + debug_assert!(frame.start_address().data() & !ADDRESS_MASK == 0); + // ODDNESS Alert: We need to set the AF bit - despite this being a 
TableDescriptor!!! + // The Arm ARM says this bit (bit 10) is IGNORED in Table Descriptors so hopefully this is OK + let access_flag = TableDescriptorFlags::AF; + self.0 = (frame.start_address().data() as u64) | flags.bits() | access_flag.bits() | (self.0 & COUNTER_MASK); + } + + pub fn page_descriptor_entry_set(&mut self, frame: Frame, flags: PageDescriptorFlags) { + debug_assert!(frame.start_address().data() & !ADDRESS_MASK == 0); + let access_flag = PageDescriptorFlags::AF; + self.0 = (frame.start_address().data() as u64) | flags.bits() | access_flag.bits() | (self.0 & COUNTER_MASK); + } + + pub fn set(&mut self, frame: Frame, flags: EntryFlags) { + debug_assert!(frame.start_address().data() & !ADDRESS_MASK == 0); + // ODDNESS Alert: We need to set the AF bit - despite this being a TableDescriptor!!! + // The Arm ARM says this bit (bit 10) is IGNORED in Table Descriptors so hopefully this is OK + let mut translated_flags = TableDescriptorFlags::AF | TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE; + + if flags.contains(EntryFlags::PRESENT) { + translated_flags.insert(TableDescriptorFlags::PRESENT); + } + + self.0 = (frame.start_address().data() as u64) | translated_flags.bits() | (self.0 & COUNTER_MASK); + } + + /// Get bit 51 in entry, used as 1 of 9 bits (in 9 entries) used as a counter for the page table + pub fn counter_bits(&self) -> u64 { + (self.0 & COUNTER_MASK) >> 51 + } + + /// Set bit 51 in entry, used as 1 of 9 bits (in 9 entries) used as a counter for the page table + pub fn set_counter_bits(&mut self, count: u64) { + self.0 = (self.0 & !COUNTER_MASK) | ((count & 0x1) << 51); + } +} diff --git a/src/arch/aarch64/paging/mapper.rs b/src/arch/aarch64/paging/mapper.rs new file mode 100644 index 0000000..3c2fe45 --- /dev/null +++ b/src/arch/aarch64/paging/mapper.rs @@ -0,0 +1,350 @@ +use core::mem; +use core::ptr::Unique; + +use crate::memory::{allocate_frames, deallocate_frames, Frame}; + +use super::{ActivePageTable, Page, PAGE_SIZE, 
PhysicalAddress, VirtualAddress, VAddrType}; +use super::entry::{EntryFlags, PageDescriptorFlags}; +use super::table::{self, Table, Level4}; + +/// In order to enforce correct paging operations in the kernel, these types +/// are returned on any mapping operation to get the code involved to specify +/// how it intends to flush changes to a page table +#[must_use = "The page table must be flushed, or the changes unsafely ignored"] +pub struct MapperFlush(Page); + +impl MapperFlush { + /// Create a new page flush promise + pub fn new(page: Page) -> MapperFlush { + MapperFlush(page) + } + + /// Flush this page in the active table + pub fn flush(self, table: &mut ActivePageTable) { + table.flush(self.0); + mem::forget(self); + } + + /// Ignore the flush. This is unsafe, and a reason should be provided for use + pub unsafe fn ignore(self) { + mem::forget(self); + } +} + +/// A flush cannot be dropped, it must be consumed +impl Drop for MapperFlush { + fn drop(&mut self) { + panic!("Mapper flush was not utilized"); + } +} + +/// To allow for combining multiple flushes into one, we have a way of flushing +/// the active table, which can consume `MapperFlush` structs +#[must_use = "The page table must be flushed, or the changes unsafely ignored"] +pub struct MapperFlushAll(bool); + +impl MapperFlushAll { + /// Create a new promise to flush all mappings + pub fn new() -> MapperFlushAll { + MapperFlushAll(false) + } + + /// Consume a single page flush + pub fn consume(&mut self, flush: MapperFlush) { + self.0 = true; + mem::forget(flush); + } + + /// Flush the active page table + pub fn flush(self, table: &mut ActivePageTable) { + if self.0 { + table.flush_all(); + } + mem::forget(self); + } + + /// Ignore the flush. 
This is unsafe, and a reason should be provided for use + pub unsafe fn ignore(self) { + mem::forget(self); + } +} + +/// A flush cannot be dropped, it must be consumed +impl Drop for MapperFlushAll { + fn drop(&mut self) { + panic!("Mapper flush all was not utilized"); + } +} + +pub struct Mapper { + p4: Unique>, + pub mapper_type: MapperType +} + +pub enum MapperType { + User, + Kernel +} + +impl Mapper { + /// Create a new page table + pub unsafe fn new(mapper_type: MapperType) -> Mapper { + match mapper_type { + MapperType::User => Mapper { p4: Unique::new_unchecked(table::U4), mapper_type }, + MapperType::Kernel => Mapper { p4: Unique::new_unchecked(table::P4), mapper_type } + } + } + + pub fn p4(&self) -> &Table { + unsafe { self.p4.as_ref() } + } + + pub fn p4_mut(&mut self) -> &mut Table { + unsafe { self.p4.as_mut() } + } + + /// Map a page to a frame + pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) -> MapperFlush { + let p3 = self.p4_mut().next_table_create(page.p4_index()); + let p2 = p3.next_table_create(page.p3_index()); + let p1 = p2.next_table_create(page.p2_index()); + let mut translated_flags: PageDescriptorFlags = PageDescriptorFlags::VALID | PageDescriptorFlags::PAGE | PageDescriptorFlags::AF; + + translated_flags.insert(PageDescriptorFlags::PRESENT); + + if flags.contains(EntryFlags::NO_EXECUTE) { + match page.start_address().get_type() { + VAddrType::User => { + translated_flags.insert(PageDescriptorFlags::UXN); + }, + VAddrType::Kernel => { + translated_flags.insert(PageDescriptorFlags::PXN); + }, + } + } + + if flags.contains(EntryFlags::WRITABLE) { + if flags.contains(EntryFlags::USER_ACCESSIBLE) { + translated_flags.remove(PageDescriptorFlags::AP_2); + translated_flags.insert(PageDescriptorFlags::AP_1); + } else { + translated_flags.remove(PageDescriptorFlags::AP_2); + translated_flags.remove(PageDescriptorFlags::AP_1); + } + } else { + if flags.contains(EntryFlags::USER_ACCESSIBLE) { + 
translated_flags.insert(PageDescriptorFlags::AP_2); + translated_flags.insert(PageDescriptorFlags::AP_1); + } else { + translated_flags.insert(PageDescriptorFlags::AP_2); + translated_flags.remove(PageDescriptorFlags::AP_1); + } + } + + assert!(p1[page.p1_index()].is_unused(), + "{:X}: Set to {:X}: {:?}, requesting {:X}: {:?}", + page.start_address().data(), + p1[page.p1_index()].address().data(), p1[page.p1_index()].page_descriptor_entry_flags(), + frame.start_address().data(), translated_flags); + p1.increment_entry_count(); + p1[page.p1_index()].page_descriptor_entry_set(frame, translated_flags); + MapperFlush::new(page) + } + + /// Map a page to the next free frame + pub fn map(&mut self, page: Page, flags: EntryFlags) -> MapperFlush { + let frame = allocate_frames(1).expect("out of frames"); + self.map_to(page, frame, flags) + } + + /// Update flags for a page + pub fn remap(&mut self, page: Page, flags: EntryFlags) -> MapperFlush { + let p3 = self.p4_mut().next_table_mut(page.p4_index()).expect("failed to remap: no p3"); + let p2 = p3.next_table_mut(page.p3_index()).expect("failed to remap: no p2"); + let p1 = p2.next_table_mut(page.p2_index()).expect("failed to remap: no p1"); + let frame = p1[page.p1_index()].pointed_frame_at_l1().expect("failed to remap: not mapped"); + let mut translated_flags: PageDescriptorFlags = PageDescriptorFlags::VALID | PageDescriptorFlags::PAGE | PageDescriptorFlags::AF; + + translated_flags.insert(PageDescriptorFlags::PRESENT); + + if flags.contains(EntryFlags::NO_EXECUTE) { + match page.start_address().get_type() { + VAddrType::User => { + translated_flags.insert(PageDescriptorFlags::UXN); + }, + VAddrType::Kernel => { + translated_flags.insert(PageDescriptorFlags::PXN); + }, + } + } + + if flags.contains(EntryFlags::WRITABLE) { + if flags.contains(EntryFlags::USER_ACCESSIBLE) { + translated_flags.remove(PageDescriptorFlags::AP_2); + translated_flags.insert(PageDescriptorFlags::AP_1); + } else { + 
translated_flags.remove(PageDescriptorFlags::AP_2); + translated_flags.remove(PageDescriptorFlags::AP_1); + } + } else { + if flags.contains(EntryFlags::USER_ACCESSIBLE) { + translated_flags.insert(PageDescriptorFlags::AP_2); + translated_flags.insert(PageDescriptorFlags::AP_1); + } else { + translated_flags.insert(PageDescriptorFlags::AP_2); + translated_flags.remove(PageDescriptorFlags::AP_1); + } + } + + p1[page.p1_index()].page_descriptor_entry_set(frame, translated_flags); + MapperFlush::new(page) + } + + /// Identity map a frame + pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) -> MapperFlush { + let page = Page::containing_address(VirtualAddress::new(frame.start_address().data())); + self.map_to(page, frame, flags) + } + + fn unmap_inner(&mut self, page: &Page, keep_parents: bool) -> Frame { + let frame; + + let p4 = self.p4_mut(); + if let Some(p3) = p4.next_table_mut(page.p4_index()) { + if let Some(p2) = p3.next_table_mut(page.p3_index()) { + if let Some(p1) = p2.next_table_mut(page.p2_index()) { + frame = if let Some(frame) = p1[page.p1_index()].pointed_frame_at_l1() { + frame + } else { + panic!("unmap_inner({:X}): frame not found", page.start_address().data()) + }; + + p1.decrement_entry_count(); + p1[page.p1_index()].set_unused(); + + if keep_parents || ! p1.is_unused() { + return frame; + } + } else { + panic!("unmap_inner({:X}): p1 not found", page.start_address().data()); + } + + if let Some(p1_frame) = p2[page.p2_index()].pointed_frame() { + //println!("unmap_inner: Free p1 {:?}", p1_frame); + p2.decrement_entry_count(); + p2[page.p2_index()].set_unused(); + deallocate_frames(p1_frame, 1); + } else { + panic!("unmap_inner({:X}): p1_frame not found", page.start_address().data()); + } + + if ! 
p2.is_unused() { + return frame; + } + } else { + panic!("unmap_inner({:X}): p2 not found", page.start_address().data()); + } + + if let Some(p2_frame) = p3[page.p3_index()].pointed_frame() { + //println!("unmap_inner: Free p2 {:?}", p2_frame); + p3.decrement_entry_count(); + p3[page.p3_index()].set_unused(); + deallocate_frames(p2_frame, 1); + } else { + panic!("unmap_inner({:X}): p2_frame not found", page.start_address().data()); + } + + if ! p3.is_unused() { + return frame; + } + } else { + panic!("unmap_inner({:X}): p3 not found", page.start_address().data()); + } + + if let Some(p3_frame) = p4[page.p4_index()].pointed_frame() { + //println!("unmap_inner: Free p3 {:?}", p3_frame); + p4.decrement_entry_count(); + p4[page.p4_index()].set_unused(); + deallocate_frames(p3_frame, 1); + } else { + panic!("unmap_inner({:X}): p3_frame not found", page.start_address().data()); + } + + frame + } + + /// Unmap a page + pub fn unmap(&mut self, page: Page) -> MapperFlush { + let frame = self.unmap_inner(&page, false); + deallocate_frames(frame, 1); + MapperFlush::new(page) + } + + /// Unmap a page, return frame without free + pub fn unmap_return(&mut self, page: Page, keep_parents: bool) -> (MapperFlush, Frame) { + let frame = self.unmap_inner(&page, keep_parents); + (MapperFlush::new(page), frame) + } + + pub fn translate_page(&self, page: Page) -> Option { + self.p4().next_table(page.p4_index()) + .and_then(|p3| p3.next_table(page.p3_index())) + .and_then(|p2| p2.next_table(page.p2_index())) + // NOTE(review): p1 holds page descriptors, not table descriptors; use the + // L1 accessor like remap()/unmap_inner() do — TODO confirm against entry.rs + .and_then(|p1| p1[page.p1_index()].pointed_frame_at_l1()) + } + + pub fn translate_page_flags(&self, page: Page) -> Option { + let mut translated_flags: EntryFlags = Default::default(); + + if let Some(flags) = self.p4().next_table(page.p4_index()) + .and_then(|p3| p3.next_table(page.p3_index())) + .and_then(|p2| p2.next_table(page.p2_index())) + .and_then(|p1| Some(p1[page.p1_index()].page_descriptor_entry_flags())) { + + if flags.contains(PageDescriptorFlags::PRESENT) { 
translated_flags.insert(EntryFlags::PRESENT); + } + + if flags.contains(PageDescriptorFlags::AF) { + translated_flags.insert(EntryFlags::AF); + } + // NOTE(review): removed unconditional AF insert that defeated the guard above; AF now mirrors the descriptor's AF bit + + if flags.contains(PageDescriptorFlags::UXN) || flags.contains(PageDescriptorFlags::PXN) { + translated_flags.insert(EntryFlags::NO_EXECUTE); + } + + if !flags.contains(PageDescriptorFlags::AP_2) && !flags.contains(PageDescriptorFlags::AP_1) { + translated_flags.insert(EntryFlags::WRITABLE); + translated_flags.remove(EntryFlags::USER_ACCESSIBLE); + } + + if !flags.contains(PageDescriptorFlags::AP_2) && flags.contains(PageDescriptorFlags::AP_1) { + translated_flags.insert(EntryFlags::WRITABLE); + translated_flags.insert(EntryFlags::USER_ACCESSIBLE); + } + + if flags.contains(PageDescriptorFlags::AP_2) && !flags.contains(PageDescriptorFlags::AP_1) { + translated_flags.remove(EntryFlags::WRITABLE); + translated_flags.remove(EntryFlags::USER_ACCESSIBLE); + } + + if flags.contains(PageDescriptorFlags::AP_2) && flags.contains(PageDescriptorFlags::AP_1) { + translated_flags.remove(EntryFlags::WRITABLE); + translated_flags.insert(EntryFlags::USER_ACCESSIBLE); + } + + Some(translated_flags) + } + else { + None + } + } + + /// Translate a virtual address to a physical one + pub fn translate(&self, virtual_address: VirtualAddress) -> Option { + let offset = virtual_address.data() % PAGE_SIZE; + self.translate_page(Page::containing_address(virtual_address)) + .map(|frame| PhysicalAddress::new(frame.start_address().data() + offset)) + } +} diff --git a/src/arch/aarch64/paging/mod.rs b/src/arch/aarch64/paging/mod.rs new file mode 100644 index 0000000..73cc6db --- /dev/null +++ b/src/arch/aarch64/paging/mod.rs @@ -0,0 +1,483 @@ +//! # Paging +//!
Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html) + +use core::{mem, ptr}; +use core::ops::{Deref, DerefMut}; +use spin::Mutex; + +use crate::device::cpu::registers::{control_regs, tlb}; +use crate::memory::{allocate_frames, Frame}; + +use self::entry::{EntryFlags, TableDescriptorFlags}; +use self::mapper::{Mapper, MapperFlushAll, MapperType}; +use self::temporary_page::TemporaryPage; + +pub mod entry; +pub mod mapper; +pub mod table; +pub mod temporary_page; + +/// Number of entries per page table +pub const ENTRY_COUNT: usize = 512; + +/// Size of pages +pub const PAGE_SIZE: usize = 4096; + +//TODO: This is a rudimentary recursive mutex used to naively fix multi_core issues, replace it! +pub struct PageTableLock { + cpu_id: usize, + count: usize, +} + +pub static PAGE_TABLE_LOCK: Mutex = Mutex::new(PageTableLock { + cpu_id: 0, + count: 0, +}); + +fn page_table_lock() { + let cpu_id = crate::cpu_id(); + loop { + { + let mut lock = PAGE_TABLE_LOCK.lock(); + if lock.count == 0 || lock.cpu_id == cpu_id { + lock.cpu_id = cpu_id; + lock.count += 1; + return; + } + } + crate::arch::interrupt::pause(); + } +} + +fn page_table_unlock() { + let mut lock = PAGE_TABLE_LOCK.lock(); + lock.count -= 1; +} + +/// Setup Memory Access Indirection Register +unsafe fn init_mair() { + let mut val: control_regs::MairEl1 = control_regs::mair_el1(); + + val.insert(control_regs::MairEl1::DEVICE_MEMORY); + val.insert(control_regs::MairEl1::NORMAL_UNCACHED_MEMORY); + val.insert(control_regs::MairEl1::NORMAL_WRITEBACK_MEMORY); + + control_regs::mair_el1_write(val); +} + +/// Map TSS +unsafe fn map_tss(cpu_id: usize, mapper: &mut Mapper) -> MapperFlushAll { + extern "C" { + /// The starting byte of the thread data segment + static mut __tdata_start: u8; + /// The ending byte of the thread data segment + static mut __tdata_end: u8; + /// The starting byte of the thread BSS segment + static mut __tbss_start: u8; + /// The ending byte of the thread 
BSS segment + static mut __tbss_end: u8; + } + + let size = &__tbss_end as *const _ as usize - &__tdata_start as *const _ as usize; + let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id; + let end = start + size; + + let mut flush_all = MapperFlushAll::new(); + let start_page = Page::containing_address(VirtualAddress::new(start)); + let end_page = Page::containing_address(VirtualAddress::new(end - 1)); + for page in Page::range_inclusive(start_page, end_page) { + let result = mapper.map( + page, + EntryFlags::PRESENT + | EntryFlags::GLOBAL + | EntryFlags::NO_EXECUTE + | EntryFlags::WRITABLE, + ); + flush_all.consume(result); + } + flush_all +} + +/// Copy tdata, clear tbss, set TCB self pointer +unsafe fn init_tcb(cpu_id: usize) -> usize { + extern "C" { + /// The starting byte of the thread data segment + static mut __tdata_start: u8; + /// The ending byte of the thread data segment + static mut __tdata_end: u8; + /// The starting byte of the thread BSS segment + static mut __tbss_start: u8; + /// The ending byte of the thread BSS segment + static mut __tbss_end: u8; + } + + let tcb_offset; + { + let size = &__tbss_end as *const _ as usize - &__tdata_start as *const _ as usize; + let tbss_offset = &__tbss_start as *const _ as usize - &__tdata_start as *const _ as usize; + + let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id; + println!("SET TPIDR_EL1 TO {:X}", start - 0x10); + // FIXME: Empirically initializing tpidr to 16 bytes below start works. I do not know + // whether this is the correct way to handle TLS. Will need to revisit. 
+ control_regs::tpidr_el1_write((start - 0x10) as u64); + println!("SET TPIDR_EL1 DONE"); + + let end = start + size; + tcb_offset = end - mem::size_of::(); + + ptr::copy(&__tdata_start as *const u8, start as *mut u8, tbss_offset); + ptr::write_bytes((start + tbss_offset) as *mut u8, 0, size - tbss_offset); + + *(tcb_offset as *mut usize) = end; + } + tcb_offset +} + +/// Initialize paging +/// +/// Returns page table and thread control block offset +pub unsafe fn init( + cpu_id: usize, +) -> (ActivePageTable, usize) { + extern "C" { + /// The starting byte of the text (code) data segment. + static mut __text_start: u8; + /// The ending byte of the text (code) data segment. + static mut __text_end: u8; + /// The starting byte of the _.rodata_ (read-only data) segment. + static mut __rodata_start: u8; + /// The ending byte of the _.rodata_ (read-only data) segment. + static mut __rodata_end: u8; + /// The starting byte of the _.data_ segment. + static mut __data_start: u8; + /// The ending byte of the _.data_ segment. + static mut __data_end: u8; + /// The starting byte of the thread data segment + static mut __tdata_start: u8; + /// The ending byte of the thread data segment + static mut __tdata_end: u8; + /// The starting byte of the thread BSS segment + static mut __tbss_start: u8; + /// The ending byte of the thread BSS segment + static mut __tbss_end: u8; + /// The starting byte of the _.bss_ (uninitialized data) segment. + static mut __bss_start: u8; + /// The ending byte of the _.bss_ (uninitialized data) segment. 
+ static mut __bss_end: u8; + } + + init_mair(); + + let mut active_table = ActivePageTable::new_unlocked(); + + let flush_all = map_tss(cpu_id, &mut active_table); + flush_all.flush(&mut active_table); + + return (active_table, init_tcb(cpu_id)); +} + +pub unsafe fn init_ap( + cpu_id: usize, + bsp_table: usize, +) -> usize { + init_mair(); + + let mut active_table = ActivePageTable::new_unlocked(); + + let mut new_table = InactivePageTable::from_address(bsp_table); + + let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new( + crate::USER_TMP_MISC_OFFSET, + ))); + + active_table.with(&mut new_table, &mut temporary_page, |mapper| { + let flush_all = map_tss(cpu_id, mapper); + // The flush can be ignored as this is not the active table. See later active_table.switch + flush_all.ignore(); + }); + + // This switches the active table, which is setup by the bootloader, to a correct table + // setup by the lambda above. This will also flush the TLB + active_table.switch(new_table); + + init_tcb(cpu_id) +} + +pub struct ActivePageTable { + mapper: Mapper, + locked: bool, +} + +pub enum PageTableType { + User, + Kernel +} + +impl Deref for ActivePageTable { + type Target = Mapper; + + fn deref(&self) -> &Mapper { + &self.mapper + } +} + +impl DerefMut for ActivePageTable { + fn deref_mut(&mut self) -> &mut Mapper { + &mut self.mapper + } +} + +impl ActivePageTable { + //TODO: table_type argument + pub unsafe fn new() -> ActivePageTable { + let table_type = PageTableType::Kernel; + page_table_lock(); + ActivePageTable { + mapper: Mapper::new(match table_type { + PageTableType::User => MapperType::User, + PageTableType::Kernel => MapperType::Kernel, + }), + locked: true, + } + } + + //TODO: table_type argument + pub unsafe fn new_unlocked() -> ActivePageTable { + let table_type = PageTableType::Kernel; + ActivePageTable { + mapper: Mapper::new(match table_type { + PageTableType::User => MapperType::User, + PageTableType::Kernel => 
MapperType::Kernel, + }), + locked: false, + } + } + + pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable { + let old_table: InactivePageTable; + + match self.mapper.mapper_type { + MapperType::User => { + old_table = InactivePageTable { p4_frame: Frame::containing_address(PhysicalAddress::new(unsafe { control_regs::ttbr0_el1() } as usize)) }; + unsafe { control_regs::ttbr0_el1_write(new_table.p4_frame.start_address().data() as u64) }; + }, + MapperType::Kernel => { + old_table = InactivePageTable { p4_frame: Frame::containing_address(PhysicalAddress::new(unsafe { control_regs::ttbr1_el1() } as usize)) }; + unsafe { control_regs::ttbr1_el1_write(new_table.p4_frame.start_address().data() as u64) }; + } + } + + unsafe { tlb::flush_all() }; + old_table + } + + pub fn flush(&mut self, page: Page) { + unsafe { + tlb::flush(page.start_address().data()); + } + } + + pub fn flush_all(&mut self) { + unsafe { + tlb::flush_all(); + } + } + + pub fn with(&mut self, table: &mut InactivePageTable, temporary_page: &mut TemporaryPage, f: F) + where F: FnOnce(&mut Mapper) + { + { + let backup: Frame; + + match self.mapper.mapper_type { + MapperType::User => backup = Frame::containing_address(PhysicalAddress::new(unsafe { control_regs::ttbr0_el1() as usize })), + MapperType::Kernel => backup = Frame::containing_address(PhysicalAddress::new(unsafe { control_regs::ttbr1_el1() as usize })) + } + + // map temporary_kpage to current p4 table + let p4_table = temporary_page.map_table_frame(backup.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE, self); + + // overwrite recursive mapping + self.p4_mut()[crate::RECURSIVE_PAGE_PML4].page_table_entry_set( + table.p4_frame.clone(), + TableDescriptorFlags::PRESENT | TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE, + ); + self.flush_all(); + + // execute f in the new context + f(self); + + // restore recursive mapping to original p4 table + 
p4_table[crate::RECURSIVE_PAGE_PML4].page_table_entry_set( + backup, + TableDescriptorFlags::PRESENT | TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE, + ); + self.flush_all(); + } + + temporary_page.unmap(self); + } + + pub unsafe fn address(&self) -> usize { + match self.mapper.mapper_type { + MapperType::User => control_regs::ttbr0_el1() as usize, + MapperType::Kernel => control_regs::ttbr1_el1() as usize, + } + } +} + +impl Drop for ActivePageTable { + fn drop(&mut self) { + if self.locked { + page_table_unlock(); + self.locked = false; + } + } +} + +pub struct InactivePageTable { + p4_frame: Frame, +} + +impl InactivePageTable { + pub fn new( + frame: Frame, + active_table: &mut ActivePageTable, + temporary_page: &mut TemporaryPage, + ) -> InactivePageTable { + { + let table = temporary_page.map_table_frame( + frame.clone(), + EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE, + active_table, + ); + // now we are able to zero the table + table.zero(); + // set up recursive mapping for the table + table[crate::RECURSIVE_PAGE_PML4].page_table_entry_set( + frame.clone(), + TableDescriptorFlags::PRESENT | TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE + ); + } + temporary_page.unmap(active_table); + + InactivePageTable { p4_frame: frame } + } + + pub unsafe fn from_address(address: usize) -> InactivePageTable { + InactivePageTable { + p4_frame: Frame::containing_address(PhysicalAddress::new(address)), + } + } + + pub unsafe fn address(&self) -> usize { + self.p4_frame.start_address().data() + } +} + +/// A physical address. +#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct PhysicalAddress(usize); + +impl PhysicalAddress { + pub fn new(address: usize) -> Self { + PhysicalAddress(address) + } + + pub fn data(&self) -> usize { + self.0 + } +} + +/// A virtual address. 
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct VirtualAddress(usize); + +pub enum VAddrType { + User, + Kernel +} + +impl VirtualAddress { + pub fn new(address: usize) -> Self { + VirtualAddress(address) + } + + pub fn data(&self) -> usize { + self.0 + } + + pub fn get_type(&self) -> VAddrType { + if ((self.0 >> 48) & 0xffff) == 0xffff { + VAddrType::Kernel + } else { + VAddrType::User + } + } +} + +/// Page +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub struct Page { + number: usize, +} + +impl Page { + pub fn start_address(self) -> VirtualAddress { + VirtualAddress::new(self.number * PAGE_SIZE) + } + + pub fn p4_index(self) -> usize { + (self.number >> 27) & 0o777 + } + + pub fn p3_index(self) -> usize { + (self.number >> 18) & 0o777 + } + + pub fn p2_index(self) -> usize { + (self.number >> 9) & 0o777 + } + + pub fn p1_index(self) -> usize { + self.number & 0o777 + } + + pub fn containing_address(address: VirtualAddress) -> Page { + //TODO assert!(address.data() < 0x0000_8000_0000_0000 || address.data() >= 0xffff_8000_0000_0000, + // "invalid address: 0x{:x}", address.data()); + Page { + number: address.data() / PAGE_SIZE, + } + } + + pub fn range_inclusive(start: Page, end: Page) -> PageIter { + PageIter { start, end } + } + + pub fn next(self) -> Page { + Self { + number: self.number + 1, + } + } +} + +pub struct PageIter { + start: Page, + end: Page, +} + +impl Iterator for PageIter { + type Item = Page; + + fn next(&mut self) -> Option { + if self.start <= self.end { + let page = self.start; + self.start = self.start.next(); + Some(page) + } else { + None + } + } +} diff --git a/src/arch/aarch64/paging/table.rs b/src/arch/aarch64/paging/table.rs new file mode 100644 index 0000000..8827315 --- /dev/null +++ b/src/arch/aarch64/paging/table.rs @@ -0,0 +1,161 @@ +//! # Page table +//! 
Code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html) + +use core::marker::PhantomData; +use core::ops::{Index, IndexMut}; + +use crate::memory::allocate_frames; + +use super::entry::{TableDescriptorFlags, Entry}; +use super::ENTRY_COUNT; + +pub const P4: *mut Table = 0xffff_ffff_ffff_f000 as *mut _; +pub const U4: *mut Table = 0x0000_ffff_ffff_f000 as *mut _; + +const KSPACE_ADDR_MASK: usize = 0xffff_0000_0000_0000; +const USPACE_ADDR_MASK: usize = 0x0000_ffff_ffff_ffff; + +pub trait TableLevel {} + +pub enum Level4 {} +pub enum Level3 {} +pub enum Level2 {} +pub enum Level1 {} + +impl TableLevel for Level4 {} +impl TableLevel for Level3 {} +impl TableLevel for Level2 {} +impl TableLevel for Level1 {} + +pub trait HierarchicalLevel: TableLevel { + type NextLevel: TableLevel; +} + +impl HierarchicalLevel for Level4 { + type NextLevel = Level3; +} + +impl HierarchicalLevel for Level3 { + type NextLevel = Level2; +} + +impl HierarchicalLevel for Level2 { + type NextLevel = Level1; +} + +pub struct Table { + entries: [Entry; ENTRY_COUNT], + level: PhantomData, +} + +impl Table where L: TableLevel { + pub fn is_unused(&self) -> bool { + if self.entry_count() > 0 { + return false; + } + + true + } + + pub fn zero(&mut self) { + for entry in self.entries.iter_mut() { + entry.set_zero(); + } + } + + /// Set number of entries in first table entry + /// FIXMES: + /// Only 1 bit per table entry seems to work. So we need 9 entries (!). + /// This is one reason why we need to have a non-recursive paging scheme. + /// These updates require memory barriers and TLB invalidations. 
+ fn set_entry_count(&mut self, count: u64) { + debug_assert!(count <= ENTRY_COUNT as u64, "count can't be greater than ENTRY_COUNT"); + self.entries[0].set_counter_bits((count >> 0) & 0x1); + self.entries[1].set_counter_bits((count >> 1) & 0x1); + self.entries[2].set_counter_bits((count >> 2) & 0x1); + self.entries[3].set_counter_bits((count >> 3) & 0x1); + self.entries[4].set_counter_bits((count >> 4) & 0x1); + self.entries[5].set_counter_bits((count >> 5) & 0x1); + self.entries[6].set_counter_bits((count >> 6) & 0x1); + self.entries[7].set_counter_bits((count >> 7) & 0x1); + self.entries[8].set_counter_bits((count >> 8) & 0x1); + } + + /// Get number of entries from first table entry + fn entry_count(&self) -> u64 { + let mut count: u64 = (self.entries[0].counter_bits() & 0x1) << 0; + count |= (self.entries[1].counter_bits() & 0x1) << 1; + count |= (self.entries[2].counter_bits() & 0x1) << 2; + count |= (self.entries[3].counter_bits() & 0x1) << 3; + count |= (self.entries[4].counter_bits() & 0x1) << 4; + count |= (self.entries[5].counter_bits() & 0x1) << 5; + count |= (self.entries[6].counter_bits() & 0x1) << 6; + count |= (self.entries[7].counter_bits() & 0x1) << 7; + count |= (self.entries[8].counter_bits() & 0x1) << 8; + count + } + + pub fn increment_entry_count(&mut self) { + let current_count = self.entry_count(); + self.set_entry_count(current_count + 1); + } + + pub fn decrement_entry_count(&mut self) { + let current_count = self.entry_count(); + self.set_entry_count(current_count - 1); + } +} + +impl Table where L: HierarchicalLevel { + pub fn next_table(&self, index: usize) -> Option<&Table> { + self.next_table_address(index).map(|address| unsafe { &*(address as *const _) }) + } + + pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table> { + self.next_table_address(index).map(|address| unsafe { &mut *(address as *mut _) }) + } + + pub fn next_table_create(&mut self, index: usize) -> &mut Table { + if self.next_table(index).is_none() { + 
let frame = allocate_frames(1).expect("no frames available"); + self.increment_entry_count(); + + /* Allow users to go down the page table, implement permissions at the page level */ + let mut perms = TableDescriptorFlags::PRESENT; + perms |= TableDescriptorFlags::VALID; + perms |= TableDescriptorFlags::TABLE; + + self[index].page_table_entry_set(frame, perms); + self.next_table_mut(index).unwrap().zero(); + } + self.next_table_mut(index).unwrap() + } + + fn next_table_address(&self, index: usize) -> Option { + let entry_flags = self[index].page_table_entry_flags(); + if entry_flags.contains(TableDescriptorFlags::PRESENT) { + let table_address = self as *const _ as usize; + if (table_address & KSPACE_ADDR_MASK) != 0 { + Some((table_address << 9) | (index << 12)) + } else { + Some(((table_address << 9) | (index << 12)) & USPACE_ADDR_MASK) + } + } else { + None + } + } +} + +impl Index for Table where L: TableLevel { + type Output = Entry; + + fn index(&self, index: usize) -> &Entry { + &self.entries[index] + } +} + +impl IndexMut for Table where L: TableLevel { + fn index_mut(&mut self, index: usize) -> &mut Entry { + &mut self.entries[index] + } +} diff --git a/src/arch/aarch64/paging/temporary_page.rs b/src/arch/aarch64/paging/temporary_page.rs new file mode 100644 index 0000000..8ccf441 --- /dev/null +++ b/src/arch/aarch64/paging/temporary_page.rs @@ -0,0 +1,45 @@ +//! Temporarily map a page +//! From [Phil Opp's Blog](http://os.phil-opp.com/remap-the-kernel.html) + +use crate::memory::Frame; + +use super::{ActivePageTable, Page, VirtualAddress}; +use super::entry::EntryFlags; +use super::table::{Table, Level1}; + +pub struct TemporaryPage { + page: Page, +} + +impl TemporaryPage { + pub fn new(page: Page) -> TemporaryPage { + TemporaryPage { + page: page, + } + } + + pub fn start_address (&self) -> VirtualAddress { + self.page.start_address() + } + + /// Maps the temporary page to the given frame in the active table. 
+ /// Returns the start address of the temporary page. + pub fn map(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> VirtualAddress { + assert!(active_table.translate_page(self.page).is_none(), "temporary page is already mapped"); + let result = active_table.map_to(self.page, frame, flags); + result.flush(active_table); + self.page.start_address() + } + + /// Maps the temporary page to the given page table frame in the active + /// table. Returns a reference to the now mapped table. + pub fn map_table_frame(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> &mut Table { + unsafe { &mut *(self.map(frame, flags, active_table).data() as *mut Table) } + } + + /// Unmaps the temporary page in the active table. + pub fn unmap(&mut self, active_table: &mut ActivePageTable) { + let (result, _frame) = active_table.unmap_return(self.page, true); + result.flush(active_table); + } +} diff --git a/src/arch/aarch64/rmm.rs b/src/arch/aarch64/rmm.rs new file mode 100644 index 0000000..3f7c179 --- /dev/null +++ b/src/arch/aarch64/rmm.rs @@ -0,0 +1,292 @@ +use rmm::{ + KILOBYTE, + MEGABYTE, + AArch64Arch, + Arch, + BuddyAllocator, + BumpAllocator, + FrameAllocator, + FrameCount, + FrameUsage, + MemoryArea, + PageFlags, + PageMapper, + PageTable, + PhysicalAddress, + VirtualAddress, +}; + +use spin::Mutex; + +extern "C" { + /// The starting byte of the text (code) data segment. + static mut __text_start: u8; + /// The ending byte of the text (code) data segment. + static mut __text_end: u8; + /// The starting byte of the _.rodata_ (read-only data) segment. + static mut __rodata_start: u8; + /// The ending byte of the _.rodata_ (read-only data) segment. + static mut __rodata_end: u8; +} + +unsafe fn page_flags(virt: VirtualAddress) -> PageFlags { + let virt_addr = virt.data(); + + // Test for being inside a region + macro_rules! 
in_section { + ($n: ident) => { + virt_addr >= &concat_idents!(__, $n, _start) as *const u8 as usize + && virt_addr < &concat_idents!(__, $n, _end) as *const u8 as usize + }; + } + + if in_section!(text) { + // Remap text read-only, execute + PageFlags::new().write(false).execute(true) + } else if in_section!(rodata) { + // Remap rodata read-only, no execute + PageFlags::new().write(false).execute(false) + } else { + // Remap everything else read-write, no execute + PageFlags::new().write(true).execute(false) + } +} + +unsafe fn dump_tables(table: PageTable) { + let level = table.level(); + for i in 0..A::PAGE_ENTRIES { + if let Some(entry) = table.entry(i) { + if entry.present() { + let base = table.entry_base(i).unwrap(); + let address = entry.address(); + let flags = entry.flags(); + for level in level..A::PAGE_LEVELS { + print!(" "); + } + println!( + "{}: map 0x{:X} to 0x{:X} flags 0x{:X}", + i, + base.data(), + address.data(), + flags + ); + // This somewhat handles block entries + if flags & (1 << 1) != 0 { + if let Some(next) = table.next(i) { + for level in level..A::PAGE_LEVELS { + print!(" "); + } + println!("{{"); + + dump_tables(next); + + for level in level..A::PAGE_LEVELS { + print!(" "); + } + println!("}}"); + } + } + } + } + } +} + +unsafe fn inner(areas: &'static [MemoryArea], kernel_base: usize, kernel_size_aligned: usize, bump_offset: usize) -> BuddyAllocator { + // First, calculate how much memory we have + let mut size = 0; + for area in areas.iter() { + if area.size > 0 { + println!("{:X?}", area); + size += area.size; + } + } + + println!("Memory: {} MB", (size + (MEGABYTE - 1)) / MEGABYTE); + + // Create a basic allocator for the first pages + let mut bump_allocator = BumpAllocator::::new(areas, bump_offset); + + { + let mut mapper = PageMapper::::current( + &mut bump_allocator + ); + + println!("Old Table: {:X}", mapper.table().phys().data()); + //dump_tables(mapper.table()); + } + + { + let mut mapper = PageMapper::::create( + &mut 
bump_allocator + ).expect("failed to create Mapper"); + + // Map all physical areas at PHYS_OFFSET + for area in areas.iter() { + for i in 0..area.size / A::PAGE_SIZE { + let phys = area.base.add(i * A::PAGE_SIZE); + let virt = A::phys_to_virt(phys); + let flags = page_flags::(virt); + let flush = mapper.map_phys( + virt, + phys, + flags + ).expect("failed to map frame"); + flush.ignore(); // Not the active table + } + } + + //TODO: this is a hack to add the aarch64 kernel mapping + for i in 0..kernel_size_aligned / A::PAGE_SIZE { + let phys = PhysicalAddress::new(kernel_base + i * A::PAGE_SIZE); + let virt = VirtualAddress::new(crate::KERNEL_OFFSET + i * A::PAGE_SIZE); + let flags = page_flags::(virt); + let flush = mapper.map_phys( + virt, + phys, + flags + ).expect("failed to map frame"); + flush.ignore(); // Not the active table + } + + //TODO: this is another hack to map our UART + { + let phys = PhysicalAddress::new(0x9000000); + let virt = A::phys_to_virt(phys); + let flags = page_flags::(virt); + let flush = mapper.map_phys( + virt, + phys, + flags + ).expect("failed to map frame"); + flush.ignore(); // Not the active table + } + + //TODO: remove backwards compatible recursive mapping + mapper.table().set_entry(511, rmm::PageEntry::new( + mapper.table().phys().data() | A::ENTRY_FLAG_READWRITE | A::ENTRY_FLAG_DEFAULT_TABLE + )); + + println!("New Table: {:X}", mapper.table().phys().data()); + //dump_tables(mapper.table()); + + // Use the new table + mapper.make_current(); + } + + // Create the physical memory map + let offset = bump_allocator.offset(); + println!("Permanently used: {} KB", (offset + (KILOBYTE - 1)) / KILOBYTE); + + BuddyAllocator::::new(bump_allocator).expect("failed to create BuddyAllocator") +} + +pub struct LockedAllocator { + inner: Mutex>>, +} + +impl LockedAllocator { + const fn new() -> Self { + Self { + inner: Mutex::new(None) + } + } +} + +impl FrameAllocator for LockedAllocator { + unsafe fn allocate(&mut self, count: FrameCount) 
-> Option { + if let Some(ref mut allocator) = *self.inner.lock() { + allocator.allocate(count) + } else { + None + } + } + + unsafe fn free(&mut self, address: PhysicalAddress, count: FrameCount) { + if let Some(ref mut allocator) = *self.inner.lock() { + allocator.free(address, count) + } + } + + unsafe fn usage(&self) -> FrameUsage { + if let Some(ref allocator) = *self.inner.lock() { + allocator.usage() + } else { + FrameUsage::new(FrameCount::new(0), FrameCount::new(0)) + } + } +} + +static mut AREAS: [MemoryArea; 512] = [MemoryArea { + base: PhysicalAddress::new(0), + size: 0, +}; 512]; + +pub static mut FRAME_ALLOCATOR: LockedAllocator = LockedAllocator::new(); + +pub unsafe fn mapper_new(table_addr: PhysicalAddress) -> PageMapper<'static, AArch64Arch, LockedAllocator> { + PageMapper::new(table_addr, &mut FRAME_ALLOCATOR) +} + +//TODO: global paging lock? +pub unsafe fn mapper_create() -> Option> { + PageMapper::create(&mut FRAME_ALLOCATOR) +} + +pub unsafe fn mapper_current() -> PageMapper<'static, AArch64Arch, LockedAllocator> { + PageMapper::current(&mut FRAME_ALLOCATOR) +} + +pub unsafe fn init(kernel_base: usize, kernel_size: usize) { + type A = AArch64Arch; + + let kernel_size_aligned = ((kernel_size + (A::PAGE_SIZE - 1))/A::PAGE_SIZE) * A::PAGE_SIZE; + let kernel_end = kernel_base + kernel_size_aligned; + println!("kernel_end: {:X}", kernel_end); + + // Copy memory map from bootloader location, and page align it + let mut area_i = 0; + let mut bump_offset = 0; + for i in 0..512 { + let old = &crate::init::device_tree::MEMORY_MAP[i]; + if old._type != 1 { + // Not a free area + continue; + } + + let mut base = old.base_addr as usize; + let mut size = old.length as usize; + + // Page align base + let base_offset = (A::PAGE_SIZE - (base & A::PAGE_OFFSET_MASK)) & A::PAGE_OFFSET_MASK; + if base_offset > size { + // Area is too small to page align base + continue; + } + base += base_offset; + size -= base_offset; + + // Page align size + size &= 
!A::PAGE_OFFSET_MASK; + if size == 0 { + // Area is zero sized + continue; + } + + if base + size < kernel_end { + // Area is below static kernel data + bump_offset += size; + } else if base < kernel_end { + // Area contains static kernel data + bump_offset += kernel_end - base; + } + + AREAS[area_i].base = PhysicalAddress::new(base); + AREAS[area_i].size = size; + area_i += 1; + } + + println!("bump_offset: {:X}", bump_offset); + + let allocator = inner::(&AREAS, kernel_base, kernel_size_aligned, bump_offset); + *FRAME_ALLOCATOR.inner.lock() = Some(allocator); +} diff --git a/src/arch/aarch64/start.rs b/src/arch/aarch64/start.rs new file mode 100644 index 0000000..e4d3530 --- /dev/null +++ b/src/arch/aarch64/start.rs @@ -0,0 +1,191 @@ +/// This function is where the kernel sets up IRQ handlers +/// It is incredibly unsafe, and should be minimal in nature +/// It must create the IDT with the correct entries, those entries are +/// defined in other files inside of the `arch` module + +use core::slice; +use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; + +use crate::memory::{Frame}; +use crate::paging::{ActivePageTable, PageTableType, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress}; +use crate::paging::entry::{EntryFlags}; + +use crate::allocator; +use crate::device; +use crate::init::device_tree; +use crate::interrupt; +use crate::log::{self, info}; +use crate::paging; + +/// Test of zero values in BSS. +static BSS_TEST_ZERO: usize = 0; +/// Test of non-zero values in data. +static DATA_TEST_NONZERO: usize = 0xFFFF_FFFF_FFFF_FFFF; +/// Test of zero values in thread BSS +#[thread_local] +static mut TBSS_TEST_ZERO: usize = 0; +/// Test of non-zero values in thread data. 
+#[thread_local] +static mut TDATA_TEST_NONZERO: usize = 0xFFFF_FFFF_FFFF_FFFF; + +pub static KERNEL_BASE: AtomicUsize = AtomicUsize::new(0); +pub static KERNEL_SIZE: AtomicUsize = AtomicUsize::new(0); +pub static CPU_COUNT: AtomicUsize = AtomicUsize::new(0); +pub static AP_READY: AtomicBool = AtomicBool::new(false); +static BSP_READY: AtomicBool = AtomicBool::new(false); + +#[repr(packed)] +pub struct KernelArgs { + kernel_base: u64, + kernel_size: u64, + stack_base: u64, + stack_size: u64, + env_base: u64, + env_size: u64, + dtb_base: u64, + dtb_size: u64, +} + +/// The entry to Rust, all things must be initialized +#[no_mangle] +pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! { + let env = { + let args = &*args_ptr; + + let kernel_base = args.kernel_base as usize; + let kernel_size = args.kernel_size as usize; + let stack_base = args.stack_base as usize; + let stack_size = args.stack_size as usize; + let env_base = args.env_base as usize; + let env_size = args.env_size as usize; + let dtb_base = args.dtb_base as usize; + let dtb_size = args.dtb_size as usize; + + //TODO: remove this hack for early console, use device tree + { + let mut serial = device::uart_pl011::SerialPort::new(crate::KERNEL_DEVMAP_OFFSET + 0x9000000); + serial.init(false); + serial.send(b'T'); + serial.send(b'E'); + serial.send(b'S'); + serial.send(b'T'); + serial.send(b'\r'); + serial.send(b'\n'); + *device::serial::COM1.lock() = Some(serial); + } + + // BSS should already be zero + { + assert_eq!(BSS_TEST_ZERO, 0); + assert_eq!(DATA_TEST_NONZERO, 0xFFFF_FFFF_FFFF_FFFF); + } + + KERNEL_BASE.store(kernel_base, Ordering::SeqCst); + KERNEL_SIZE.store(kernel_size, Ordering::SeqCst); + + // Initialize logger + log::init_logger(|r| { + use core::fmt::Write; + let _ = write!( + crate::debug::Writer::new(), + "{}:{} -- {}\n", + r.target(), + r.level(), + r.args() + ); + }); + + info!("Redox OS starting..."); + info!("Kernel: {:X}:{:X}", kernel_base, kernel_base + kernel_size); + 
info!("Stack: {:X}:{:X}", stack_base, stack_base + stack_size); + info!("Env: {:X}:{:X}", env_base, env_base + env_size); + info!("DTB: {:X}:{:X}", dtb_base, dtb_base + dtb_size); + + //TODO: Until fixed, the DTB is at DEVMAP_OFFSET + dtb_base + // This is not required after paging is enabled because paging fixes this + device_tree::fill_memory_map(crate::KERNEL_DEVMAP_OFFSET + dtb_base, dtb_size); + let env_size = device_tree::fill_env_data(crate::KERNEL_DEVMAP_OFFSET + dtb_base, dtb_size, env_base); + + // Initialize RMM + println!("RMM INIT START"); + crate::arch::rmm::init(kernel_base, kernel_size + stack_size); + println!("RMM INIT COMPLETE"); + + // Initialize paging + println!("PAGING INIT START"); + let (mut active_table, _tcb_offset) = paging::init(0); + println!("PAGING INIT COMPLETE"); + + // Test tdata and tbss + { + assert_eq!(TBSS_TEST_ZERO, 0); + TBSS_TEST_ZERO += 1; + assert_eq!(TBSS_TEST_ZERO, 1); + assert_eq!(TDATA_TEST_NONZERO, 0xFFFF_FFFF_FFFF_FFFF); + TDATA_TEST_NONZERO -= 1; + assert_eq!(TDATA_TEST_NONZERO, 0xFFFF_FFFF_FFFF_FFFE); + } + + // Reset AP variables + CPU_COUNT.store(1, Ordering::SeqCst); + AP_READY.store(false, Ordering::SeqCst); + BSP_READY.store(false, Ordering::SeqCst); + + // Setup kernel heap + println!("ALLOCATOR INIT START"); + allocator::init(&mut active_table); + println!("ALLOCATOR INIT COMPLETE"); + + // Activate memory logging + println!("LOG INIT START"); + log::init(); + println!("LOG INIT COMPLETE"); + + // Initialize devices + println!("DEVICE INIT START"); + device::init(&mut active_table); + println!("DEVICE INIT COMPLETE"); + + // Initialize all of the non-core devices not otherwise needed to complete initialization + println!("DEVICE INIT NONCORE START"); + device::init_noncore(); + println!("DEVICE INIT NONCORE COMPLETE"); + + BSP_READY.store(true, Ordering::SeqCst); + + slice::from_raw_parts(env_base as *const u8, env_size) + }; + + println!("KMAIN"); + crate::kmain(CPU_COUNT.load(Ordering::SeqCst), env); +} + 
+#[repr(packed)] +pub struct KernelArgsAp { + cpu_id: u64, + page_table: u64, + stack_start: u64, + stack_end: u64, +} + +/// Entry to rust for an AP +pub unsafe extern fn kstart_ap(args_ptr: *const KernelArgsAp) -> ! { + loop{} +} + +#[naked] +pub unsafe fn usermode(ip: usize, sp: usize, arg: usize, singlestep: bool) -> ! { + let cpu_id: usize = 0; + let uspace_tls_start = (crate::USER_TLS_OFFSET + crate::USER_TLS_SIZE * cpu_id); + let spsr: u32 = 0; + + llvm_asm!("msr tpidr_el0, $0" : : "r"(uspace_tls_start) : : "volatile"); + llvm_asm!("msr spsr_el1, $0" : : "r"(spsr) : : "volatile"); + llvm_asm!("msr elr_el1, $0" : : "r"(ip) : : "volatile"); + llvm_asm!("msr sp_el0, $0" : : "r"(sp) : : "volatile"); + + llvm_asm!("mov x0, $0" : : "r"(arg) : : "volatile"); + llvm_asm!("eret" : : : : "volatile"); + + unreachable!(); +} diff --git a/src/arch/aarch64/stop.rs b/src/arch/aarch64/stop.rs new file mode 100644 index 0000000..b44c759 --- /dev/null +++ b/src/arch/aarch64/stop.rs @@ -0,0 +1,21 @@ +#[no_mangle] +pub unsafe extern fn kreset() -> ! { + println!("kreset"); + + let val: u32 = 0x8400_0009; + llvm_asm!("mov x0, $0" : : "r"(val) : : "volatile"); + llvm_asm!("hvc #0" : : : : "volatile"); + + unreachable!(); +} + +#[no_mangle] +pub unsafe extern fn kstop() -> ! 
{ + println!("kstop"); + + let val: u32 = 0x8400_0008; + llvm_asm!("mov x0, $0" : : "r"(val) : : "volatile"); + llvm_asm!("hvc #0" : : : : "volatile"); + + unreachable!(); +} diff --git a/src/arch/mod.rs b/src/arch/mod.rs index 1abbd03..813ee20 100644 --- a/src/arch/mod.rs +++ b/src/arch/mod.rs @@ -1,5 +1,11 @@ +#[cfg(target_arch = "aarch64")] +#[macro_use] +pub mod aarch64; +#[cfg(target_arch = "aarch64")] +pub use self::aarch64::*; + #[cfg(target_arch = "x86_64")] #[macro_use] pub mod x86_64; #[cfg(target_arch = "x86_64")] -pub use self::x86_64::*; \ No newline at end of file +pub use self::x86_64::*; diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs index 852de3a..1b509fe 100644 --- a/src/arch/x86_64/paging/mod.rs +++ b/src/arch/x86_64/paging/mod.rs @@ -365,9 +365,9 @@ impl InactivePageTable { InactivePageTable { p4_frame: frame } } - pub unsafe fn from_address(cr3: usize) -> InactivePageTable { + pub unsafe fn from_address(address: usize) -> InactivePageTable { InactivePageTable { - p4_frame: Frame::containing_address(PhysicalAddress::new(cr3)), + p4_frame: Frame::containing_address(PhysicalAddress::new(address)), } } diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs new file mode 100644 index 0000000..b57bfa4 --- /dev/null +++ b/src/context/arch/aarch64.rs @@ -0,0 +1,359 @@ +use core::mem; +use core::sync::atomic::{AtomicBool, AtomicUsize, ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT, Ordering}; + +use crate::device::cpu::registers::{control_regs, tlb}; + +/// This must be used by the kernel to ensure that context switches are done atomically +/// Compare and exchange this to true when beginning a context switch on any CPU +/// The `Context::switch_to` function will set it back to false, allowing other CPU's to switch +/// This must be done, as no locks can be held on the stack during switch +pub static CONTEXT_SWITCH_LOCK: AtomicBool = ATOMIC_BOOL_INIT; + +#[derive(Clone, Debug)] +pub struct Context { + elr_el1: usize, + 
sp_el0: usize, + ttbr0_el1: usize, /* Pointer to U4 translation table for this Context */ + tpidr_el0: usize, /* Pointer to TLS region for this Context */ + tpidrro_el0: usize, /* Pointer to TLS (read-only) region for this Context */ + rflags: usize, + esr_el1: usize, + padding: usize, + sp: usize, /* Stack Pointer (x31) */ + lr: usize, /* Link Register (x30) */ + fp: usize, /* Frame pointer Register (x29) */ + x28: usize, /* Callee saved Register */ + x27: usize, /* Callee saved Register */ + x26: usize, /* Callee saved Register */ + x25: usize, /* Callee saved Register */ + x24: usize, /* Callee saved Register */ + x23: usize, /* Callee saved Register */ + x22: usize, /* Callee saved Register */ + x21: usize, /* Callee saved Register */ + x20: usize, /* Callee saved Register */ + x19: usize, /* Callee saved Register */ + x18: usize, + x17: usize, + x16: usize, + x15: usize, /* Temporary Register */ + x14: usize, /* Temporary Register */ + x13: usize, /* Temporary Register */ + x12: usize, /* Temporary Register */ + x11: usize, /* Temporary Register */ + x10: usize, /* Temporary Register */ + x9: usize, /* Temporary Register */ + x8: usize, /* Indirect location Register */ +} + +static CONTEXT_COUNT: AtomicUsize = ATOMIC_USIZE_INIT; + +impl Context { + pub fn new() -> Context { + Context { + elr_el1: 0, + sp_el0: 0, + ttbr0_el1: 0, + tpidr_el0: 0, + tpidrro_el0: 0, + rflags: 0, /* spsr_el1 */ + esr_el1: 0, + padding: 0xbeef0000 | CONTEXT_COUNT.fetch_add(1, Ordering::SeqCst), + sp: 0, + lr: 0, + fp: 0, + x28: 0, + x27: 0, + x26: 0, + x25: 0, + x24: 0, + x23: 0, + x22: 0, + x21: 0, + x20: 0, + x19: 0, + x18: 0, + x17: 0, + x16: 0, + x15: 0, + x14: 0, + x13: 0, + x12: 0, + x11: 0, + x10: 0, + x9: 0, + x8: 0, + } + } + + pub fn get_page_table(&self) -> usize { + self.ttbr0_el1 + } + + pub fn set_fx(&mut self, _address: usize) { + } + + pub fn set_page_table(&mut self, address: usize) { + self.ttbr0_el1 = address; + } + + pub fn set_stack(&mut self, address: usize) { + 
self.sp = address; + } + + pub fn set_lr(&mut self, address: usize) { + self.lr = address; + } + + pub fn set_fp(&mut self, address: usize) { + self.fp = address; + } + + pub fn set_context_handle(&mut self) { + let address = self as *const _ as usize; + self.tpidrro_el0 = address; + } + + pub fn get_context_handle(&mut self) -> usize { + self.tpidrro_el0 + } + + pub unsafe fn signal_stack(&mut self, handler: extern fn(usize), sig: u8) { + self.push_stack(sig as usize); + self.push_stack(handler as usize); + let lr = self.lr.clone(); + self.push_stack(lr); + self.set_lr(signal_handler_wrapper as usize); + } + + pub unsafe fn push_stack(&mut self, value: usize) { + self.sp -= 1 * mem::size_of::(); + *(self.sp as *mut usize) = value; + } + + pub unsafe fn pop_stack(&mut self) -> usize { + let value = *(self.sp as *const usize); + self.sp += 1 * mem::size_of::(); + value + } + + #[cold] + #[inline(never)] + #[naked] + pub unsafe fn switch_to(&mut self, next: &mut Context) { + self.ttbr0_el1 = control_regs::ttbr0_el1() as usize; + if next.ttbr0_el1 != self.ttbr0_el1 { + control_regs::ttbr0_el1_write(next.ttbr0_el1 as u64); + tlb::flush_all(); + } + + llvm_asm!("mov $0, x8" : "=r"(self.x8) : : "memory" : "volatile"); + llvm_asm!("mov x8, $0" : : "r"(next.x8) :"memory" : "volatile"); + + llvm_asm!("mov $0, x9" : "=r"(self.x9) : : "memory" : "volatile"); + llvm_asm!("mov x9, $0" : : "r"(next.x9) :"memory" : "volatile"); + + llvm_asm!("mov $0, x10" : "=r"(self.x10) : : "memory" : "volatile"); + llvm_asm!("mov x10, $0" : : "r"(next.x10) :"memory" : "volatile"); + + llvm_asm!("mov $0, x11" : "=r"(self.x11) : : "memory" : "volatile"); + llvm_asm!("mov x11, $0" : : "r"(next.x11) :"memory" : "volatile"); + + llvm_asm!("mov $0, x12" : "=r"(self.x12) : : "memory" : "volatile"); + llvm_asm!("mov x12, $0" : : "r"(next.x12) :"memory" : "volatile"); + + llvm_asm!("mov $0, x13" : "=r"(self.x13) : : "memory" : "volatile"); + llvm_asm!("mov x13, $0" : : "r"(next.x13) :"memory" : 
"volatile"); + + llvm_asm!("mov $0, x14" : "=r"(self.x14) : : "memory" : "volatile"); + llvm_asm!("mov x14, $0" : : "r"(next.x14) :"memory" : "volatile"); + + llvm_asm!("mov $0, x15" : "=r"(self.x15) : : "memory" : "volatile"); + llvm_asm!("mov x15, $0" : : "r"(next.x15) :"memory" : "volatile"); + + llvm_asm!("mov $0, x16" : "=r"(self.x16) : : "memory" : "volatile"); + llvm_asm!("mov x16, $0" : : "r"(next.x16) :"memory" : "volatile"); + + llvm_asm!("mov $0, x17" : "=r"(self.x17) : : "memory" : "volatile"); + llvm_asm!("mov x17, $0" : : "r"(next.x17) :"memory" : "volatile"); + + llvm_asm!("mov $0, x18" : "=r"(self.x18) : : "memory" : "volatile"); + llvm_asm!("mov x18, $0" : : "r"(next.x18) :"memory" : "volatile"); + + llvm_asm!("mov $0, x19" : "=r"(self.x19) : : "memory" : "volatile"); + llvm_asm!("mov x19, $0" : : "r"(next.x19) :"memory" : "volatile"); + + llvm_asm!("mov $0, x20" : "=r"(self.x20) : : "memory" : "volatile"); + llvm_asm!("mov x20, $0" : : "r"(next.x20) :"memory" : "volatile"); + + llvm_asm!("mov $0, x21" : "=r"(self.x21) : : "memory" : "volatile"); + llvm_asm!("mov x21, $0" : : "r"(next.x21) :"memory" : "volatile"); + + llvm_asm!("mov $0, x22" : "=r"(self.x22) : : "memory" : "volatile"); + llvm_asm!("mov x22, $0" : : "r"(next.x22) :"memory" : "volatile"); + + llvm_asm!("mov $0, x23" : "=r"(self.x23) : : "memory" : "volatile"); + llvm_asm!("mov x23, $0" : : "r"(next.x23) :"memory" : "volatile"); + + llvm_asm!("mov $0, x24" : "=r"(self.x24) : : "memory" : "volatile"); + llvm_asm!("mov x24, $0" : : "r"(next.x24) :"memory" : "volatile"); + + llvm_asm!("mov $0, x25" : "=r"(self.x25) : : "memory" : "volatile"); + llvm_asm!("mov x25, $0" : : "r"(next.x25) :"memory" : "volatile"); + + llvm_asm!("mov $0, x26" : "=r"(self.x26) : : "memory" : "volatile"); + llvm_asm!("mov x26, $0" : : "r"(next.x26) :"memory" : "volatile"); + + llvm_asm!("mov $0, x27" : "=r"(self.x27) : : "memory" : "volatile"); + llvm_asm!("mov x27, $0" : : "r"(next.x27) :"memory" : 
"volatile"); + + llvm_asm!("mov $0, x28" : "=r"(self.x28) : : "memory" : "volatile"); + llvm_asm!("mov x28, $0" : : "r"(next.x28) :"memory" : "volatile"); + + llvm_asm!("mov $0, x29" : "=r"(self.fp) : : "memory" : "volatile"); + llvm_asm!("mov x29, $0" : : "r"(next.fp) :"memory" : "volatile"); + + llvm_asm!("mov $0, x30" : "=r"(self.lr) : : "memory" : "volatile"); + llvm_asm!("mov x30, $0" : : "r"(next.lr) :"memory" : "volatile"); + + llvm_asm!("mrs $0, elr_el1" : "=r"(self.elr_el1) : : "memory" : "volatile"); + llvm_asm!("msr elr_el1, $0" : : "r"(next.elr_el1) : "memory" : "volatile"); + + llvm_asm!("mrs $0, sp_el0" : "=r"(self.sp_el0) : : "memory" : "volatile"); + llvm_asm!("msr sp_el0, $0" : : "r"(next.sp_el0) : "memory" : "volatile"); + + llvm_asm!("mrs $0, tpidr_el0" : "=r"(self.tpidr_el0) : : "memory" : "volatile"); + llvm_asm!("msr tpidr_el0, $0" : : "r"(next.tpidr_el0) : "memory" : "volatile"); + + llvm_asm!("mrs $0, tpidrro_el0" : "=r"(self.tpidrro_el0) : : "memory" : "volatile"); + llvm_asm!("msr tpidrro_el0, $0" : : "r"(next.tpidrro_el0) : "memory" : "volatile"); + + llvm_asm!("mrs $0, spsr_el1" : "=r"(self.rflags) : : "memory" : "volatile"); + llvm_asm!("msr spsr_el1, $0" : : "r"(next.rflags) : "memory" : "volatile"); + + llvm_asm!("mrs $0, esr_el1" : "=r"(self.esr_el1) : : "memory" : "volatile"); + llvm_asm!("msr esr_el1, $0" : : "r"(next.esr_el1) : "memory" : "volatile"); + + llvm_asm!("mov $0, sp" : "=r"(self.sp) : : "memory" : "volatile"); + llvm_asm!("mov sp, $0" : : "r"(next.sp) : "memory" : "volatile"); + } +} + +#[allow(dead_code)] +#[repr(packed)] +pub struct SignalHandlerStack { + x28: usize, /* Callee saved Register */ + x27: usize, /* Callee saved Register */ + x26: usize, /* Callee saved Register */ + x25: usize, /* Callee saved Register */ + x24: usize, /* Callee saved Register */ + x23: usize, /* Callee saved Register */ + x22: usize, /* Callee saved Register */ + x21: usize, /* Callee saved Register */ + x20: usize, /* Callee saved 
Register */ + x19: usize, /* Callee saved Register */ + x18: usize, + x17: usize, + x16: usize, + x15: usize, /* Temporary Register */ + x14: usize, /* Temporary Register */ + x13: usize, /* Temporary Register */ + x12: usize, /* Temporary Register */ + x11: usize, /* Temporary Register */ + x10: usize, /* Temporary Register */ + x9: usize, /* Temporary Register */ + x8: usize, /* Indirect location Register */ + x7: usize, + x6: usize, + x5: usize, + x4: usize, + x3: usize, + x2: usize, + x1: usize, + x0: usize, + lr: usize, + handler: extern fn(usize), + sig: usize, +} + +#[naked] +unsafe extern fn signal_handler_wrapper() { + #[inline(never)] + unsafe fn inner(stack: &SignalHandlerStack) { + (stack.handler)(stack.sig); + } + + // Push scratch registers + llvm_asm!("str x0, [sp, #-8]! + str x1, [sp, #-8]! + str x2, [sp, #-8]! + str x3, [sp, #-8]! + str x4, [sp, #-8]! + str x5, [sp, #-8]! + str x6, [sp, #-8]! + str x7, [sp, #-8]! + str x8, [sp, #-8]! + str x9, [sp, #-8]! + str x10, [sp, #-8]! + str x11, [sp, #-8]! + str x12, [sp, #-8]! + str x13, [sp, #-8]! + str x14, [sp, #-8]! + str x15, [sp, #-8]! + str x16, [sp, #-8]! + str x17, [sp, #-8]! + str x18, [sp, #-8]! + str x19, [sp, #-8]! + str x20, [sp, #-8]! + str x21, [sp, #-8]! + str x22, [sp, #-8]! + str x23, [sp, #-8]! + str x24, [sp, #-8]! + str x25, [sp, #-8]! + str x26, [sp, #-8]! + str x27, [sp, #-8]! + str x28, [sp, #-8]!" 
+ : : : : "volatile"); + + // Get reference to stack variables + let sp: usize; + llvm_asm!("" : "={sp}"(sp) : : : "volatile"); + + let ptr = sp as *const SignalHandlerStack; + let final_lr = (*ptr).lr; + + // Call inner rust function + inner(&*(sp as *const SignalHandlerStack)); + + // Pop scratch registers, error code, and return + llvm_asm!("ldr x28, [sp], #8 + ldr x27, [sp], #8 + ldr x26, [sp], #8 + ldr x25, [sp], #8 + ldr x24, [sp], #8 + ldr x23, [sp], #8 + ldr x22, [sp], #8 + ldr x21, [sp], #8 + ldr x20, [sp], #8 + ldr x19, [sp], #8 + ldr x18, [sp], #8 + ldr x17, [sp], #8 + ldr x16, [sp], #8 + ldr x15, [sp], #8 + ldr x14, [sp], #8 + ldr x13, [sp], #8 + ldr x12, [sp], #8 + ldr x11, [sp], #8 + ldr x10, [sp], #8 + ldr x9, [sp], #8 + ldr x8, [sp], #8 + ldr x7, [sp], #8 + ldr x6, [sp], #8 + ldr x5, [sp], #8 + ldr x4, [sp], #8 + ldr x3, [sp], #8 + ldr x2, [sp], #8 + ldr x1, [sp], #8" + : : : : "volatile"); + + llvm_asm!("mov x30, $0" : : "r"(final_lr) : "memory" : "volatile"); +} diff --git a/src/context/mod.rs b/src/context/mod.rs index 3d8b7ec..6c29328 100644 --- a/src/context/mod.rs +++ b/src/context/mod.rs @@ -10,6 +10,11 @@ pub use self::context::{Context, ContextId, ContextSnapshot, Status, WaitpidKey} pub use self::list::ContextList; pub use self::switch::switch; +#[cfg(target_arch = "aarch64")] +#[path = "arch/aarch64.rs"] +mod arch; + +#[cfg(target_arch = "x86_64")] #[path = "arch/x86_64.rs"] mod arch; diff --git a/src/context/switch.rs b/src/context/switch.rs index 877033d..2e17b42 100644 --- a/src/context/switch.rs +++ b/src/context/switch.rs @@ -2,6 +2,7 @@ use core::sync::atomic::Ordering; use crate::context::signal::signal_handler; use crate::context::{arch, contexts, Context, Status, CONTEXT_ID}; +#[cfg(target_arch = "x86_64")] use crate::gdt; use crate::interrupt::irq::PIT_TICKS; use crate::interrupt; @@ -143,10 +144,13 @@ pub unsafe fn switch() -> bool { if to_ptr as usize != 0 { (*from_ptr).running = false; (*to_ptr).running = true; - if let 
Some(ref stack) = (*to_ptr).kstack { - gdt::set_tss_stack(stack.as_ptr() as usize + stack.len()); + #[cfg(target_arch = "x86_64")] + { + if let Some(ref stack) = (*to_ptr).kstack { + gdt::set_tss_stack(stack.as_ptr() as usize + stack.len()); + } + gdt::set_tcb((*to_ptr).id.into()); } - gdt::set_tcb((*to_ptr).id.into()); CONTEXT_ID.store((*to_ptr).id, Ordering::SeqCst); } diff --git a/src/devices/mod.rs b/src/devices/mod.rs index 9dc8d5b..0fb7194 100644 --- a/src/devices/mod.rs +++ b/src/devices/mod.rs @@ -1 +1 @@ -pub mod uart_16550; \ No newline at end of file +pub mod uart_16550; diff --git a/src/elf.rs b/src/elf.rs index 183ed84..d74fe3f 100644 --- a/src/elf.rs +++ b/src/elf.rs @@ -7,7 +7,10 @@ use goblin::elf::section_header::SHT_SYMTAB; #[cfg(target_arch = "x86")] pub use goblin::elf32::{header, program_header, section_header, sym}; -#[cfg(target_arch = "x86_64")] +#[cfg(any( + target_arch = "aarch64", + target_arch = "x86_64" +))] pub use goblin::elf64::{header, program_header, section_header, sym}; /// An ELF executable diff --git a/src/lib.rs b/src/lib.rs index 38452b7..aad157f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -42,6 +42,7 @@ #![feature(allocator_api)] #![feature(asm)] // TODO: Relax requirements of most asm invocations +#![cfg_attr(target_arch = "aarch64", feature(llvm_asm))] // TODO: Rewrite using asm! 
#![feature(concat_idents)] #![feature(const_fn)] #![feature(core_intrinsics)] @@ -61,6 +62,8 @@ extern crate alloc; #[macro_use] extern crate bitflags; +#[macro_use] +extern crate bitfield; extern crate goblin; extern crate linked_list_allocator; extern crate rustc_demangle; diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs index 4fd7fc1..68dbf81 100644 --- a/src/scheme/proc.rs +++ b/src/scheme/proc.rs @@ -331,6 +331,13 @@ impl Scheme for ProcScheme { Ok(value) } + #[cfg(target_arch = "aarch64")] + fn read(&self, id: usize, buf: &mut [u8]) -> Result { + //TODO + Err(Error::new(EINVAL)) + } + + #[cfg(target_arch = "x86_64")] fn read(&self, id: usize, buf: &mut [u8]) -> Result { // Don't hold a global lock during the context switch later on let info = { @@ -451,6 +458,13 @@ impl Scheme for ProcScheme { } } + #[cfg(target_arch = "aarch64")] + fn write(&self, id: usize, buf: &[u8]) -> Result { + //TODO + Err(Error::new(EINVAL)) + } + + #[cfg(target_arch = "x86_64")] fn write(&self, id: usize, buf: &[u8]) -> Result { // Don't hold a global lock during the context switch later on let info = { diff --git a/src/scheme/sys/mod.rs b/src/scheme/sys/mod.rs index f0a03d0..ef7947b 100644 --- a/src/scheme/sys/mod.rs +++ b/src/scheme/sys/mod.rs @@ -52,6 +52,7 @@ impl SysScheme { files.insert(b"scheme_num", Box::new(scheme_num::resource)); files.insert(b"syscall", Box::new(syscall::resource)); files.insert(b"uname", Box::new(uname::resource)); + #[cfg(target_arch = "x86_64")] files.insert(b"spurious_irq", Box::new(irq::spurious_irq_resource)); SysScheme { diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs index 93963ff..efbfe3b 100644 --- a/src/syscall/driver.rs +++ b/src/syscall/driver.rs @@ -18,6 +18,12 @@ fn enforce_root() -> Result<()> { } } +#[cfg(target_arch = "aarch64")] +pub fn iopl(level: usize, stack: &mut InterruptStack) -> Result { + Err(Error::new(syscall::error::ENOSYS)) +} + +#[cfg(target_arch = "x86_64")] pub fn iopl(level: usize, stack: &mut 
InterruptStack) -> Result { enforce_root()?; @@ -88,6 +94,7 @@ pub fn inner_physmap(physical_address: usize, size: usize, flags: PhysmapFlags) if flags.contains(PHYSMAP_WRITE_COMBINE) { entry_flags |= EntryFlags::HUGE_PAGE; } + #[cfg(target_arch = "x86_64")] // TODO: AARCH64 if flags.contains(PHYSMAP_NO_CACHE) { entry_flags |= EntryFlags::NO_CACHE; } diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs index e164dcf..d5e1485 100644 --- a/src/syscall/mod.rs +++ b/src/syscall/mod.rs @@ -125,13 +125,23 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u SYS_GETPPID => getppid().map(ContextId::into), SYS_CLONE => { let b = CloneFlags::from_bits_truncate(b); - let old_rsp = stack.iret.rsp; - if b.contains(flag::CLONE_STACK) { - stack.iret.rsp = c; + + #[cfg(target_arch = "aarch64")] + { + //TODO: CLONE_STACK + clone(b, bp).map(ContextId::into) + } + + #[cfg(target_arch = "x86_64")] + { + let old_rsp = stack.iret.rsp; + if b.contains(flag::CLONE_STACK) { + stack.iret.rsp = c; + } + let ret = clone(b, bp).map(ContextId::into); + stack.iret.rsp = old_rsp; + ret } - let ret = clone(b, bp).map(ContextId::into); - stack.iret.rsp = old_rsp; - ret }, SYS_EXIT => exit((b & 0xFF) << 8), SYS_KILL => kill(ContextId::from(b), c), diff --git a/src/syscall/process.rs b/src/syscall/process.rs index dfed1b1..978f867 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -106,6 +106,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { // Set clone's return value to zero. This is done because // the clone won't return like normal, which means the value // would otherwise never get set. 
+ #[cfg(target_arch = "x86_64")] // TODO if let Some(regs) = ptrace::rebase_regs_ptr_mut(context.regs, Some(&mut new_stack)) { (*regs).scratch.rax = 0; } diff --git a/syscall b/syscall index ee9a923..b12d582 160000 --- a/syscall +++ b/syscall @@ -1 +1 @@ -Subproject commit ee9a92367e6f2b174c45b47a57141b3941447600 +Subproject commit b12d582d4dd805e63972f6a9c8612518d8635889 From ea21fba3aa90458531d7a2329b94defb5cd608d6 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 12:25:42 +0000 Subject: [PATCH 02/55] build.rs: aarch64: Specify target for cc::Build Oddly, not specifying this or using aarch64-unknown-none (which would be the default that cc gets from the TARGET environment variable) both fail to invoke the appropriate compiler to build the asm code. Using aarch64-unknown-redox works but shouldn't really be needed. This is perhaps because of some odd arrangement of KTARGET, TARGET, the installed prefix toolchain and the kernel target JSON spec. The early_init asm code shall be replaced by a pure Rust bootloader eventually so let's move with this for the moment. --- build.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/build.rs b/build.rs index 8ca068f..249c9e3 100644 --- a/build.rs +++ b/build.rs @@ -166,6 +166,7 @@ mod gen { println!("cargo:rerun-if-changed=src/arch/aarch64/init/pre_kstart/early_init.S"); cc::Build::new() .file("src/arch/aarch64/init/pre_kstart/early_init.S") + .target("aarch64-unknown-redox") .compile("early_init"); } } From aa3839605f0969a054c9427cdad22bffb85a04fc Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 12:28:34 +0000 Subject: [PATCH 03/55] aarch64: Remove code-model from the JSON spec We use a target-feature for this now. 
--- targets/aarch64-unknown-none.json | 1 - 1 file changed, 1 deletion(-) diff --git a/targets/aarch64-unknown-none.json b/targets/aarch64-unknown-none.json index d36ded3..429183e 100644 --- a/targets/aarch64-unknown-none.json +++ b/targets/aarch64-unknown-none.json @@ -17,7 +17,6 @@ "dynamic-linking": false, "executables": false, "relocation-model": "pic", - "code-model": "large", "disable-redzone": true, "eliminate-frame-pointer": false, "exe-suffix": "", From 252ec249053b0f20da3762568ca5c7f9fbaa79c0 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 12:29:28 +0000 Subject: [PATCH 04/55] aarch64: Use target-feature for NEON insn suppression and tpidr_el1 use --- targets/aarch64-unknown-none.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/targets/aarch64-unknown-none.json b/targets/aarch64-unknown-none.json index 429183e..f4abc23 100644 --- a/targets/aarch64-unknown-none.json +++ b/targets/aarch64-unknown-none.json @@ -13,7 +13,7 @@ "pre-link-args": { "gcc": ["-m64", "-nostdlib", "-static"] }, - "features": "+a53,+strict-align,-fp-armv8", + "features": "+strict-align,-neon,-fp-armv8,+tpidr-el1", "dynamic-linking": false, "executables": false, "relocation-model": "pic", From 5f8b004476123499ae5f47f2e7f904c34e646370 Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Fri, 15 Jan 2021 06:56:36 -0700 Subject: [PATCH 05/55] Fix typo in InterruptStack parameter --- src/arch/aarch64/interrupt/handler.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/arch/aarch64/interrupt/handler.rs b/src/arch/aarch64/interrupt/handler.rs index 17f48ae..279c89a 100644 --- a/src/arch/aarch64/interrupt/handler.rs +++ b/src/arch/aarch64/interrupt/handler.rs @@ -86,7 +86,7 @@ impl PreservedRegisters { pub struct InterruptStack { pub elr_el1: usize, //TODO: should this push be removed? 
- pub unkknown: usize, + pub unknown: usize, pub tpidr_el0: usize, pub tpidrro_el0: usize, pub spsr_el1: usize, @@ -106,7 +106,7 @@ impl InterruptStack { println!("SPSR_EL1: {:>016X}", { self.spsr_el1 }); println!("TPIDRRO_EL0: {:>016X}", { self.tpidrro_el0 }); println!("TPIDR_EL0: {:>016X}", { self.tpidr_el0 }); - println!("UNKNOWN: {:>016X}", { self.unkknown }); + println!("UNKNOWN: {:>016X}", { self.unknown }); println!("ELR_EL1: {:>016X}", { self.elr_el1 }); } From bdea7f553a782f2ebdfdf3bc54f904f936e3befd Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Fri, 15 Jan 2021 06:57:02 -0700 Subject: [PATCH 06/55] Remove PRESENT flag from aarch64 descriptors --- src/arch/aarch64/paging/entry.rs | 10 ++++------ src/arch/aarch64/paging/mapper.rs | 6 +----- src/arch/aarch64/paging/mod.rs | 6 +++--- src/arch/aarch64/paging/table.rs | 5 ++--- 4 files changed, 10 insertions(+), 17 deletions(-) diff --git a/src/arch/aarch64/paging/entry.rs b/src/arch/aarch64/paging/entry.rs index cee4701..249732e 100644 --- a/src/arch/aarch64/paging/entry.rs +++ b/src/arch/aarch64/paging/entry.rs @@ -18,7 +18,6 @@ bitflags! { const VALID = 1 << 0; const TABLE = 1 << 1; const AF = 1 << 10; /* NOTE: TableDescriptors don't actually have an AF bit! */ - const PRESENT = 1 << 58; /* ARM ARM says this is an IGNORED bit, so using it here should be OK */ const PXNTABLE = 1 << 59; const UXNTABLE = 1 << 60; const APTABLE_0 = 1 << 61; @@ -48,7 +47,6 @@ bitflags! 
{ const CONTIGUOUS = 1 << 52; const PXN = 1 << 53; const UXN = 1 << 54; - const PRESENT = 1 << 58; /* Assuming DBM can be overloaded as PRESENT */ } } @@ -108,7 +106,7 @@ impl Entry { /// Get the associated frame, if available, for a level 4, 3, or 2 page pub fn pointed_frame(&self) -> Option { - if self.page_table_entry_flags().contains(TableDescriptorFlags::PRESENT) { + if self.page_table_entry_flags().contains(TableDescriptorFlags::VALID) { Some(Frame::containing_address(self.address())) } else { None @@ -117,7 +115,7 @@ impl Entry { /// Get the associated frame, if available, for a level 1 page pub fn pointed_frame_at_l1(&self) -> Option { - if self.page_descriptor_entry_flags().contains(PageDescriptorFlags::PRESENT) { + if self.page_descriptor_entry_flags().contains(PageDescriptorFlags::VALID) { Some(Frame::containing_address(self.address())) } else { None @@ -142,10 +140,10 @@ impl Entry { debug_assert!(frame.start_address().data() & !ADDRESS_MASK == 0); // ODDNESS Alert: We need to set the AF bit - despite this being a TableDescriptor!!! 
// The Arm ARM says this bit (bit 10) is IGNORED in Table Descriptors so hopefully this is OK - let mut translated_flags = TableDescriptorFlags::AF | TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE; + let mut translated_flags = TableDescriptorFlags::AF | TableDescriptorFlags::TABLE; if flags.contains(EntryFlags::PRESENT) { - translated_flags.insert(TableDescriptorFlags::PRESENT); + translated_flags.insert(TableDescriptorFlags::VALID); } self.0 = (frame.start_address().data() as u64) | translated_flags.bits() | (self.0 & COUNTER_MASK); diff --git a/src/arch/aarch64/paging/mapper.rs b/src/arch/aarch64/paging/mapper.rs index 3c2fe45..9828b99 100644 --- a/src/arch/aarch64/paging/mapper.rs +++ b/src/arch/aarch64/paging/mapper.rs @@ -110,8 +110,6 @@ impl Mapper { let p1 = p2.next_table_create(page.p2_index()); let mut translated_flags: PageDescriptorFlags = PageDescriptorFlags::VALID | PageDescriptorFlags::PAGE | PageDescriptorFlags::AF; - translated_flags.insert(PageDescriptorFlags::PRESENT); - if flags.contains(EntryFlags::NO_EXECUTE) { match page.start_address().get_type() { VAddrType::User => { @@ -165,8 +163,6 @@ impl Mapper { let frame = p1[page.p1_index()].pointed_frame_at_l1().expect("failed to remap: not mapped"); let mut translated_flags: PageDescriptorFlags = PageDescriptorFlags::VALID | PageDescriptorFlags::PAGE | PageDescriptorFlags::AF; - translated_flags.insert(PageDescriptorFlags::PRESENT); - if flags.contains(EntryFlags::NO_EXECUTE) { match page.start_address().get_type() { VAddrType::User => { @@ -301,7 +297,7 @@ impl Mapper { .and_then(|p2| p2.next_table(page.p2_index())) .and_then(|p1| Some(p1[page.p1_index()].page_descriptor_entry_flags())) { - if flags.contains(PageDescriptorFlags::PRESENT) { + if flags.contains(PageDescriptorFlags::VALID) { translated_flags.insert(EntryFlags::PRESENT); } diff --git a/src/arch/aarch64/paging/mod.rs b/src/arch/aarch64/paging/mod.rs index 73cc6db..b52c79d 100644 --- a/src/arch/aarch64/paging/mod.rs +++ 
b/src/arch/aarch64/paging/mod.rs @@ -301,7 +301,7 @@ impl ActivePageTable { // overwrite recursive mapping self.p4_mut()[crate::RECURSIVE_PAGE_PML4].page_table_entry_set( table.p4_frame.clone(), - TableDescriptorFlags::PRESENT | TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE, + TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE, ); self.flush_all(); @@ -311,7 +311,7 @@ impl ActivePageTable { // restore recursive mapping to original p4 table p4_table[crate::RECURSIVE_PAGE_PML4].page_table_entry_set( backup, - TableDescriptorFlags::PRESENT | TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE, + TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE, ); self.flush_all(); } @@ -357,7 +357,7 @@ impl InactivePageTable { // set up recursive mapping for the table table[crate::RECURSIVE_PAGE_PML4].page_table_entry_set( frame.clone(), - TableDescriptorFlags::PRESENT | TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE + TableDescriptorFlags::VALID | TableDescriptorFlags::TABLE ); } temporary_page.unmap(active_table); diff --git a/src/arch/aarch64/paging/table.rs b/src/arch/aarch64/paging/table.rs index 8827315..ccedf7e 100644 --- a/src/arch/aarch64/paging/table.rs +++ b/src/arch/aarch64/paging/table.rs @@ -121,8 +121,7 @@ impl Table where L: HierarchicalLevel { self.increment_entry_count(); /* Allow users to go down the page table, implement permissions at the page level */ - let mut perms = TableDescriptorFlags::PRESENT; - perms |= TableDescriptorFlags::VALID; + let mut perms = TableDescriptorFlags::VALID; perms |= TableDescriptorFlags::TABLE; self[index].page_table_entry_set(frame, perms); @@ -133,7 +132,7 @@ impl Table where L: HierarchicalLevel { fn next_table_address(&self, index: usize) -> Option { let entry_flags = self[index].page_table_entry_flags(); - if entry_flags.contains(TableDescriptorFlags::PRESENT) { + if entry_flags.contains(TableDescriptorFlags::VALID) { let table_address = self as *const _ as usize; if (table_address & 
KSPACE_ADDR_MASK) != 0 { Some((table_address << 9) | (index << 12)) From 9c3f6e3660955bf35d1d376dc1abb8610cc205bf Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 15:49:04 +0000 Subject: [PATCH 07/55] Introduce a PageTableType enum to help distinguish User and Kernel Tables --- src/allocator/linked_list.rs | 4 +- src/arch/aarch64/device/gic.rs | 2 +- src/arch/aarch64/device/rtc.rs | 2 +- src/arch/aarch64/device/serial.rs | 2 +- src/arch/aarch64/interrupt/trace.rs | 2 +- src/arch/aarch64/paging/mapper.rs | 10 ++--- src/arch/aarch64/paging/mod.rs | 20 +++++---- src/context/list.rs | 2 +- src/context/memory.rs | 63 +++++++++++++++++++++++------ src/ptrace.rs | 4 +- src/scheme/memory.rs | 7 +++- src/syscall/driver.rs | 8 +++- src/syscall/process.rs | 6 +-- src/syscall/validate.rs | 7 +++- 14 files changed, 92 insertions(+), 47 deletions(-) diff --git a/src/allocator/linked_list.rs b/src/allocator/linked_list.rs index 0b18ac7..1c8acb6 100644 --- a/src/allocator/linked_list.rs +++ b/src/allocator/linked_list.rs @@ -3,7 +3,7 @@ use core::ptr::{self, NonNull}; use linked_list_allocator::Heap; use spin::Mutex; -use crate::paging::ActivePageTable; +use crate::paging::{ActivePageTable, PageTableType}; static HEAP: Mutex> = Mutex::new(None); @@ -32,7 +32,7 @@ unsafe impl GlobalAlloc for Allocator { panic!("__rust_allocate: heap not initialized"); }; - super::map_heap(&mut ActivePageTable::new(), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE); + super::map_heap(&mut ActivePageTable::new(PageTableType::Kernel), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE); if let Some(ref mut heap) = *HEAP.lock() { heap.extend(crate::KERNEL_HEAP_SIZE); diff --git a/src/arch/aarch64/device/gic.rs b/src/arch/aarch64/device/gic.rs index 582854b..92c3724 100644 --- a/src/arch/aarch64/device/gic.rs +++ b/src/arch/aarch64/device/gic.rs @@ -57,7 +57,7 @@ pub struct GicDistIf { impl GicDistIf { unsafe fn init(&mut self) { // Map in the Distributor interface 
- let mut active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + let mut active_table = ActivePageTable::new(PageTableType::Kernel); let start_frame = Frame::containing_address(PhysicalAddress::new(0x08000000)); let end_frame = Frame::containing_address(PhysicalAddress::new(0x08000000 + 0x10000 - 1)); diff --git a/src/arch/aarch64/device/rtc.rs b/src/arch/aarch64/device/rtc.rs index de95ab4..e547523 100644 --- a/src/arch/aarch64/device/rtc.rs +++ b/src/arch/aarch64/device/rtc.rs @@ -29,7 +29,7 @@ struct Pl031rtc { impl Pl031rtc { unsafe fn init(&mut self) { - let mut active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + let mut active_table = ActivePageTable::new(PageTableType::Kernel); let start_frame = Frame::containing_address(PhysicalAddress::new(0x09010000)); let end_frame = Frame::containing_address(PhysicalAddress::new(0x09010000 + 0x1000 - 1)); diff --git a/src/arch/aarch64/device/serial.rs b/src/arch/aarch64/device/serial.rs index a852fde..1bba91e 100644 --- a/src/arch/aarch64/device/serial.rs +++ b/src/arch/aarch64/device/serial.rs @@ -16,7 +16,7 @@ pub unsafe fn init() { } let (base, size) = device_tree::diag_uart_range(crate::KERNEL_DTB_OFFSET, crate::KERNEL_DTB_MAX_SIZE).unwrap(); - let mut active_ktable = unsafe { ActivePageTable::new(/* TODO PageTableType::Kernel */) }; + let mut active_ktable = unsafe { ActivePageTable::new(PageTableType::Kernel) }; let mut flush_all = MapperFlushAll::new(); let start_frame = Frame::containing_address(PhysicalAddress::new(base)); diff --git a/src/arch/aarch64/interrupt/trace.rs b/src/arch/aarch64/interrupt/trace.rs index 7db7bf1..37eb101 100644 --- a/src/arch/aarch64/interrupt/trace.rs +++ b/src/arch/aarch64/interrupt/trace.rs @@ -12,7 +12,7 @@ pub unsafe fn stack_trace() { println!("TRACE: {:>016x}", fp); //Maximum 64 frames - let active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + let active_table = ActivePageTable::new(PageTableType::User); for _frame in 
0..64 { if let Some(pc_fp) = fp.checked_add(mem::size_of::()) { if active_table.translate(VirtualAddress::new(fp)).is_some() && active_table.translate(VirtualAddress::new(pc_fp)).is_some() { diff --git a/src/arch/aarch64/paging/mapper.rs b/src/arch/aarch64/paging/mapper.rs index 3c2fe45..823b539 100644 --- a/src/arch/aarch64/paging/mapper.rs +++ b/src/arch/aarch64/paging/mapper.rs @@ -3,7 +3,7 @@ use core::ptr::Unique; use crate::memory::{allocate_frames, deallocate_frames, Frame}; -use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress, VAddrType}; +use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress, VirtualAddressType}; use super::entry::{EntryFlags, PageDescriptorFlags}; use super::table::{self, Table, Level4}; @@ -114,10 +114,10 @@ impl Mapper { if flags.contains(EntryFlags::NO_EXECUTE) { match page.start_address().get_type() { - VAddrType::User => { + VirtualAddressType::User => { translated_flags.insert(PageDescriptorFlags::UXN); }, - VAddrType::Kernel => { + VirtualAddressType::Kernel => { translated_flags.insert(PageDescriptorFlags::PXN); }, } @@ -169,10 +169,10 @@ impl Mapper { if flags.contains(EntryFlags::NO_EXECUTE) { match page.start_address().get_type() { - VAddrType::User => { + VirtualAddressType::User => { translated_flags.insert(PageDescriptorFlags::UXN); }, - VAddrType::Kernel => { + VirtualAddressType::Kernel => { translated_flags.insert(PageDescriptorFlags::PXN); }, } diff --git a/src/arch/aarch64/paging/mod.rs b/src/arch/aarch64/paging/mod.rs index 73cc6db..ff64fd6 100644 --- a/src/arch/aarch64/paging/mod.rs +++ b/src/arch/aarch64/paging/mod.rs @@ -169,7 +169,7 @@ pub unsafe fn init( init_mair(); - let mut active_table = ActivePageTable::new_unlocked(); + let mut active_table = ActivePageTable::new_unlocked(PageTableType::Kernel); let flush_all = map_tss(cpu_id, &mut active_table); flush_all.flush(&mut active_table); @@ -183,12 +183,12 @@ pub unsafe fn init_ap( ) -> usize { init_mair(); - let 
mut active_table = ActivePageTable::new_unlocked(); + let mut active_table = ActivePageTable::new_unlocked(PageTableType::Kernel); let mut new_table = InactivePageTable::from_address(bsp_table); let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new( - crate::USER_TMP_MISC_OFFSET, + crate::KERNEL_TMP_MISC_OFFSET, ))); active_table.with(&mut new_table, &mut temporary_page, |mapper| { @@ -230,8 +230,7 @@ impl DerefMut for ActivePageTable { impl ActivePageTable { //TODO: table_type argument - pub unsafe fn new() -> ActivePageTable { - let table_type = PageTableType::Kernel; + pub unsafe fn new(table_type: PageTableType) -> ActivePageTable { page_table_lock(); ActivePageTable { mapper: Mapper::new(match table_type { @@ -243,8 +242,7 @@ impl ActivePageTable { } //TODO: table_type argument - pub unsafe fn new_unlocked() -> ActivePageTable { - let table_type = PageTableType::Kernel; + pub unsafe fn new_unlocked(table_type: PageTableType) -> ActivePageTable { ActivePageTable { mapper: Mapper::new(match table_type { PageTableType::User => MapperType::User, @@ -394,7 +392,7 @@ impl PhysicalAddress { #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct VirtualAddress(usize); -pub enum VAddrType { +pub enum VirtualAddressType { User, Kernel } @@ -408,11 +406,11 @@ impl VirtualAddress { self.0 } - pub fn get_type(&self) -> VAddrType { + pub fn get_type(&self) -> VirtualAddressType { if ((self.0 >> 48) & 0xffff) == 0xffff { - VAddrType::Kernel + VirtualAddressType::Kernel } else { - VAddrType::User + VirtualAddressType::User } } } diff --git a/src/context/list.rs b/src/context/list.rs index d56b0e2..3c880fe 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -4,7 +4,7 @@ use alloc::collections::BTreeMap; use core::alloc::{GlobalAlloc, Layout}; use core::{iter, mem}; use core::sync::atomic::Ordering; -use crate::paging; +use crate::paging::{ActivePageTable, PageTableType}; use spin::RwLock; use 
crate::syscall::error::{Result, Error, EAGAIN}; diff --git a/src/context/memory.rs b/src/context/memory.rs index bd24a24..e050467 100644 --- a/src/context/memory.rs +++ b/src/context/memory.rs @@ -15,7 +15,7 @@ use crate::arch::paging::PAGE_SIZE; use crate::context::file::FileDescriptor; use crate::ipi::{ipi, IpiKind, IpiTarget}; use crate::memory::Frame; -use crate::paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress}; +use crate::paging::{ActivePageTable, InactivePageTable, PageTableType, Page, PageIter, PhysicalAddress, VirtualAddress, VirtualAddressType}; use crate::paging::entry::EntryFlags; use crate::paging::mapper::MapperFlushAll; use crate::paging::temporary_page::TemporaryPage; @@ -312,7 +312,10 @@ impl Grant { } pub fn physmap(from: PhysicalAddress, to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match to.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -339,7 +342,10 @@ impl Grant { } pub fn map(to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match to.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -365,7 +371,10 @@ impl Grant { } pub fn map_inactive(from: VirtualAddress, to: VirtualAddress, size: usize, flags: EntryFlags, desc_opt: Option, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match to.get_type() { + 
VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; //TODO: Do not allocate let mut frames = VecDeque::with_capacity(size/PAGE_SIZE); @@ -406,7 +415,10 @@ impl Grant { pub fn secret_clone(&self, new_start: VirtualAddress) -> Grant { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match new_start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -463,7 +475,10 @@ impl Grant { pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match new_start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -499,7 +514,11 @@ impl Grant { pub fn unmap(mut self) { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start_address().get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; + let mut flush_all = MapperFlushAll::new(); @@ -528,7 +547,10 @@ impl Grant { pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start_address().get_type() { + VirtualAddressType::User => unsafe { 
ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; active_table.with(new_table, temporary_page, |mapper| { let start_page = Page::containing_address(self.start_address()); @@ -703,7 +725,10 @@ impl Memory { } fn map(&mut self, clear: bool) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -723,7 +748,10 @@ impl Memory { } fn unmap(&mut self) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -738,7 +766,10 @@ impl Memory { /// A complicated operation to move a piece of memory to a new page table /// It also allows for changing the address at the same time pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match new_start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -760,7 +791,10 @@ impl Memory { } pub fn remap(&mut self, new_flags: EntryFlags) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel 
=> unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -775,7 +809,10 @@ impl Memory { } pub fn resize(&mut self, new_size: usize, clear: bool) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; //TODO: Calculate page changes to minimize operations if new_size > self.size { diff --git a/src/ptrace.rs b/src/ptrace.rs index 318cf5c..930b10e 100644 --- a/src/ptrace.rs +++ b/src/ptrace.rs @@ -9,7 +9,7 @@ use crate::{ entry::EntryFlags, mapper::MapperFlushAll, temporary_page::TemporaryPage, - ActivePageTable, InactivePageTable, Page, PAGE_SIZE, VirtualAddress + ActivePageTable, InactivePageTable, PageTableType, Page, PAGE_SIZE, VirtualAddress } }, common::unique::Unique, @@ -458,7 +458,7 @@ where F: FnOnce(*mut u8) -> Result<()> // in `proc:/mem`, or return a partial read/write. 
let start = Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)); - let mut active_page_table = unsafe { ActivePageTable::new() }; + let mut active_page_table = unsafe { ActivePageTable::new(PageTableType::User) }; let mut target_page_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) }; diff --git a/src/scheme/memory.rs b/src/scheme/memory.rs index fe8810e..5fc4e55 100644 --- a/src/scheme/memory.rs +++ b/src/scheme/memory.rs @@ -1,7 +1,7 @@ use crate::context; use crate::context::memory::{entry_flags, Grant}; use crate::memory::{free_frames, used_frames, PAGE_SIZE}; -use crate::paging::{ActivePageTable, VirtualAddress}; +use crate::paging::{ActivePageTable, PageTableType, VirtualAddress, VirtualAddressType}; use crate::syscall::data::{Map, OldMap, StatVfs}; use crate::syscall::error::*; use crate::syscall::flag::MapFlags; @@ -48,7 +48,10 @@ impl Scheme for MemoryScheme { // Make sure it's *absolutely* not mapped already // TODO: Keep track of all allocated memory so this isn't necessary - let active_table = unsafe { ActivePageTable::new() }; + let active_table = match VirtualAddress::new(map.address).get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; for page in region.pages() { if active_table.translate_page(page).is_some() { diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs index efbfe3b..f6e39b6 100644 --- a/src/syscall/driver.rs +++ b/src/syscall/driver.rs @@ -1,6 +1,6 @@ use crate::interrupt::InterruptStack; use crate::memory::{allocate_frames_complex, deallocate_frames, Frame}; -use crate::paging::{ActivePageTable, PhysicalAddress, VirtualAddress}; +use crate::paging::{ActivePageTable, PageTableType, PhysicalAddress, VirtualAddress, VirtualAddressType}; use crate::paging::entry::EntryFlags; use crate::context; use crate::context::memory::{Grant, Region}; @@ -153,7 
+153,11 @@ pub fn physunmap(virtual_address: usize) -> Result { pub fn virttophys(virtual_address: usize) -> Result { enforce_root()?; - let active_table = unsafe { ActivePageTable::new() }; + let active_table = match VirtualAddress::new(virtual_address).get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; + match active_table.translate(VirtualAddress::new(virtual_address)) { Some(physical_address) => Ok(physical_address.data()), None => Err(Error::new(EFAULT)) diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 978f867..535f68d 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -19,7 +19,7 @@ use crate::memory::allocate_frames; use crate::paging::entry::EntryFlags; use crate::paging::mapper::MapperFlushAll; use crate::paging::temporary_page::TemporaryPage; -use crate::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, PAGE_SIZE}; +use crate::paging::{ActivePageTable, InactivePageTable, PageTableType, Page, VirtualAddress, PAGE_SIZE}; use crate::{ptrace, syscall}; use crate::scheme::FileHandle; use crate::start::usermode; @@ -338,7 +338,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { context.arch = arch; - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = unsafe { ActivePageTable::new(PageTableType::User) }; let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET))); @@ -1277,7 +1277,7 @@ pub fn mprotect(address: usize, size: usize, flags: MapFlags) -> Result { let end_offset = size.checked_sub(1).ok_or(Error::new(EFAULT))?; let end_address = address.checked_add(end_offset).ok_or(Error::new(EFAULT))?; - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = unsafe { ActivePageTable::new(PageTableType::User) }; let mut flush_all = 
MapperFlushAll::new(); diff --git a/src/syscall/validate.rs b/src/syscall/validate.rs index 3961feb..5806a53 100644 --- a/src/syscall/validate.rs +++ b/src/syscall/validate.rs @@ -1,6 +1,6 @@ use core::{mem, slice}; -use crate::paging::{ActivePageTable, Page, VirtualAddress}; +use crate::paging::{ActivePageTable, PageTableType, Page, VirtualAddress, VirtualAddressType}; use crate::paging::entry::EntryFlags; use crate::syscall::error::*; @@ -8,7 +8,10 @@ fn validate(address: usize, size: usize, flags: EntryFlags) -> Result<()> { let end_offset = size.checked_sub(1).ok_or(Error::new(EFAULT))?; let end_address = address.checked_add(end_offset).ok_or(Error::new(EFAULT))?; - let active_table = unsafe { ActivePageTable::new() }; + let active_table = match VirtualAddress::new(address).get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let start_page = Page::containing_address(VirtualAddress::new(address)); let end_page = Page::containing_address(VirtualAddress::new(end_address)); From e0a7471cf8ddf819ef1dd7780e6834f48ea1a434 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 15:50:00 +0000 Subject: [PATCH 08/55] aarch64: Add a set_tcb method to setup tpidr_el0 --- src/context/arch/aarch64.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs index b57bfa4..5f5e2a3 100644 --- a/src/context/arch/aarch64.rs +++ b/src/context/arch/aarch64.rs @@ -104,6 +104,10 @@ impl Context { self.lr = address; } + pub fn set_tcb(&mut self, pid: usize) { + self.tpidr_el0 = (crate::USER_TCB_OFFSET + pid * crate::PAGE_SIZE); + } + pub fn set_fp(&mut self, address: usize) { self.fp = address; } From c5e077546a03dd311f4a00c0181af7fa2db31103 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 15:51:47 +0000 Subject: [PATCH 09/55] aarch64: spawn: split out arch 
specific mods --- src/context/list.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/context/list.rs b/src/context/list.rs index 3c880fe..2e85ae8 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -81,12 +81,23 @@ impl ContextList { } let mut stack = vec![0; 65_536].into_boxed_slice(); let offset = stack.len() - mem::size_of::(); + + #[cfg(target_arch = "x86_64")] unsafe { let offset = stack.len() - mem::size_of::(); let func_ptr = stack.as_mut_ptr().add(offset); *(func_ptr as *mut usize) = func as usize; } - context.arch.set_page_table(unsafe { paging::ActivePageTable::new().address() }); + + #[cfg(target_arch = "aarch64")] + { + let context_id = context.id.into(); + context.arch.set_tcb(context_id); + context.arch.set_lr(func as usize); + context.arch.set_context_handle(); + } + + context.arch.set_page_table(unsafe { ActivePageTable::new(PageTableType::User).address() }); context.arch.set_fx(fx.as_ptr() as usize); context.arch.set_stack(stack.as_ptr() as usize + offset); context.kfx = Some(fx); From 76129ddf75bc2ce388d07e5b46b5173a6e638eca Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 15:54:25 +0000 Subject: [PATCH 10/55] aarch64: Mirror PRESENT and VALID bits in Page and Table descriptors --- src/arch/aarch64/paging/entry.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/arch/aarch64/paging/entry.rs b/src/arch/aarch64/paging/entry.rs index cee4701..09381b2 100644 --- a/src/arch/aarch64/paging/entry.rs +++ b/src/arch/aarch64/paging/entry.rs @@ -15,10 +15,10 @@ pub struct PageDescriptor(u64); bitflags! { pub struct TableDescriptorFlags: u64 { + const PRESENT = 1 << 0; const VALID = 1 << 0; const TABLE = 1 << 1; const AF = 1 << 10; /* NOTE: TableDescriptors don't actually have an AF bit! 
*/ - const PRESENT = 1 << 58; /* ARM ARM says this is an IGNORED bit, so using it here should be OK */ const PXNTABLE = 1 << 59; const UXNTABLE = 1 << 60; const APTABLE_0 = 1 << 61; @@ -32,6 +32,7 @@ bitflags! { bitflags! { pub struct PageDescriptorFlags: u64 { + const PRESENT = 1 << 0; const VALID = 1 << 0; const PAGE = 1 << 1; const ATTR_INDEX_0 = 1 << 2; @@ -48,7 +49,6 @@ bitflags! { const CONTIGUOUS = 1 << 52; const PXN = 1 << 53; const UXN = 1 << 54; - const PRESENT = 1 << 58; /* Assuming DBM can be overloaded as PRESENT */ } } From ae3a55f5d15d0f83f7e64fab646f5263aa01c1f9 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 15:49:04 +0000 Subject: [PATCH 11/55] Introduce a PageTableType enum to help distinguish User and Kernel Tables --- src/allocator/linked_list.rs | 4 +- src/arch/aarch64/device/gic.rs | 2 +- src/arch/aarch64/device/rtc.rs | 2 +- src/arch/aarch64/device/serial.rs | 2 +- src/arch/aarch64/interrupt/trace.rs | 2 +- src/arch/aarch64/paging/mapper.rs | 10 ++--- src/arch/aarch64/paging/mod.rs | 20 +++++---- src/context/list.rs | 2 +- src/context/memory.rs | 63 +++++++++++++++++++++++------ src/ptrace.rs | 4 +- src/scheme/memory.rs | 7 +++- src/syscall/driver.rs | 8 +++- src/syscall/process.rs | 6 +-- src/syscall/validate.rs | 7 +++- 14 files changed, 92 insertions(+), 47 deletions(-) diff --git a/src/allocator/linked_list.rs b/src/allocator/linked_list.rs index 0b18ac7..1c8acb6 100644 --- a/src/allocator/linked_list.rs +++ b/src/allocator/linked_list.rs @@ -3,7 +3,7 @@ use core::ptr::{self, NonNull}; use linked_list_allocator::Heap; use spin::Mutex; -use crate::paging::ActivePageTable; +use crate::paging::{ActivePageTable, PageTableType}; static HEAP: Mutex> = Mutex::new(None); @@ -32,7 +32,7 @@ unsafe impl GlobalAlloc for Allocator { panic!("__rust_allocate: heap not initialized"); }; - super::map_heap(&mut ActivePageTable::new(), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE); + super::map_heap(&mut 
ActivePageTable::new(PageTableType::Kernel), crate::KERNEL_HEAP_OFFSET + size, crate::KERNEL_HEAP_SIZE); if let Some(ref mut heap) = *HEAP.lock() { heap.extend(crate::KERNEL_HEAP_SIZE); diff --git a/src/arch/aarch64/device/gic.rs b/src/arch/aarch64/device/gic.rs index 582854b..92c3724 100644 --- a/src/arch/aarch64/device/gic.rs +++ b/src/arch/aarch64/device/gic.rs @@ -57,7 +57,7 @@ pub struct GicDistIf { impl GicDistIf { unsafe fn init(&mut self) { // Map in the Distributor interface - let mut active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + let mut active_table = ActivePageTable::new(PageTableType::Kernel); let start_frame = Frame::containing_address(PhysicalAddress::new(0x08000000)); let end_frame = Frame::containing_address(PhysicalAddress::new(0x08000000 + 0x10000 - 1)); diff --git a/src/arch/aarch64/device/rtc.rs b/src/arch/aarch64/device/rtc.rs index de95ab4..e547523 100644 --- a/src/arch/aarch64/device/rtc.rs +++ b/src/arch/aarch64/device/rtc.rs @@ -29,7 +29,7 @@ struct Pl031rtc { impl Pl031rtc { unsafe fn init(&mut self) { - let mut active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + let mut active_table = ActivePageTable::new(PageTableType::Kernel); let start_frame = Frame::containing_address(PhysicalAddress::new(0x09010000)); let end_frame = Frame::containing_address(PhysicalAddress::new(0x09010000 + 0x1000 - 1)); diff --git a/src/arch/aarch64/device/serial.rs b/src/arch/aarch64/device/serial.rs index a852fde..1bba91e 100644 --- a/src/arch/aarch64/device/serial.rs +++ b/src/arch/aarch64/device/serial.rs @@ -16,7 +16,7 @@ pub unsafe fn init() { } let (base, size) = device_tree::diag_uart_range(crate::KERNEL_DTB_OFFSET, crate::KERNEL_DTB_MAX_SIZE).unwrap(); - let mut active_ktable = unsafe { ActivePageTable::new(/* TODO PageTableType::Kernel */) }; + let mut active_ktable = unsafe { ActivePageTable::new(PageTableType::Kernel) }; let mut flush_all = MapperFlushAll::new(); let start_frame = 
Frame::containing_address(PhysicalAddress::new(base)); diff --git a/src/arch/aarch64/interrupt/trace.rs b/src/arch/aarch64/interrupt/trace.rs index 7db7bf1..37eb101 100644 --- a/src/arch/aarch64/interrupt/trace.rs +++ b/src/arch/aarch64/interrupt/trace.rs @@ -12,7 +12,7 @@ pub unsafe fn stack_trace() { println!("TRACE: {:>016x}", fp); //Maximum 64 frames - let active_table = ActivePageTable::new(/* TODO PageTableType::Kernel */); + let active_table = ActivePageTable::new(PageTableType::User); for _frame in 0..64 { if let Some(pc_fp) = fp.checked_add(mem::size_of::()) { if active_table.translate(VirtualAddress::new(fp)).is_some() && active_table.translate(VirtualAddress::new(pc_fp)).is_some() { diff --git a/src/arch/aarch64/paging/mapper.rs b/src/arch/aarch64/paging/mapper.rs index 9828b99..4768452 100644 --- a/src/arch/aarch64/paging/mapper.rs +++ b/src/arch/aarch64/paging/mapper.rs @@ -3,7 +3,7 @@ use core::ptr::Unique; use crate::memory::{allocate_frames, deallocate_frames, Frame}; -use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress, VAddrType}; +use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress, VirtualAddressType}; use super::entry::{EntryFlags, PageDescriptorFlags}; use super::table::{self, Table, Level4}; @@ -112,10 +112,10 @@ impl Mapper { if flags.contains(EntryFlags::NO_EXECUTE) { match page.start_address().get_type() { - VAddrType::User => { + VirtualAddressType::User => { translated_flags.insert(PageDescriptorFlags::UXN); }, - VAddrType::Kernel => { + VirtualAddressType::Kernel => { translated_flags.insert(PageDescriptorFlags::PXN); }, } @@ -165,10 +165,10 @@ impl Mapper { if flags.contains(EntryFlags::NO_EXECUTE) { match page.start_address().get_type() { - VAddrType::User => { + VirtualAddressType::User => { translated_flags.insert(PageDescriptorFlags::UXN); }, - VAddrType::Kernel => { + VirtualAddressType::Kernel => { translated_flags.insert(PageDescriptorFlags::PXN); }, } diff --git 
a/src/arch/aarch64/paging/mod.rs b/src/arch/aarch64/paging/mod.rs index b52c79d..29bf820 100644 --- a/src/arch/aarch64/paging/mod.rs +++ b/src/arch/aarch64/paging/mod.rs @@ -169,7 +169,7 @@ pub unsafe fn init( init_mair(); - let mut active_table = ActivePageTable::new_unlocked(); + let mut active_table = ActivePageTable::new_unlocked(PageTableType::Kernel); let flush_all = map_tss(cpu_id, &mut active_table); flush_all.flush(&mut active_table); @@ -183,12 +183,12 @@ pub unsafe fn init_ap( ) -> usize { init_mair(); - let mut active_table = ActivePageTable::new_unlocked(); + let mut active_table = ActivePageTable::new_unlocked(PageTableType::Kernel); let mut new_table = InactivePageTable::from_address(bsp_table); let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new( - crate::USER_TMP_MISC_OFFSET, + crate::KERNEL_TMP_MISC_OFFSET, ))); active_table.with(&mut new_table, &mut temporary_page, |mapper| { @@ -230,8 +230,7 @@ impl DerefMut for ActivePageTable { impl ActivePageTable { //TODO: table_type argument - pub unsafe fn new() -> ActivePageTable { - let table_type = PageTableType::Kernel; + pub unsafe fn new(table_type: PageTableType) -> ActivePageTable { page_table_lock(); ActivePageTable { mapper: Mapper::new(match table_type { @@ -243,8 +242,7 @@ impl ActivePageTable { } //TODO: table_type argument - pub unsafe fn new_unlocked() -> ActivePageTable { - let table_type = PageTableType::Kernel; + pub unsafe fn new_unlocked(table_type: PageTableType) -> ActivePageTable { ActivePageTable { mapper: Mapper::new(match table_type { PageTableType::User => MapperType::User, @@ -394,7 +392,7 @@ impl PhysicalAddress { #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct VirtualAddress(usize); -pub enum VAddrType { +pub enum VirtualAddressType { User, Kernel } @@ -408,11 +406,11 @@ impl VirtualAddress { self.0 } - pub fn get_type(&self) -> VAddrType { + pub fn get_type(&self) -> VirtualAddressType { if ((self.0 >> 48) & 
0xffff) == 0xffff { - VAddrType::Kernel + VirtualAddressType::Kernel } else { - VAddrType::User + VirtualAddressType::User } } } diff --git a/src/context/list.rs b/src/context/list.rs index d56b0e2..3c880fe 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -4,7 +4,7 @@ use alloc::collections::BTreeMap; use core::alloc::{GlobalAlloc, Layout}; use core::{iter, mem}; use core::sync::atomic::Ordering; -use crate::paging; +use crate::paging::{ActivePageTable, PageTableType}; use spin::RwLock; use crate::syscall::error::{Result, Error, EAGAIN}; diff --git a/src/context/memory.rs b/src/context/memory.rs index bd24a24..e050467 100644 --- a/src/context/memory.rs +++ b/src/context/memory.rs @@ -15,7 +15,7 @@ use crate::arch::paging::PAGE_SIZE; use crate::context::file::FileDescriptor; use crate::ipi::{ipi, IpiKind, IpiTarget}; use crate::memory::Frame; -use crate::paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress}; +use crate::paging::{ActivePageTable, InactivePageTable, PageTableType, Page, PageIter, PhysicalAddress, VirtualAddress, VirtualAddressType}; use crate::paging::entry::EntryFlags; use crate::paging::mapper::MapperFlushAll; use crate::paging::temporary_page::TemporaryPage; @@ -312,7 +312,10 @@ impl Grant { } pub fn physmap(from: PhysicalAddress, to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match to.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -339,7 +342,10 @@ impl Grant { } pub fn map(to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match to.get_type() { + VirtualAddressType::User => unsafe { 
ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -365,7 +371,10 @@ impl Grant { } pub fn map_inactive(from: VirtualAddress, to: VirtualAddress, size: usize, flags: EntryFlags, desc_opt: Option, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match to.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; //TODO: Do not allocate let mut frames = VecDeque::with_capacity(size/PAGE_SIZE); @@ -406,7 +415,10 @@ impl Grant { pub fn secret_clone(&self, new_start: VirtualAddress) -> Grant { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match new_start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -463,7 +475,10 @@ impl Grant { pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match new_start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -499,7 +514,11 @@ impl Grant { pub fn unmap(mut self) { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start_address().get_type() { + VirtualAddressType::User => unsafe { 
ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; + let mut flush_all = MapperFlushAll::new(); @@ -528,7 +547,10 @@ impl Grant { pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) { assert!(self.mapped); - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start_address().get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; active_table.with(new_table, temporary_page, |mapper| { let start_page = Page::containing_address(self.start_address()); @@ -703,7 +725,10 @@ impl Memory { } fn map(&mut self, clear: bool) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -723,7 +748,10 @@ impl Memory { } fn unmap(&mut self) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -738,7 +766,10 @@ impl Memory { /// A complicated operation to move a piece of memory to a new page table /// It also allows for changing the address at the same time pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match new_start.get_type() { + VirtualAddressType::User 
=> unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -760,7 +791,10 @@ impl Memory { } pub fn remap(&mut self, new_flags: EntryFlags) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let mut flush_all = MapperFlushAll::new(); @@ -775,7 +809,10 @@ impl Memory { } pub fn resize(&mut self, new_size: usize, clear: bool) { - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = match self.start.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; //TODO: Calculate page changes to minimize operations if new_size > self.size { diff --git a/src/ptrace.rs b/src/ptrace.rs index 318cf5c..930b10e 100644 --- a/src/ptrace.rs +++ b/src/ptrace.rs @@ -9,7 +9,7 @@ use crate::{ entry::EntryFlags, mapper::MapperFlushAll, temporary_page::TemporaryPage, - ActivePageTable, InactivePageTable, Page, PAGE_SIZE, VirtualAddress + ActivePageTable, InactivePageTable, PageTableType, Page, PAGE_SIZE, VirtualAddress } }, common::unique::Unique, @@ -458,7 +458,7 @@ where F: FnOnce(*mut u8) -> Result<()> // in `proc:/mem`, or return a partial read/write. 
let start = Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET)); - let mut active_page_table = unsafe { ActivePageTable::new() }; + let mut active_page_table = unsafe { ActivePageTable::new(PageTableType::User) }; let mut target_page_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) }; diff --git a/src/scheme/memory.rs b/src/scheme/memory.rs index fe8810e..5fc4e55 100644 --- a/src/scheme/memory.rs +++ b/src/scheme/memory.rs @@ -1,7 +1,7 @@ use crate::context; use crate::context::memory::{entry_flags, Grant}; use crate::memory::{free_frames, used_frames, PAGE_SIZE}; -use crate::paging::{ActivePageTable, VirtualAddress}; +use crate::paging::{ActivePageTable, PageTableType, VirtualAddress, VirtualAddressType}; use crate::syscall::data::{Map, OldMap, StatVfs}; use crate::syscall::error::*; use crate::syscall::flag::MapFlags; @@ -48,7 +48,10 @@ impl Scheme for MemoryScheme { // Make sure it's *absolutely* not mapped already // TODO: Keep track of all allocated memory so this isn't necessary - let active_table = unsafe { ActivePageTable::new() }; + let active_table = match VirtualAddress::new(map.address).get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; for page in region.pages() { if active_table.translate_page(page).is_some() { diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs index efbfe3b..f6e39b6 100644 --- a/src/syscall/driver.rs +++ b/src/syscall/driver.rs @@ -1,6 +1,6 @@ use crate::interrupt::InterruptStack; use crate::memory::{allocate_frames_complex, deallocate_frames, Frame}; -use crate::paging::{ActivePageTable, PhysicalAddress, VirtualAddress}; +use crate::paging::{ActivePageTable, PageTableType, PhysicalAddress, VirtualAddress, VirtualAddressType}; use crate::paging::entry::EntryFlags; use crate::context; use crate::context::memory::{Grant, Region}; @@ -153,7 
+153,11 @@ pub fn physunmap(virtual_address: usize) -> Result { pub fn virttophys(virtual_address: usize) -> Result { enforce_root()?; - let active_table = unsafe { ActivePageTable::new() }; + let active_table = match VirtualAddress::new(virtual_address).get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; + match active_table.translate(VirtualAddress::new(virtual_address)) { Some(physical_address) => Ok(physical_address.data()), None => Err(Error::new(EFAULT)) diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 978f867..535f68d 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -19,7 +19,7 @@ use crate::memory::allocate_frames; use crate::paging::entry::EntryFlags; use crate::paging::mapper::MapperFlushAll; use crate::paging::temporary_page::TemporaryPage; -use crate::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, PAGE_SIZE}; +use crate::paging::{ActivePageTable, InactivePageTable, PageTableType, Page, VirtualAddress, PAGE_SIZE}; use crate::{ptrace, syscall}; use crate::scheme::FileHandle; use crate::start::usermode; @@ -338,7 +338,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { context.arch = arch; - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = unsafe { ActivePageTable::new(PageTableType::User) }; let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET))); @@ -1277,7 +1277,7 @@ pub fn mprotect(address: usize, size: usize, flags: MapFlags) -> Result { let end_offset = size.checked_sub(1).ok_or(Error::new(EFAULT))?; let end_address = address.checked_add(end_offset).ok_or(Error::new(EFAULT))?; - let mut active_table = unsafe { ActivePageTable::new() }; + let mut active_table = unsafe { ActivePageTable::new(PageTableType::User) }; let mut flush_all = 
MapperFlushAll::new(); diff --git a/src/syscall/validate.rs b/src/syscall/validate.rs index 3961feb..5806a53 100644 --- a/src/syscall/validate.rs +++ b/src/syscall/validate.rs @@ -1,6 +1,6 @@ use core::{mem, slice}; -use crate::paging::{ActivePageTable, Page, VirtualAddress}; +use crate::paging::{ActivePageTable, PageTableType, Page, VirtualAddress, VirtualAddressType}; use crate::paging::entry::EntryFlags; use crate::syscall::error::*; @@ -8,7 +8,10 @@ fn validate(address: usize, size: usize, flags: EntryFlags) -> Result<()> { let end_offset = size.checked_sub(1).ok_or(Error::new(EFAULT))?; let end_address = address.checked_add(end_offset).ok_or(Error::new(EFAULT))?; - let active_table = unsafe { ActivePageTable::new() }; + let active_table = match VirtualAddress::new(address).get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; let start_page = Page::containing_address(VirtualAddress::new(address)); let end_page = Page::containing_address(VirtualAddress::new(end_address)); From 14d79927af5b66a690ae68f2774fc22e81d7db0b Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 15:50:00 +0000 Subject: [PATCH 12/55] aarch64: Add a set_tcb method to setup tpidr_el0 --- src/context/arch/aarch64.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs index b57bfa4..5f5e2a3 100644 --- a/src/context/arch/aarch64.rs +++ b/src/context/arch/aarch64.rs @@ -104,6 +104,10 @@ impl Context { self.lr = address; } + pub fn set_tcb(&mut self, pid: usize) { + self.tpidr_el0 = (crate::USER_TCB_OFFSET + pid * crate::PAGE_SIZE); + } + pub fn set_fp(&mut self, address: usize) { self.fp = address; } From 825bc4a02d2ed1c40f17a4332b168575d3650ae7 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 15:51:47 +0000 Subject: [PATCH 13/55] aarch64: spawn: split out arch 
specific mods --- src/context/list.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/context/list.rs b/src/context/list.rs index 3c880fe..2e85ae8 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -81,12 +81,23 @@ impl ContextList { } let mut stack = vec![0; 65_536].into_boxed_slice(); let offset = stack.len() - mem::size_of::(); + + #[cfg(target_arch = "x86_64")] unsafe { let offset = stack.len() - mem::size_of::(); let func_ptr = stack.as_mut_ptr().add(offset); *(func_ptr as *mut usize) = func as usize; } - context.arch.set_page_table(unsafe { paging::ActivePageTable::new().address() }); + + #[cfg(target_arch = "aarch64")] + { + let context_id = context.id.into(); + context.arch.set_tcb(context_id); + context.arch.set_lr(func as usize); + context.arch.set_context_handle(); + } + + context.arch.set_page_table(unsafe { ActivePageTable::new(PageTableType::User).address() }); context.arch.set_fx(fx.as_ptr() as usize); context.arch.set_stack(stack.as_ptr() as usize + offset); context.kfx = Some(fx); From 6677cfbf1e0454d06e61c8c67b4d3019cf94f18a Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 19:02:43 +0000 Subject: [PATCH 14/55] aarch64: Make interrupt::pause use nop so we can move ahead before interrupts are enabled --- src/arch/aarch64/interrupt/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/arch/aarch64/interrupt/mod.rs b/src/arch/aarch64/interrupt/mod.rs index b7451af..9811df2 100644 --- a/src/arch/aarch64/interrupt/mod.rs +++ b/src/arch/aarch64/interrupt/mod.rs @@ -49,7 +49,7 @@ pub unsafe fn halt() { /// Safe because it is similar to a NOP, and has no memory effects #[inline(always)] pub fn pause() { - unsafe { llvm_asm!("wfi") }; + unsafe { llvm_asm!("nop") }; } pub fn available_irqs_iter(cpu_id: usize) -> impl Iterator + 'static { From 5bc9dea242d68fa9942ef29a0e8511ad65c5f6a6 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Fri, 15 Jan 2021 19:03:42 +0000 
Subject: [PATCH 15/55] aarch64: context::switch: update the CONTEXT_SWITCH_LOCK --- src/context/arch/aarch64.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs index 5f5e2a3..ac74e22 100644 --- a/src/context/arch/aarch64.rs +++ b/src/context/arch/aarch64.rs @@ -239,6 +239,8 @@ impl Context { llvm_asm!("mov $0, sp" : "=r"(self.sp) : : "memory" : "volatile"); llvm_asm!("mov sp, $0" : : "r"(next.sp) : "memory" : "volatile"); + + CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst); } } From 208fb681f4cae3615674d48c9b4e1a932201f5c9 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Sun, 17 Jan 2021 09:55:10 +0000 Subject: [PATCH 16/55] aarch64: vectors: Manage unhandled exceptions So we can more clearly see when things go wrong. --- .../aarch64/init/pre_kstart/helpers/vectors.S | 78 ++++++++--------- src/arch/aarch64/interrupt/irq.rs | 2 +- src/arch/aarch64/interrupt/syscall.rs | 84 ++++++++++++++++++- 3 files changed, 123 insertions(+), 41 deletions(-) diff --git a/src/arch/aarch64/init/pre_kstart/helpers/vectors.S b/src/arch/aarch64/init/pre_kstart/helpers/vectors.S index c368bae..2241348 100644 --- a/src/arch/aarch64/init/pre_kstart/helpers/vectors.S +++ b/src/arch/aarch64/init/pre_kstart/helpers/vectors.S @@ -10,92 +10,92 @@ exception_vector_base: .align 7 __vec_00: mov x18, #0xb0b0 - wfi - b __vec_00 + b do_exception_synchronous + b __vec_00 .align 7 __vec_01: mov x18, #0xb0b1 - wfi - b __vec_01 + b do_exception_irq + b __vec_01 .align 7 __vec_02: mov x18, #0xb0b2 - wfi - b __vec_02 + b do_exception_unhandled + b __vec_02 .align 7 __vec_03: mov x18, #0xb0b3 - wfi - b __vec_03 + b do_exception_unhandled + b __vec_03 .align 7 __vec_04: - b do_report_exception - wfi - b __vec_04 + mov x18, #0xb0b4 + b do_exception_synchronous + b __vec_04 .align 7 __vec_05: - b do_irq // First level interrupt handler - wfi - b __vec_05 + mov x18, #0xb0b5 + b do_exception_irq + b __vec_05 .align 7 __vec_06: mov x18, #0xb0b6 
- wfi - b __vec_06 + b do_exception_unhandled + b __vec_06 .align 7 __vec_07: mov x18, #0xb0b7 - wfi - b __vec_07 + b do_exception_unhandled + b __vec_07 .align 7 __vec_08: - b do_syscall // Syscall handler - wfi - b __vec_08 + mov x18, #0xb0b8 + b do_exception_synchronous + b __vec_08 .align 7 __vec_09: - b do_irq // First level interrupt handler - wfi - b __vec_09 + mov x18, #0xb0b9 + b do_exception_irq + b __vec_09 .align 7 __vec_10: - mov x18, #0xb0bb - wfi - b __vec_10 + mov x18, #0xb0ba + b do_exception_unhandled + b __vec_10 .align 7 __vec_11: - mov x18, #0xb0bc - wfi - b __vec_11 + mov x18, #0xb0bb + b do_exception_unhandled + b __vec_11 .align 7 __vec_12: - mov x18, #0xb0bd - wfi - b __vec_12 + mov x18, #0xb0bc + b do_exception_unhandled + b __vec_12 .align 7 __vec_13: - mov x18, #0xb0be - wfi - b __vec_13 + mov x18, #0xb0bd + b do_exception_unhandled + b __vec_13 .align 7 __vec_14: - mov x18, #0xb0bf - wfi - b __vec_14 + mov x18, #0xb0be + b do_exception_unhandled + b __vec_14 .align 7 exception_vector_end: diff --git a/src/arch/aarch64/interrupt/irq.rs b/src/arch/aarch64/interrupt/irq.rs index 72de8c1..1ad13f6 100644 --- a/src/arch/aarch64/interrupt/irq.rs +++ b/src/arch/aarch64/interrupt/irq.rs @@ -12,7 +12,7 @@ pub static PIT_TICKS: AtomicUsize = ATOMIC_USIZE_INIT; #[naked] #[no_mangle] -pub unsafe extern fn do_irq() { +pub unsafe extern fn do_exception_irq() { #[inline(never)] unsafe fn inner() { irq_demux(); diff --git a/src/arch/aarch64/interrupt/syscall.rs b/src/arch/aarch64/interrupt/syscall.rs index 26f47a0..be8af68 100644 --- a/src/arch/aarch64/interrupt/syscall.rs +++ b/src/arch/aarch64/interrupt/syscall.rs @@ -3,9 +3,91 @@ use crate::syscall; #[naked] #[no_mangle] -pub unsafe extern fn do_syscall() { +pub unsafe extern fn do_exception_unhandled() { #[inline(never)] unsafe fn inner(stack: &mut InterruptStack) -> usize { + println!("do_exception_unhandled: ELR: 0x{:016x}", stack.elr_el1); + loop {} + } + + llvm_asm!("str x0, [sp, #-8]! 
+ str x1, [sp, #-8]! + str x2, [sp, #-8]! + str x3, [sp, #-8]! + str x4, [sp, #-8]! + str x5, [sp, #-8]! + str x6, [sp, #-8]! + str x7, [sp, #-8]! + str x8, [sp, #-8]! + str x9, [sp, #-8]! + str x10, [sp, #-8]! + str x11, [sp, #-8]! + str x12, [sp, #-8]! + str x13, [sp, #-8]! + str x14, [sp, #-8]! + str x15, [sp, #-8]! + str x16, [sp, #-8]! + str x17, [sp, #-8]! + str x18, [sp, #-8]! + str x19, [sp, #-8]! + str x20, [sp, #-8]! + str x21, [sp, #-8]! + str x22, [sp, #-8]! + str x23, [sp, #-8]! + str x24, [sp, #-8]! + str x25, [sp, #-8]! + str x26, [sp, #-8]! + str x27, [sp, #-8]! + str x28, [sp, #-8]! + str x29, [sp, #-8]! + str x30, [sp, #-8]! + + mrs x18, sp_el0 + str x18, [sp, #-8]! + + mrs x18, esr_el1 + str x18, [sp, #-8]! + + mrs x18, spsr_el1 + str x18, [sp, #-8]! + + mrs x18, tpidrro_el0 + str x18, [sp, #-8]! + + mrs x18, tpidr_el0 + str x18, [sp, #-8]! + + str x18, [sp, #-8]! + + mrs x18, elr_el1 + str x18, [sp, #-8]!" + : : : : "volatile"); + + let sp: usize; + llvm_asm!("" : "={sp}"(sp) : : : "volatile"); + llvm_asm!("mov x29, sp" : : : : "volatile"); + + let a = inner(&mut *(sp as *mut InterruptStack)); +} + +#[naked] +#[no_mangle] +pub unsafe extern fn do_exception_synchronous() { + #[inline(never)] + unsafe fn inner(stack: &mut InterruptStack) -> usize { + let exception_code = (stack.esr_el1 & (0x3f << 26)) >> 26; + if exception_code != 0b010101 { + println!("do_exception_synchronous: Non-SVC!!!"); + loop {} + } else { + println!("do_exception_synchronous: SVC: x8: 0x{:016x}", stack.scratch.x8); + } + + llvm_asm!("nop": : : : "volatile"); + llvm_asm!("nop": : : : "volatile"); + llvm_asm!("nop": : : : "volatile"); + llvm_asm!("nop": : : : "volatile"); + let fp; llvm_asm!("" : "={fp}"(fp) : : : "volatile"); From 3585f620b080726814e5faf86573762e1e45890b Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Sun, 17 Jan 2021 10:03:50 +0000 Subject: [PATCH 17/55] aarch64: clone: Fix incorrect stack offset in clone_ret --- src/arch/aarch64/interrupt/syscall.rs | 
1 + 1 file changed, 1 insertion(+) diff --git a/src/arch/aarch64/interrupt/syscall.rs b/src/arch/aarch64/interrupt/syscall.rs index be8af68..0cae56e 100644 --- a/src/arch/aarch64/interrupt/syscall.rs +++ b/src/arch/aarch64/interrupt/syscall.rs @@ -266,6 +266,7 @@ pub struct SyscallStack { #[naked] pub unsafe extern fn clone_ret() { llvm_asm!("ldp x29, x30, [sp], #16"); + llvm_asm!("add sp, sp, #16"); llvm_asm!("mov x0, 0"); } From 67d72532a9e230d7fe641d635132aa8eaeea902e Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Sun, 17 Jan 2021 10:05:10 +0000 Subject: [PATCH 18/55] aarch64: usermode: Remove tpidr_el0 manipulation Was using the incorrect USER_TLS_OFFSET instead of USER_TCB_OFFSET. In any case, this is better done in process::clone. --- src/arch/aarch64/start.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/arch/aarch64/start.rs b/src/arch/aarch64/start.rs index e4d3530..13d1b2d 100644 --- a/src/arch/aarch64/start.rs +++ b/src/arch/aarch64/start.rs @@ -176,10 +176,8 @@ pub unsafe extern fn kstart_ap(args_ptr: *const KernelArgsAp) -> ! { #[naked] pub unsafe fn usermode(ip: usize, sp: usize, arg: usize, singlestep: bool) -> ! 
{ let cpu_id: usize = 0; - let uspace_tls_start = (crate::USER_TLS_OFFSET + crate::USER_TLS_SIZE * cpu_id); let spsr: u32 = 0; - llvm_asm!("msr tpidr_el0, $0" : : "r"(uspace_tls_start) : : "volatile"); llvm_asm!("msr spsr_el1, $0" : : "r"(spsr) : : "volatile"); llvm_asm!("msr elr_el1, $0" : : "r"(ip) : : "volatile"); llvm_asm!("msr sp_el0, $0" : : "r"(sp) : : "volatile"); From 67ec6c23e7aca70d7a9f6182ce56e687be530d80 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Sun, 17 Jan 2021 10:09:03 +0000 Subject: [PATCH 19/55] aarch64: Move tpidr_el0 setup from spawn to switch --- src/context/list.rs | 1 - src/context/switch.rs | 5 +++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/context/list.rs b/src/context/list.rs index 2e85ae8..fbc92db 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -92,7 +92,6 @@ impl ContextList { #[cfg(target_arch = "aarch64")] { let context_id = context.id.into(); - context.arch.set_tcb(context_id); context.arch.set_lr(func as usize); context.arch.set_context_handle(); } diff --git a/src/context/switch.rs b/src/context/switch.rs index 2e17b42..634aff3 100644 --- a/src/context/switch.rs +++ b/src/context/switch.rs @@ -151,6 +151,11 @@ pub unsafe fn switch() -> bool { } gdt::set_tcb((*to_ptr).id.into()); } + #[cfg(target_arch = "aarch64")] + { + let pid = (*to_ptr).id.into(); + (*to_ptr).arch.set_tcb(pid); + } CONTEXT_ID.store((*to_ptr).id, Ordering::SeqCst); } From ae0aebd0365e33df6b7c7202a161ed3ac675b710 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Sun, 17 Jan 2021 10:12:42 +0000 Subject: [PATCH 20/55] aarch64: clone: Return from clone syscall No CLONE_STACK functionality yet. 
--- src/syscall/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs index d5e1485..779ac68 100644 --- a/src/syscall/mod.rs +++ b/src/syscall/mod.rs @@ -129,7 +129,8 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u #[cfg(target_arch = "aarch64")] { //TODO: CLONE_STACK - clone(b, bp).map(ContextId::into) + let ret = clone(b, bp).map(ContextId::into); + ret } #[cfg(target_arch = "x86_64")] From 95bd8f2013bada04b0a993fc16cad8bf102645d7 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Sun, 17 Jan 2021 10:26:49 +0000 Subject: [PATCH 21/55] clone: Make stack manipulation arch specific --- src/syscall/process.rs | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 535f68d..67ff878 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -94,28 +94,33 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { kfx_opt = Some(new_fx); } - if let Some(ref stack) = context.kstack { - // Get the relative offset to the return address of the function - // obtaining `stack_base`. - // - // (base pointer - start of stack) - one - offset = stack_base - stack.as_ptr() as usize - mem::size_of::(); // Add clone ret - let mut new_stack = stack.clone(); + #[cfg(target_arch = "x86_64")] + { + if let Some(ref stack) = context.kstack { + // Get the relative offset to the return address of the function + // obtaining `stack_base`. + // + // (base pointer - start of stack) - one + offset = stack_base - stack.as_ptr() as usize - mem::size_of::(); // Add clone ret + let mut new_stack = stack.clone(); - unsafe { - // Set clone's return value to zero. This is done because - // the clone won't return like normal, which means the value - // would otherwise never get set. 
- #[cfg(target_arch = "x86_64")] // TODO - if let Some(regs) = ptrace::rebase_regs_ptr_mut(context.regs, Some(&mut new_stack)) { - (*regs).scratch.rax = 0; + unsafe { + // Set clone's return value to zero. This is done because + // the clone won't return like normal, which means the value + // would otherwise never get set. + if let Some(regs) = ptrace::rebase_regs_ptr_mut(context.regs, Some(&mut new_stack)) { + (*regs).scratch.rax = 0; + } + + // Change the return address of the child (previously + // syscall) to the arch-specific clone_ret callback + let func_ptr = new_stack.as_mut_ptr().add(offset); + *(func_ptr as *mut usize) = interrupt::syscall::clone_ret as usize; } - // Change the return address of the child (previously - // syscall) to the arch-specific clone_ret callback - let func_ptr = new_stack.as_mut_ptr().add(offset); - *(func_ptr as *mut usize) = interrupt::syscall::clone_ret as usize; + kstack_opt = Some(new_stack); } + } kstack_opt = Some(new_stack); } From c188a60871f4323c3d001dd3d9a2c6e145fca62e Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Mon, 18 Jan 2021 21:47:28 +0000 Subject: [PATCH 22/55] aarch64: Fix clone_ret FIXME: Explain the magic numbers here later. 
--- src/arch/aarch64/interrupt/syscall.rs | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/src/arch/aarch64/interrupt/syscall.rs b/src/arch/aarch64/interrupt/syscall.rs index 0cae56e..42b64ff 100644 --- a/src/arch/aarch64/interrupt/syscall.rs +++ b/src/arch/aarch64/interrupt/syscall.rs @@ -265,16 +265,6 @@ pub struct SyscallStack { #[naked] pub unsafe extern fn clone_ret() { - llvm_asm!("ldp x29, x30, [sp], #16"); - llvm_asm!("add sp, sp, #16"); + llvm_asm!("ldp x29, x30, [sp], #0x60"); llvm_asm!("mov x0, 0"); } - -/* -#[naked] -pub unsafe extern fn clone_ret() { - llvm_asm!("add sp, sp, #16"); - llvm_asm!("ldp x29, x30, [sp], #16"); - llvm_asm!("mov x0, 0"); -} -*/ From fd0336692d5d26ad8413ef7b80a96577ccf16526 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Mon, 18 Jan 2021 21:50:19 +0000 Subject: [PATCH 23/55] aarch64: clone: Introduce kernel and user space specific mods At present these are done 'wholesale' without any regard for x86_64. That needs to change eventually. 
--- src/context/arch/aarch64.rs | 8 ++++- src/context/list.rs | 3 +- src/syscall/process.rs | 60 +++++++++++++++++++++---------------- 3 files changed, 43 insertions(+), 28 deletions(-) diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs index ac74e22..e759f0c 100644 --- a/src/context/arch/aarch64.rs +++ b/src/context/arch/aarch64.rs @@ -14,6 +14,7 @@ pub struct Context { elr_el1: usize, sp_el0: usize, ttbr0_el1: usize, /* Pointer to U4 translation table for this Context */ + ttbr1_el1: usize, /* Pointer to P4 translation table for this Context */ tpidr_el0: usize, /* Pointer to TLS region for this Context */ tpidrro_el0: usize, /* Pointer to TLS (read-only) region for this Context */ rflags: usize, @@ -53,6 +54,7 @@ impl Context { elr_el1: 0, sp_el0: 0, ttbr0_el1: 0, + ttbr1_el1: 0, tpidr_el0: 0, tpidrro_el0: 0, rflags: 0, /* spsr_el1 */ @@ -92,10 +94,14 @@ impl Context { pub fn set_fx(&mut self, _address: usize) { } - pub fn set_page_table(&mut self, address: usize) { + pub fn set_page_utable(&mut self, address: usize) { self.ttbr0_el1 = address; } + pub fn set_page_ktable(&mut self, address: usize) { + self.ttbr1_el1 = address; + } + pub fn set_stack(&mut self, address: usize) { self.sp = address; } diff --git a/src/context/list.rs b/src/context/list.rs index fbc92db..47c6c9f 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -96,7 +96,8 @@ impl ContextList { context.arch.set_context_handle(); } - context.arch.set_page_table(unsafe { ActivePageTable::new(PageTableType::User).address() }); + context.arch.set_page_utable(unsafe { ActivePageTable::new(PageTableType::User).address() }); + context.arch.set_page_ktable(unsafe { ActivePageTable::new(PageTableType::Kernel).address() }); context.arch.set_fx(fx.as_ptr() as usize); context.arch.set_stack(stack.as_ptr() as usize + offset); context.kfx = Some(fx); diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 67ff878..4e2c5e1 100644 --- a/src/syscall/process.rs +++ 
b/src/syscall/process.rs @@ -343,31 +343,39 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { context.arch = arch; - let mut active_table = unsafe { ActivePageTable::new(PageTableType::User) }; + let mut active_utable = unsafe { ActivePageTable::new(PageTableType::User) }; + let mut active_ktable = unsafe { ActivePageTable::new(PageTableType::Kernel) }; - let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET))); + let mut temporary_upage = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_MISC_OFFSET))); + let mut temporary_kpage = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::KERNEL_TMP_MISC_OFFSET))); - let mut new_table = { + let mut new_utable = { let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table"); - InactivePageTable::new(frame, &mut active_table, &mut temporary_page) + InactivePageTable::new(frame, &mut active_utable, &mut temporary_upage) }; - context.arch.set_page_table(unsafe { new_table.address() }); + let mut new_ktable = { + let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table"); + InactivePageTable::new(frame, &mut active_ktable, &mut temporary_kpage) + }; + + context.arch.set_page_utable(unsafe { new_utable.address() }); + context.arch.set_page_ktable(unsafe { new_ktable.address() }); // Copy kernel image mapping { - let frame = active_table.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped"); - let flags = active_table.p4()[crate::KERNEL_PML4].flags(); - active_table.with(&mut new_table, &mut temporary_page, |mapper| { + let frame = active_ktable.p4()[crate::KERNEL_PML4].pointed_frame().expect("kernel image not mapped"); + let flags = active_ktable.p4()[crate::KERNEL_PML4].flags(); + active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| { mapper.p4_mut()[crate::KERNEL_PML4].set(frame, flags); }); } // Copy kernel heap 
mapping { - let frame = active_table.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped"); - let flags = active_table.p4()[crate::KERNEL_HEAP_PML4].flags(); - active_table.with(&mut new_table, &mut temporary_page, |mapper| { + let frame = active_ktable.p4()[crate::KERNEL_HEAP_PML4].pointed_frame().expect("kernel heap not mapped"); + let flags = active_ktable.p4()[crate::KERNEL_HEAP_PML4].flags(); + active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| { mapper.p4_mut()[crate::KERNEL_HEAP_PML4].set(frame, flags); }); } @@ -389,9 +397,9 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { if flags.contains(CLONE_VM) { // Copy user image mapping, if found if ! image.is_empty() { - let frame = active_table.p4()[crate::USER_PML4].pointed_frame().expect("user image not mapped"); - let flags = active_table.p4()[crate::USER_PML4].flags(); - active_table.with(&mut new_table, &mut temporary_page, |mapper| { + let frame = active_utable.p4()[crate::USER_PML4].pointed_frame().expect("user image not mapped"); + let flags = active_utable.p4()[crate::USER_PML4].flags(); + active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| { mapper.p4_mut()[crate::USER_PML4].set(frame, flags); }); } @@ -437,7 +445,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { for memory_shared in image.iter_mut() { memory_shared.with(|memory| { let start = VirtualAddress::new(memory.start_address().data() - crate::USER_TMP_OFFSET + crate::USER_OFFSET); - memory.move_to(start, &mut new_table, &mut temporary_page); + memory.move_to(start, &mut new_utable, &mut temporary_upage); }); } context.image = image; @@ -449,7 +457,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { for mut grant in old_grants.inner.into_iter() { let start = VirtualAddress::new(grant.start_address().data() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET); - grant.move_to(start, &mut new_table, &mut temporary_page); + 
grant.move_to(start, &mut new_utable, &mut temporary_upage); grants.insert(grant); } } @@ -459,14 +467,14 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { // Setup user stack if let Some(stack_shared) = stack_opt { if flags.contains(CLONE_STACK) { - let frame = active_table.p4()[crate::USER_STACK_PML4].pointed_frame().expect("user stack not mapped"); - let flags = active_table.p4()[crate::USER_STACK_PML4].flags(); - active_table.with(&mut new_table, &mut temporary_page, |mapper| { + let frame = active_utable.p4()[crate::USER_STACK_PML4].pointed_frame().expect("user stack not mapped"); + let flags = active_utable.p4()[crate::USER_STACK_PML4].flags(); + active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| { mapper.p4_mut()[crate::USER_STACK_PML4].set(frame, flags); }); } else { stack_shared.with(|stack| { - stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_table, &mut temporary_page); + stack.move_to(VirtualAddress::new(crate::USER_STACK_OFFSET), &mut new_utable, &mut temporary_upage); }); } context.stack = Some(stack_shared); @@ -474,7 +482,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { // Setup user sigstack if let Some(mut sigstack) = sigstack_opt { - sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_table, &mut temporary_page); + sigstack.move_to(VirtualAddress::new(crate::USER_SIGSTACK_OFFSET), &mut new_utable, &mut temporary_upage); context.sigstack = Some(sigstack); } @@ -491,9 +499,9 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { if let Some(mut tls) = tls_opt { // Copy TLS mapping { - let frame = active_table.p4()[crate::USER_TLS_PML4].pointed_frame().expect("user tls not mapped"); - let flags = active_table.p4()[crate::USER_TLS_PML4].flags(); - active_table.with(&mut new_table, &mut temporary_page, |mapper| { + let frame = active_utable.p4()[crate::USER_TLS_PML4].pointed_frame().expect("user tls not mapped"); + let flags = 
active_utable.p4()[crate::USER_TLS_PML4].flags(); + active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| { mapper.p4_mut()[crate::USER_TLS_PML4].set(frame, flags); }); } @@ -501,7 +509,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { // TODO: Make sure size is not greater than USER_TLS_SIZE let tls_addr = crate::USER_TLS_OFFSET + context.id.into() * crate::USER_TLS_SIZE; //println!("{}: Copy TLS: address 0x{:x}, size 0x{:x}", context.id.into(), tls_addr, tls.mem.size()); - tls.mem.move_to(VirtualAddress::new(tls_addr), &mut new_table, &mut temporary_page); + tls.mem.move_to(VirtualAddress::new(tls_addr), &mut new_utable, &mut temporary_upage); unsafe { *(tcb_addr as *mut usize) = tls.mem.start_address().data() + tls.mem.size(); } @@ -516,7 +524,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { } } - tcb.move_to(VirtualAddress::new(tcb_addr), &mut new_table, &mut temporary_page); + tcb.move_to(VirtualAddress::new(tcb_addr), &mut new_utable, &mut temporary_upage); context.image.push(tcb.to_shared()); context.name = name; From 9429032cec706c6379747fac58362770c605228a Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Mon, 18 Jan 2021 21:53:04 +0000 Subject: [PATCH 24/55] aarch64: clone: Further clone_ret + tpidr_el0 fixes --- src/syscall/process.rs | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 4e2c5e1..c5fa642 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -122,7 +122,14 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { } } - kstack_opt = Some(new_stack); + #[cfg(target_arch = "aarch64")] + { + if let Some(ref stack) = context.kstack { + offset = stack_base - stack.as_ptr() as usize; + let mut new_stack = stack.clone(); + + kstack_opt = Some(new_stack); + } } if flags.contains(CLONE_VM) { @@ -389,6 +396,10 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { if let 
Some(stack) = kstack_opt.take() { context.arch.set_stack(stack.as_ptr() as usize + offset); context.kstack = Some(stack); + #[cfg(target_arch = "aarch64")] + { + context.arch.set_lr(interrupt::syscall::clone_ret as usize); + } } // TODO: Clone ksig? @@ -495,6 +506,18 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { true ); + #[cfg(target_arch = "aarch64")] + { + if let Some(stack) = &mut context.kstack { + unsafe { + let interrupt_stack_offset_from_stack_base = *(stack_base as *const u64) - stack_base as u64; + let mut interrupt_stack = &mut *(stack.as_mut_ptr().add(offset + interrupt_stack_offset_from_stack_base as usize) as *mut crate::arch::interrupt::InterruptStack); + interrupt_stack.tpidr_el0 = tcb_addr; + } + } + } + + // Setup user TLS if let Some(mut tls) = tls_opt { // Copy TLS mapping From f1db56f0266886bc8769bd188a7fc4c96dd733b2 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Mon, 18 Jan 2021 21:55:42 +0000 Subject: [PATCH 25/55] aarch64: clone: Further uspace and kspace mods --- src/syscall/process.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/syscall/process.rs b/src/syscall/process.rs index c5fa642..8518f99 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -418,9 +418,9 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { // Copy grant mapping if ! 
grants.lock().is_empty() { - let frame = active_table.p4()[crate::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped"); - let flags = active_table.p4()[crate::USER_GRANT_PML4].flags(); - active_table.with(&mut new_table, &mut temporary_page, |mapper| { + let frame = active_utable.p4()[crate::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped"); + let flags = active_utable.p4()[crate::USER_GRANT_PML4].flags(); + active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| { mapper.p4_mut()[crate::USER_GRANT_PML4].set(frame, flags); }); } @@ -443,8 +443,8 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { let start_page = Page::containing_address(VirtualAddress::new(start)); let end_page = Page::containing_address(VirtualAddress::new(end - 1)); for page in Page::range_inclusive(start_page, end_page) { - let frame = active_table.translate_page(page).expect("kernel percpu not mapped"); - active_table.with(&mut new_table, &mut temporary_page, |mapper| { + let frame = active_ktable.translate_page(page).expect("kernel percpu not mapped"); + active_ktable.with(&mut new_ktable, &mut temporary_kpage, |mapper| { let result = mapper.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE); // Ignore result due to operating on inactive table unsafe { result.ignore(); } From 452196b81fcf857f5b1af236bc0e5339ff539d01 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Thu, 21 Jan 2021 11:37:32 +0000 Subject: [PATCH 26/55] aarch64: consts: Use the same USER_TLS_SIZE as x86_64 --- src/arch/aarch64/consts.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/arch/aarch64/consts.rs b/src/arch/aarch64/consts.rs index 59c5f0c..a348714 100644 --- a/src/arch/aarch64/consts.rs +++ b/src/arch/aarch64/consts.rs @@ -81,7 +81,8 @@ /// Offset to user TLS pub const USER_TLS_OFFSET: usize = USER_SIGSTACK_OFFSET + PML4_SIZE; pub const USER_TLS_PML4: usize = (USER_TLS_OFFSET & PML4_MASK)/PML4_SIZE; - 
pub const USER_TLS_SIZE: usize = 64 * 1024; + // Maximum TLS allocated to each PID, should be approximately 8 MB + pub const USER_TLS_SIZE: usize = PML4_SIZE / 65536; /// Offset to user temporary image (used when cloning) pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE; From 3da345867a7cd92ad2f97ba1182fe551852f228c Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Thu, 21 Jan 2021 11:38:46 +0000 Subject: [PATCH 27/55] aarch64: paging: Derive Debug, PartialEq for VirtualAddressType This makes asserts on VirtualAddressType equality possible. --- src/arch/aarch64/paging/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/arch/aarch64/paging/mod.rs b/src/arch/aarch64/paging/mod.rs index 29bf820..43c8d13 100644 --- a/src/arch/aarch64/paging/mod.rs +++ b/src/arch/aarch64/paging/mod.rs @@ -392,6 +392,7 @@ impl PhysicalAddress { #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct VirtualAddress(usize); +#[derive(Debug, PartialEq)] pub enum VirtualAddressType { User, Kernel From 75870a655feafe7a9d12827e724511f3d44b077d Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Thu, 21 Jan 2021 11:40:02 +0000 Subject: [PATCH 28/55] aarch64: context: Add separate kspace and uspace page table getters --- src/context/arch/aarch64.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs index e759f0c..8637665 100644 --- a/src/context/arch/aarch64.rs +++ b/src/context/arch/aarch64.rs @@ -87,10 +87,14 @@ impl Context { } } - pub fn get_page_table(&self) -> usize { + pub fn get_page_utable(&self) -> usize { self.ttbr0_el1 } + pub fn get_page_ktable(&self) -> usize { + self.ttbr1_el1 + } + pub fn set_fx(&mut self, _address: usize) { } From 65448c2d4875ca5a1f1f58b79d4407c8e1923591 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Thu, 21 Jan 2021 11:41:26 +0000 Subject: [PATCH 29/55] aarch64: context: memory: Grant::map_inactive: Bugfix When mapping one (from) virtual 
address range to another (to) virtual address range, be mindful of which mapper type to use for each range. Before this, the same mapper type was used for both ranges. This meant that if from and to were different (as in not both kernel virtual addresses or user virtual addresses) then it would appear that either from or to was not mapped previously and the kernel would panic. --- src/context/memory.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/context/memory.rs b/src/context/memory.rs index e050467..f61c1e9 100644 --- a/src/context/memory.rs +++ b/src/context/memory.rs @@ -371,7 +371,7 @@ impl Grant { } pub fn map_inactive(from: VirtualAddress, to: VirtualAddress, size: usize, flags: EntryFlags, desc_opt: Option, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant { - let mut active_table = match to.get_type() { + let mut active_table = match from.get_type() { VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } }; @@ -386,6 +386,11 @@ impl Grant { frames.push_back(frame); } + let mut active_table = match to.get_type() { + VirtualAddressType::User => unsafe { ActivePageTable::new(PageTableType::User) }, + VirtualAddressType::Kernel => unsafe { ActivePageTable::new(PageTableType::Kernel) } + }; + active_table.with(new_table, temporary_page, |mapper| { let start_page = Page::containing_address(to); let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1)); From 591775874b6aae8e02cec9fb8eaa770d661c687f Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Thu, 21 Jan 2021 11:50:56 +0000 Subject: [PATCH 30/55] ptrace: with_context_memory: use user-space specific page table --- src/ptrace.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ptrace.rs b/src/ptrace.rs index 930b10e..aeea28f 100644 --- a/src/ptrace.rs +++ b/src/ptrace.rs @@ -460,7 +460,7 @@ 
where F: FnOnce(*mut u8) -> Result<()> let mut active_page_table = unsafe { ActivePageTable::new(PageTableType::User) }; let mut target_page_table = unsafe { - InactivePageTable::from_address(context.arch.get_page_table()) + InactivePageTable::from_address(context.arch.get_page_utable()) }; // Find the physical frames for all pages From 6cacbb47f6457e4d4a40d23b6473ad1596a1129e Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Thu, 21 Jan 2021 11:53:07 +0000 Subject: [PATCH 31/55] scheme: user: Use user-space specific pagt table --- src/scheme/user.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/scheme/user.rs b/src/scheme/user.rs index 18fdfc4..8ba0786 100644 --- a/src/scheme/user.rs +++ b/src/scheme/user.rs @@ -123,7 +123,7 @@ impl UserInner { let context_lock = context_weak.upgrade().ok_or(Error::new(ESRCH))?; let mut context = context_lock.write(); - let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) }; + let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) }; let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET))); let mut grants = context.grants.lock(); @@ -154,7 +154,7 @@ impl UserInner { let context_lock = self.context.upgrade().ok_or(Error::new(ESRCH))?; let mut context = context_lock.write(); - let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) }; + let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) }; let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET))); let mut grants = context.grants.lock(); From 78d1cd17985d8302dd18a687ab0cb488962659b7 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Thu, 21 Jan 2021 11:53:35 +0000 Subject: [PATCH 32/55] syscall: process: empty: Use user-space specific page table --- src/syscall/process.rs | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 8518f99..05b66e2 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -599,7 +599,7 @@ fn empty(context: &mut context::Context, reaping: bool) { if reaping { println!("{}: {}: Grant should not exist: {:?}", context.id.into(), unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, grant); - let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) }; + let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) }; let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET))); grant.unmap_inactive(&mut new_table, &mut temporary_page); From 4a215c7c2c60b60749918f50347f8e1e22bd14f7 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Tue, 26 Jan 2021 18:17:09 +0000 Subject: [PATCH 33/55] aarch64: exception management and clone fixups --- .../aarch64/init/pre_kstart/helpers/vectors.S | 28 +- src/arch/aarch64/interrupt/handler.rs | 306 +++++++++++++++--- src/arch/aarch64/interrupt/mod.rs | 4 +- src/arch/aarch64/interrupt/syscall.rs | 73 ++++- .../aarch64/interrupt/unhandled_exceptions.rs | 145 --------- src/context/arch/aarch64.rs | 36 +++ src/syscall/process.rs | 21 +- 7 files changed, 407 insertions(+), 206 deletions(-) delete mode 100644 src/arch/aarch64/interrupt/unhandled_exceptions.rs diff --git a/src/arch/aarch64/init/pre_kstart/helpers/vectors.S b/src/arch/aarch64/init/pre_kstart/helpers/vectors.S index 2241348..dec303b 100644 --- a/src/arch/aarch64/init/pre_kstart/helpers/vectors.S +++ b/src/arch/aarch64/init/pre_kstart/helpers/vectors.S @@ -7,95 +7,117 @@ .align 11 exception_vector_base: + // Synchronous .align 7 __vec_00: mov x18, #0xb0b0 - b do_exception_synchronous + b synchronous_exception_at_el1_with_sp0 b __vec_00 + // IRQ .align 7 __vec_01: mov x18, #0xb0b1 b do_exception_irq b __vec_01 + // 
FIQ .align 7 __vec_02: mov x18, #0xb0b2 b do_exception_unhandled b __vec_02 + // SError .align 7 __vec_03: mov x18, #0xb0b3 b do_exception_unhandled b __vec_03 + // Synchronous .align 7 __vec_04: mov x18, #0xb0b4 - b do_exception_synchronous + b synchronous_exception_at_el1_with_spx b __vec_04 + // IRQ .align 7 __vec_05: mov x18, #0xb0b5 b do_exception_irq b __vec_05 + // FIQ .align 7 __vec_06: mov x18, #0xb0b6 b do_exception_unhandled b __vec_06 + // SError .align 7 __vec_07: mov x18, #0xb0b7 b do_exception_unhandled b __vec_07 + // Synchronous .align 7 __vec_08: mov x18, #0xb0b8 - b do_exception_synchronous + b synchronous_exception_at_el0 b __vec_08 + // IRQ .align 7 __vec_09: mov x18, #0xb0b9 b do_exception_irq b __vec_09 + // FIQ .align 7 __vec_10: mov x18, #0xb0ba b do_exception_unhandled b __vec_10 + // SError .align 7 __vec_11: mov x18, #0xb0bb b do_exception_unhandled b __vec_11 + // Synchronous .align 7 __vec_12: mov x18, #0xb0bc b do_exception_unhandled b __vec_12 + // IRQ .align 7 __vec_13: mov x18, #0xb0bd b do_exception_unhandled b __vec_13 + // FIQ .align 7 __vec_14: mov x18, #0xb0be b do_exception_unhandled b __vec_14 + // SError + .align 7 +__vec_15: + mov x18, #0xb0bf + b do_exception_unhandled + b __vec_15 + .align 7 exception_vector_end: diff --git a/src/arch/aarch64/interrupt/handler.rs b/src/arch/aarch64/interrupt/handler.rs index 279c89a..9be3973 100644 --- a/src/arch/aarch64/interrupt/handler.rs +++ b/src/arch/aarch64/interrupt/handler.rs @@ -1,25 +1,28 @@ +use crate::syscall::IntRegisters; + #[derive(Default)] #[repr(packed)] pub struct ScratchRegisters { - pub x18: usize, - pub x17: usize, - pub x16: usize, - pub x15: usize, - pub x14: usize, - pub x13: usize, - pub x12: usize, - pub x11: usize, - pub x10: usize, - pub x9: usize, - pub x8: usize, - pub x7: usize, - pub x6: usize, - pub x5: usize, - pub x4: usize, - pub x3: usize, - pub x2: usize, - pub x1: usize, pub x0: usize, + pub x1: usize, + pub x2: usize, + pub x3: usize, + pub x4: 
usize, + pub x5: usize, + pub x6: usize, + pub x7: usize, + pub x8: usize, + pub x9: usize, + pub x10: usize, + pub x11: usize, + pub x12: usize, + pub x13: usize, + pub x14: usize, + pub x15: usize, + pub x16: usize, + pub x17: usize, + pub x18: usize, + pub padding: usize, } impl ScratchRegisters { @@ -50,18 +53,18 @@ impl ScratchRegisters { #[repr(packed)] pub struct PreservedRegisters { //TODO: is X30 a preserved register? - pub x30: usize, - pub x29: usize, - pub x28: usize, - pub x27: usize, - pub x26: usize, - pub x25: usize, - pub x24: usize, - pub x23: usize, - pub x22: usize, - pub x21: usize, - pub x20: usize, pub x19: usize, + pub x20: usize, + pub x21: usize, + pub x22: usize, + pub x23: usize, + pub x24: usize, + pub x25: usize, + pub x26: usize, + pub x27: usize, + pub x28: usize, + pub x29: usize, + pub x30: usize, } impl PreservedRegisters { @@ -83,34 +86,245 @@ impl PreservedRegisters { #[derive(Default)] #[repr(packed)] -pub struct InterruptStack { - pub elr_el1: usize, - //TODO: should this push be removed? 
- pub unknown: usize, - pub tpidr_el0: usize, - pub tpidrro_el0: usize, - pub spsr_el1: usize, +pub struct IretRegisters { + // occurred + // The exception vector disambiguates at which EL the interrupt + pub sp_el0: usize, // Shouldn't be used if interrupt occurred at EL1 pub esr_el1: usize, - pub sp_el0: usize, - pub preserved: PreservedRegisters, + pub spsr_el1: usize, + pub tpidrro_el0: usize, + pub tpidr_el0: usize, + pub elr_el1: usize, +} + +impl IretRegisters { + pub fn dump(&self) { + println!("ELR_EL1: {:>016X}", { self.elr_el1 }); + println!("TPIDR_EL0: {:>016X}", { self.tpidr_el0 }); + println!("TPIDRRO_EL0: {:>016X}", { self.tpidrro_el0 }); + println!("SPSR_EL1: {:>016X}", { self.spsr_el1 }); + println!("ESR_EL1: {:>016X}", { self.esr_el1 }); + println!("SP_EL0: {:>016X}", { self.sp_el0 }); + } +} + +#[derive(Default)] +#[repr(packed)] +pub struct InterruptStack { + pub iret: IretRegisters, pub scratch: ScratchRegisters, - //TODO: eret registers + pub preserved: PreservedRegisters, } impl InterruptStack { pub fn dump(&self) { + self.iret.dump(); self.scratch.dump(); self.preserved.dump(); - println!("SP_EL0: {:>016X}", { self.sp_el0 }); - println!("ESR_EL1: {:>016X}", { self.esr_el1 }); - println!("SPSR_EL1: {:>016X}", { self.spsr_el1 }); - println!("TPIDRRO_EL0: {:>016X}", { self.tpidrro_el0 }); - println!("TPIDR_EL0: {:>016X}", { self.tpidr_el0 }); - println!("UNKNOWN: {:>016X}", { self.unknown }); - println!("ELR_EL1: {:>016X}", { self.elr_el1 }); + } + + /// Saves all registers to a struct used by the proc: + /// scheme to read/write registers. 
+ pub fn save(&self, all: &mut IntRegisters) { + all.elr_el1 = self.iret.elr_el1; + all.tpidr_el0 = self.iret.tpidr_el0; + all.tpidrro_el0 = self.iret.tpidrro_el0; + all.spsr_el1 = self.iret.spsr_el1; + all.esr_el1 = self.iret.esr_el1; + all.sp_el0 = self.iret.sp_el0; + all.padding = 0; + all.x30 = self.preserved.x30; + all.x29 = self.preserved.x29; + all.x28 = self.preserved.x28; + all.x27 = self.preserved.x27; + all.x26 = self.preserved.x26; + all.x25 = self.preserved.x25; + all.x24 = self.preserved.x24; + all.x23 = self.preserved.x23; + all.x22 = self.preserved.x22; + all.x21 = self.preserved.x21; + all.x20 = self.preserved.x20; + all.x19 = self.preserved.x19; + all.x18 = self.scratch.x18; + all.x17 = self.scratch.x17; + all.x16 = self.scratch.x16; + all.x15 = self.scratch.x15; + all.x14 = self.scratch.x14; + all.x13 = self.scratch.x13; + all.x12 = self.scratch.x12; + all.x11 = self.scratch.x11; + all.x10 = self.scratch.x10; + all.x9 = self.scratch.x9; + all.x8 = self.scratch.x8; + all.x7 = self.scratch.x7; + all.x6 = self.scratch.x6; + all.x5 = self.scratch.x5; + all.x4 = self.scratch.x4; + all.x3 = self.scratch.x3; + all.x2 = self.scratch.x2; + all.x1 = self.scratch.x1; + all.x0 = self.scratch.x0; } //TODO pub fn is_singlestep(&self) -> bool { false } pub fn set_singlestep(&mut self, singlestep: bool) {} } + +#[macro_export] +macro_rules! aarch64_asm { + ($($strings:expr,)+) => { + global_asm!(concat!( + $($strings),+, + )); + }; +} + +#[macro_export] +macro_rules! function { + ($name:ident => { $($body:expr,)+ }) => { + aarch64_asm!( + ".global ", stringify!($name), "\n", + ".type ", stringify!($name), ", @function\n", + ".section .text.", stringify!($name), ", \"ax\", @progbits\n", + stringify!($name), ":\n", + $($body),+, + ".size ", stringify!($name), ", . - ", stringify!($name), "\n", + ".text\n", + ); + extern "C" { + pub fn $name(); + } + }; +} + +#[macro_export] +macro_rules! 
push_scratch { + () => { " + // Push scratch registers + stp x18, x18, [sp, #-16]! + stp x16, x17, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x4, x5, [sp, #-16]! + stp x2, x3, [sp, #-16]! + stp x0, x1, [sp, #-16]! + " }; +} + +#[macro_export] +macro_rules! pop_scratch { + () => { " + // Pop scratch registers + ldp x0, x1, [sp], #16 + ldp x2, x3, [sp], #16 + ldp x4, x5, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x16, x17, [sp], #16 + ldp x18, x18, [sp], #16 + " }; +} + +#[macro_export] +macro_rules! push_preserved { + () => { " + // Push preserved registers + stp x29, x30, [sp, #-16]! + stp x27, x28, [sp, #-16]! + stp x25, x26, [sp, #-16]! + stp x23, x24, [sp, #-16]! + stp x21, x22, [sp, #-16]! + stp x19, x20, [sp, #-16]! + " }; +} + +#[macro_export] +macro_rules! pop_preserved { + () => { " + // Pop preserved registers + ldp x19, x20, [sp], #16 + ldp x21, x22, [sp], #16 + ldp x23, x24, [sp], #16 + ldp x25, x26, [sp], #16 + ldp x27, x28, [sp], #16 + ldp x29, x30, [sp], #16 + " }; +} + +#[macro_export] +macro_rules! push_special { + () => { " + mrs x14, tpidr_el0 + mrs x15, elr_el1 + stp x14, x15, [sp, #-16]! + + mrs x14, spsr_el1 + mrs x15, tpidrro_el0 + stp x14, x15, [sp, #-16]! + + mrs x14, sp_el0 + mrs x15, esr_el1 + stp x14, x15, [sp, #-16]! + " }; +} + +#[macro_export] +macro_rules! pop_special { + () => { " + ldp x14, x15, [sp], 16 + msr esr_el1, x15 + msr sp_el0, x14 + + ldp x14, x15, [sp], 16 + msr tpidrro_el0, x15 + msr spsr_el1, x14 + + ldp x14, x15, [sp], 16 + msr elr_el1, x15 + msr tpidr_el0, x14 + " }; +} + +#[macro_export] +macro_rules! exception_stack { + ($name:ident, |$stack:ident| $code:block) => { + paste::item! 
{ + #[no_mangle] + unsafe extern "C" fn [<__exception_ $name>](stack: *mut $crate::arch::aarch64::interrupt::InterruptStack) { + // This inner function is needed because macros are buggy: + // https://github.com/dtolnay/paste/issues/7 + #[inline(always)] + unsafe fn inner($stack: &mut $crate::arch::aarch64::interrupt::InterruptStack) { + $code + } + inner(&mut *stack); + } + + function!($name => { + // Backup all userspace registers to stack + push_preserved!(), + push_scratch!(), + push_special!(), + + // Call inner function with pointer to stack + "mov x29, sp\n", + "mov x0, sp\n", + "bl __exception_", stringify!($name), "\n", + + // Restore all userspace registers + pop_special!(), + pop_scratch!(), + pop_preserved!(), + + "eret\n", + }); + } + }; +} diff --git a/src/arch/aarch64/interrupt/mod.rs b/src/arch/aarch64/interrupt/mod.rs index 9811df2..4639332 100644 --- a/src/arch/aarch64/interrupt/mod.rs +++ b/src/arch/aarch64/interrupt/mod.rs @@ -1,10 +1,12 @@ //! Interrupt instructions +#[macro_use] pub mod handler; + +pub mod exception; pub mod irq; pub mod syscall; pub mod trace; -pub mod unhandled_exceptions; pub use self::handler::InterruptStack; pub use self::trace::stack_trace; diff --git a/src/arch/aarch64/interrupt/syscall.rs b/src/arch/aarch64/interrupt/syscall.rs index 42b64ff..afd1416 100644 --- a/src/arch/aarch64/interrupt/syscall.rs +++ b/src/arch/aarch64/interrupt/syscall.rs @@ -1,6 +1,14 @@ -use crate::interrupt::InterruptStack; -use crate::syscall; +use crate::{ + arch::{interrupt::InterruptStack}, + context, + syscall, + syscall::flag::{PTRACE_FLAG_IGNORE, PTRACE_STOP_PRE_SYSCALL, PTRACE_STOP_POST_SYSCALL}, +}; +#[no_mangle] +pub unsafe extern fn do_exception_unhandled() {} + +/* #[naked] #[no_mangle] pub unsafe extern fn do_exception_unhandled() { @@ -69,7 +77,12 @@ pub unsafe extern fn do_exception_unhandled() { let a = inner(&mut *(sp as *mut InterruptStack)); } +*/ +#[no_mangle] +pub unsafe extern fn do_exception_synchronous() {} + +/* 
#[naked] #[no_mangle] pub unsafe extern fn do_exception_synchronous() { @@ -219,6 +232,7 @@ pub unsafe extern fn do_exception_synchronous() { llvm_asm!("eret" :::: "volatile"); } +*/ #[allow(dead_code)] #[repr(packed)] @@ -263,8 +277,55 @@ pub struct SyscallStack { pub x0: usize, } -#[naked] -pub unsafe extern fn clone_ret() { - llvm_asm!("ldp x29, x30, [sp], #0x60"); - llvm_asm!("mov x0, 0"); +#[macro_export] +macro_rules! with_exception_stack { + (|$stack:ident| $code:block) => {{ + let $stack = &mut *$stack; + (*$stack).scratch.x0 = $code; + }} } + +#[no_mangle] +pub unsafe extern "C" fn __inner_syscall_instruction(stack: *mut InterruptStack) { + with_exception_stack!(|stack| { + // Set a restore point for clone + let fp; + asm!("mov {}, fp", out(reg) fp); + + let scratch = &stack.scratch; + syscall::syscall(scratch.x8, scratch.x0, scratch.x1, scratch.x2, scratch.x3, scratch.x4, fp, stack) + }); +} + +function!(syscall_instruction => { + " + nop + ", + + // Push context registers + push_preserved!(), + push_scratch!(), + push_special!(), + + // TODO: Map PTI + + // Call inner function + "mov x0, sp\n", + "bl __inner_syscall_instruction\n", + + // TODO: Unmap PTI + + // Pop context registers + pop_special!(), + pop_scratch!(), + pop_preserved!(), + + // Return + "eret\n", +}); + +function!(clone_ret => { + "ldp x29, x30, [sp], #16\n", + "mov sp, x29\n", + "ret\n", +}); diff --git a/src/arch/aarch64/interrupt/unhandled_exceptions.rs b/src/arch/aarch64/interrupt/unhandled_exceptions.rs deleted file mode 100644 index 6897d89..0000000 --- a/src/arch/aarch64/interrupt/unhandled_exceptions.rs +++ /dev/null @@ -1,145 +0,0 @@ -use crate::{ - context, - cpu_id, - interrupt::{self, InterruptStack, stack_trace}, -}; - -bitflags! 
{ - pub struct ExceptionClasses: u32 { - const SVC_INSN_IN_AARCH64_STATE = 0b10101 << 26; - const DATA_ABORT_FROM_LOWER_EL = 0b100100 << 26; - const BKPT_INSN_IN_AARCH64_STATE = 0b111100 << 26; - } -} - -#[inline(never)] -pub unsafe extern fn report_exception(stack: &InterruptStack) { - println!("Unhandled exception"); - - stack.dump(); - stack_trace(); - - println!("CPU {}, PID {:?}", cpu_id(), context::context_id()); - //WARNING: name cannot be grabed, it may deadlock - - println!("HALT"); - loop { - interrupt::halt(); - } -} - -#[naked] -#[no_mangle] -pub unsafe extern fn do_report_exception() { - llvm_asm!("str x0, [sp, #-8]! - str x1, [sp, #-8]! - str x2, [sp, #-8]! - str x3, [sp, #-8]! - str x4, [sp, #-8]! - str x5, [sp, #-8]! - str x6, [sp, #-8]! - str x7, [sp, #-8]! - str x8, [sp, #-8]! - str x9, [sp, #-8]! - str x10, [sp, #-8]! - str x11, [sp, #-8]! - str x12, [sp, #-8]! - str x13, [sp, #-8]! - str x14, [sp, #-8]! - str x15, [sp, #-8]! - str x16, [sp, #-8]! - str x17, [sp, #-8]! - str x18, [sp, #-8]! - str x19, [sp, #-8]! - str x20, [sp, #-8]! - str x21, [sp, #-8]! - str x22, [sp, #-8]! - str x23, [sp, #-8]! - str x24, [sp, #-8]! - str x25, [sp, #-8]! - str x26, [sp, #-8]! - str x27, [sp, #-8]! - str x28, [sp, #-8]! - str x29, [sp, #-8]! - str x30, [sp, #-8]! - - mrs x18, sp_el0 - str x18, [sp, #-8]! - - mrs x18, esr_el1 - str x18, [sp, #-8]! - - mrs x18, spsr_el1 - str x18, [sp, #-8]! - - mrs x18, tpidrro_el0 - str x18, [sp, #-8]! - - mrs x18, tpidr_el0 - str x18, [sp, #-8]! - - str x18, [sp, #-8]! - - mrs x18, elr_el1 - str x18, [sp, #-8]!" 
- : : : : "volatile"); - - let sp: usize; - llvm_asm!("" : "={sp}"(sp) : : : "volatile"); - report_exception(&*(sp as *const InterruptStack)); - - llvm_asm!("ldr x18, [sp], #8 - msr elr_el1, x18 - - ldr x18, [sp], #8 - - ldr x18, [sp], #8 - msr tpidr_el0, x18 - - ldr x18, [sp], #8 - msr tpidrro_el0, x18 - - ldr x18, [sp], #8 - msr spsr_el1, x18 - - ldr x18, [sp], #8 - msr esr_el1, x18 - - ldr x18, [sp], #8 - msr sp_el0, x18 - - ldr x30, [sp], #8 - ldr x29, [sp], #8 - ldr x28, [sp], #8 - ldr x27, [sp], #8 - ldr x26, [sp], #8 - ldr x25, [sp], #8 - ldr x24, [sp], #8 - ldr x23, [sp], #8 - ldr x22, [sp], #8 - ldr x21, [sp], #8 - ldr x20, [sp], #8 - ldr x19, [sp], #8 - ldr x18, [sp], #8 - ldr x17, [sp], #8 - ldr x16, [sp], #8 - ldr x15, [sp], #8 - ldr x14, [sp], #8 - ldr x13, [sp], #8 - ldr x12, [sp], #8 - ldr x11, [sp], #8 - ldr x10, [sp], #8 - ldr x9, [sp], #8 - ldr x8, [sp], #8 - ldr x7, [sp], #8 - ldr x6, [sp], #8 - ldr x5, [sp], #8 - ldr x4, [sp], #8 - ldr x3, [sp], #8 - ldr x2, [sp], #8 - ldr x1, [sp], #8 - ldr x0, [sp], #8" - : : : : "volatile"); - - llvm_asm!("eret" :::: "volatile"); -} diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs index 8637665..3af07fd 100644 --- a/src/context/arch/aarch64.rs +++ b/src/context/arch/aarch64.rs @@ -150,6 +150,42 @@ impl Context { value } + pub fn dump(&self) { + println!("elr_el1: 0x{:016x}", self.elr_el1); + println!("sp_el0: 0x{:016x}", self.sp_el0); + println!("ttbr0_el1: 0x{:016x}", self.ttbr0_el1); + println!("ttbr1_el1: 0x{:016x}", self.ttbr1_el1); + println!("tpidr_el0: 0x{:016x}", self.tpidr_el0); + println!("tpidrro_el0: 0x{:016x}", self.tpidrro_el0); + println!("rflags: 0x{:016x}", self.rflags); + println!("esr_el1: 0x{:016x}", self.esr_el1); + println!("padding: 0x{:016x}", self.padding); + println!("sp: 0x{:016x}", self.sp); + println!("lr: 0x{:016x}", self.lr); + println!("fp: 0x{:016x}", self.fp); + println!("x28: 0x{:016x}", self.x28); + println!("x27: 0x{:016x}", self.x27); + 
println!("x26: 0x{:016x}", self.x26); + println!("x25: 0x{:016x}", self.x25); + println!("x24: 0x{:016x}", self.x24); + println!("x23: 0x{:016x}", self.x23); + println!("x22: 0x{:016x}", self.x22); + println!("x21: 0x{:016x}", self.x21); + println!("x20: 0x{:016x}", self.x20); + println!("x19: 0x{:016x}", self.x19); + println!("x18: 0x{:016x}", self.x18); + println!("x17: 0x{:016x}", self.x17); + println!("x16: 0x{:016x}", self.x16); + println!("x15: 0x{:016x}", self.x15); + println!("x14: 0x{:016x}", self.x14); + println!("x13: 0x{:016x}", self.x13); + println!("x12: 0x{:016x}", self.x12); + println!("x11: 0x{:016x}", self.x11); + println!("x10: 0x{:016x}", self.x10); + println!("x9: 0x{:016x}", self.x9); + println!("x8: 0x{:016x}", self.x8); + } + #[cold] #[inline(never)] #[naked] diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 05b66e2..5e7dd27 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -510,14 +510,25 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { { if let Some(stack) = &mut context.kstack { unsafe { - let interrupt_stack_offset_from_stack_base = *(stack_base as *const u64) - stack_base as u64; - let mut interrupt_stack = &mut *(stack.as_mut_ptr().add(offset + interrupt_stack_offset_from_stack_base as usize) as *mut crate::arch::interrupt::InterruptStack); - interrupt_stack.tpidr_el0 = tcb_addr; + // stack_base contains a pointer to InterruptStack. Get its offset from + // stack_base itself + let istack_offset = *(stack_base as *const u64) - stack_base as u64; + + // Get the top of the new process' stack + let new_sp = stack.as_mut_ptr().add(offset); + + // Update the pointer to the InterruptStack to reflect the new process' + // stack. (Without this the pointer would be InterruptStack on the parent + // process' stack. 
+ *(new_sp as *mut u64) = new_sp as u64 + istack_offset; + + // Update tpidr_el0 in the new process' InterruptStack + let mut interrupt_stack = &mut *(stack.as_mut_ptr().add(offset + istack_offset as usize) as *mut crate::arch::interrupt::InterruptStack); + interrupt_stack.iret.tpidr_el0 = tcb_addr; } } } - - + // Setup user TLS if let Some(mut tls) = tls_opt { // Copy TLS mapping From 28dfc0f46bf63a5500f3d3cab78f1a9c3a729f7a Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Tue, 26 Jan 2021 18:18:19 +0000 Subject: [PATCH 34/55] aarch64: Basic exception handlers --- src/arch/aarch64/interrupt/exception.rs | 47 +++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 src/arch/aarch64/interrupt/exception.rs diff --git a/src/arch/aarch64/interrupt/exception.rs b/src/arch/aarch64/interrupt/exception.rs new file mode 100644 index 0000000..01d0ef3 --- /dev/null +++ b/src/arch/aarch64/interrupt/exception.rs @@ -0,0 +1,47 @@ +use crate::{ + interrupt::stack_trace, + syscall, + syscall::flag::*, + + with_exception_stack, + exception_stack, +}; + +exception_stack!(synchronous_exception_at_el1_with_sp0, |stack| { + println!("Synchronous exception at EL1 with SP0"); + stack.dump(); + stack_trace(); + loop {} +}); + +exception_stack!(synchronous_exception_at_el1_with_spx, |stack| { + println!("Synchronous exception at EL1 with SPx"); + stack.dump(); + stack_trace(); + loop {} +}); + +exception_stack!(synchronous_exception_at_el0, |stack| { + with_exception_stack!(|stack| { + let fp; + asm!("mov {}, fp", out(reg) fp); + + let exception_code = (stack.iret.esr_el1 & (0x3f << 26)) >> 26; + if exception_code != 0b010101 { + println!("FATAL: Not an SVC induced synchronous exception"); + stack.dump(); + stack_trace(); + loop {} + } + + let scratch = &stack.scratch; + syscall::syscall(scratch.x8, scratch.x0, scratch.x1, scratch.x2, scratch.x3, scratch.x4, fp, stack) + }); +}); + +exception_stack!(unhandled_exception, |stack| { + println!("Unhandled exception"); + 
stack.dump(); + stack_trace(); + loop {} +}); From 9621c64991bd0508c7a7d62ca6a454dec92d14e3 Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Tue, 26 Jan 2021 11:46:09 -0700 Subject: [PATCH 35/55] Update syscall --- syscall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syscall b/syscall index b12d582..3029f09 160000 --- a/syscall +++ b/syscall @@ -1 +1 @@ -Subproject commit b12d582d4dd805e63972f6a9c8612518d8635889 +Subproject commit 3029f094a49711ae70f50e0aa25beb894d65885c From 00723c4ac2bcfc1bd79261f153d9fa40d46069ce Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Tue, 26 Jan 2021 19:37:23 +0000 Subject: [PATCH 36/55] aarch64: Make IRQs use the exception macros --- .../aarch64/init/pre_kstart/helpers/vectors.S | 26 +- src/arch/aarch64/interrupt/irq.rs | 131 +-------- src/arch/aarch64/interrupt/syscall.rs | 262 ------------------ 3 files changed, 28 insertions(+), 391 deletions(-) diff --git a/src/arch/aarch64/init/pre_kstart/helpers/vectors.S b/src/arch/aarch64/init/pre_kstart/helpers/vectors.S index dec303b..0406506 100644 --- a/src/arch/aarch64/init/pre_kstart/helpers/vectors.S +++ b/src/arch/aarch64/init/pre_kstart/helpers/vectors.S @@ -18,21 +18,21 @@ __vec_00: .align 7 __vec_01: mov x18, #0xb0b1 - b do_exception_irq + b irq_at_el1 b __vec_01 // FIQ .align 7 __vec_02: mov x18, #0xb0b2 - b do_exception_unhandled + b unhandled_exception b __vec_02 // SError .align 7 __vec_03: mov x18, #0xb0b3 - b do_exception_unhandled + b unhandled_exception b __vec_03 // Synchronous @@ -46,21 +46,21 @@ __vec_04: .align 7 __vec_05: mov x18, #0xb0b5 - b do_exception_irq + b irq_at_el1 b __vec_05 // FIQ .align 7 __vec_06: mov x18, #0xb0b6 - b do_exception_unhandled + b unhandled_exception b __vec_06 // SError .align 7 __vec_07: mov x18, #0xb0b7 - b do_exception_unhandled + b unhandled_exception b __vec_07 // Synchronous @@ -74,49 +74,49 @@ __vec_08: .align 7 __vec_09: mov x18, #0xb0b9 - b do_exception_irq + b irq_at_el0 b __vec_09 // FIQ .align 7 
__vec_10: mov x18, #0xb0ba - b do_exception_unhandled + b unhandled_exception b __vec_10 // SError .align 7 __vec_11: mov x18, #0xb0bb - b do_exception_unhandled + b unhandled_exception b __vec_11 // Synchronous .align 7 __vec_12: mov x18, #0xb0bc - b do_exception_unhandled + b unhandled_exception b __vec_12 // IRQ .align 7 __vec_13: mov x18, #0xb0bd - b do_exception_unhandled + b unhandled_exception b __vec_13 // FIQ .align 7 __vec_14: mov x18, #0xb0be - b do_exception_unhandled + b unhandled_exception b __vec_14 // SError .align 7 __vec_15: mov x18, #0xb0bf - b do_exception_unhandled + b unhandled_exception b __vec_15 .align 7 diff --git a/src/arch/aarch64/interrupt/irq.rs b/src/arch/aarch64/interrupt/irq.rs index 1ad13f6..27636e4 100644 --- a/src/arch/aarch64/interrupt/irq.rs +++ b/src/arch/aarch64/interrupt/irq.rs @@ -7,127 +7,26 @@ use crate::device::{gic}; use crate::device::serial::{COM1}; use crate::time; +use crate::{exception_stack}; + //resets to 0 in context::switch() pub static PIT_TICKS: AtomicUsize = ATOMIC_USIZE_INIT; -#[naked] -#[no_mangle] -pub unsafe extern fn do_exception_irq() { - #[inline(never)] - unsafe fn inner() { - irq_demux(); +exception_stack!(irq_at_el0, |stack| { + match gic::irq_ack() { + 30 => irq_handler_gentimer(30), + 33 => irq_handler_com1(33), + _ => panic!("irq_demux: unregistered IRQ"), } +}); - llvm_asm!("str x0, [sp, #-8]! - str x1, [sp, #-8]! - str x2, [sp, #-8]! - str x3, [sp, #-8]! - str x4, [sp, #-8]! - str x5, [sp, #-8]! - str x6, [sp, #-8]! - str x7, [sp, #-8]! - str x8, [sp, #-8]! - str x9, [sp, #-8]! - str x10, [sp, #-8]! - str x11, [sp, #-8]! - str x12, [sp, #-8]! - str x13, [sp, #-8]! - str x14, [sp, #-8]! - str x15, [sp, #-8]! - str x16, [sp, #-8]! - str x17, [sp, #-8]! - str x18, [sp, #-8]! - str x19, [sp, #-8]! - str x20, [sp, #-8]! - str x21, [sp, #-8]! - str x22, [sp, #-8]! - str x23, [sp, #-8]! - str x24, [sp, #-8]! - str x25, [sp, #-8]! - str x26, [sp, #-8]! - str x27, [sp, #-8]! - str x28, [sp, #-8]! 
- str x29, [sp, #-8]! - str x30, [sp, #-8]! - - mrs x18, sp_el0 - str x18, [sp, #-8]! - - mrs x18, esr_el1 - str x18, [sp, #-8]! - - mrs x18, spsr_el1 - str x18, [sp, #-8]! - - mrs x18, tpidrro_el0 - str x18, [sp, #-8]! - - mrs x18, tpidr_el0 - str x18, [sp, #-8]! - - str x18, [sp, #-8]! - - mrs x18, elr_el1 - str x18, [sp, #-8]!" - : : : : "volatile"); - - inner(); - - llvm_asm!("ldr x18, [sp], #8 - msr elr_el1, x18 - - ldr x18, [sp], #8 - - ldr x18, [sp], #8 - msr tpidr_el0, x18 - - ldr x18, [sp], #8 - msr tpidrro_el0, x18 - - ldr x18, [sp], #8 - msr spsr_el1, x18 - - ldr x18, [sp], #8 - msr esr_el1, x18 - - ldr x18, [sp], #8 - msr sp_el0, x18 - - ldr x30, [sp], #8 - ldr x29, [sp], #8 - ldr x28, [sp], #8 - ldr x27, [sp], #8 - ldr x26, [sp], #8 - ldr x25, [sp], #8 - ldr x24, [sp], #8 - ldr x23, [sp], #8 - ldr x22, [sp], #8 - ldr x21, [sp], #8 - ldr x20, [sp], #8 - ldr x19, [sp], #8 - ldr x18, [sp], #8 - ldr x17, [sp], #8 - ldr x16, [sp], #8 - ldr x15, [sp], #8 - ldr x14, [sp], #8 - ldr x13, [sp], #8 - ldr x12, [sp], #8 - ldr x11, [sp], #8 - ldr x10, [sp], #8 - ldr x9, [sp], #8 - ldr x8, [sp], #8 - ldr x7, [sp], #8 - ldr x6, [sp], #8 - ldr x5, [sp], #8 - ldr x4, [sp], #8 - ldr x3, [sp], #8 - ldr x2, [sp], #8 - ldr x1, [sp], #8 - ldr x0, [sp], #8" - : : : : "volatile"); - - llvm_asm!("eret" :::: "volatile"); -} +exception_stack!(irq_at_el1, |stack| { + match gic::irq_ack() { + 30 => irq_handler_gentimer(30), + 33 => irq_handler_com1(33), + _ => panic!("irq_demux: unregistered IRQ"), + } +}); unsafe fn trigger(irq: u32) { extern { diff --git a/src/arch/aarch64/interrupt/syscall.rs b/src/arch/aarch64/interrupt/syscall.rs index afd1416..948cd5d 100644 --- a/src/arch/aarch64/interrupt/syscall.rs +++ b/src/arch/aarch64/interrupt/syscall.rs @@ -8,232 +8,9 @@ use crate::{ #[no_mangle] pub unsafe extern fn do_exception_unhandled() {} -/* -#[naked] -#[no_mangle] -pub unsafe extern fn do_exception_unhandled() { - #[inline(never)] - unsafe fn inner(stack: &mut InterruptStack) 
-> usize { - println!("do_exception_unhandled: ELR: 0x{:016x}", stack.elr_el1); - loop {} - } - - llvm_asm!("str x0, [sp, #-8]! - str x1, [sp, #-8]! - str x2, [sp, #-8]! - str x3, [sp, #-8]! - str x4, [sp, #-8]! - str x5, [sp, #-8]! - str x6, [sp, #-8]! - str x7, [sp, #-8]! - str x8, [sp, #-8]! - str x9, [sp, #-8]! - str x10, [sp, #-8]! - str x11, [sp, #-8]! - str x12, [sp, #-8]! - str x13, [sp, #-8]! - str x14, [sp, #-8]! - str x15, [sp, #-8]! - str x16, [sp, #-8]! - str x17, [sp, #-8]! - str x18, [sp, #-8]! - str x19, [sp, #-8]! - str x20, [sp, #-8]! - str x21, [sp, #-8]! - str x22, [sp, #-8]! - str x23, [sp, #-8]! - str x24, [sp, #-8]! - str x25, [sp, #-8]! - str x26, [sp, #-8]! - str x27, [sp, #-8]! - str x28, [sp, #-8]! - str x29, [sp, #-8]! - str x30, [sp, #-8]! - - mrs x18, sp_el0 - str x18, [sp, #-8]! - - mrs x18, esr_el1 - str x18, [sp, #-8]! - - mrs x18, spsr_el1 - str x18, [sp, #-8]! - - mrs x18, tpidrro_el0 - str x18, [sp, #-8]! - - mrs x18, tpidr_el0 - str x18, [sp, #-8]! - - str x18, [sp, #-8]! - - mrs x18, elr_el1 - str x18, [sp, #-8]!" 
- : : : : "volatile"); - - let sp: usize; - llvm_asm!("" : "={sp}"(sp) : : : "volatile"); - llvm_asm!("mov x29, sp" : : : : "volatile"); - - let a = inner(&mut *(sp as *mut InterruptStack)); -} -*/ - #[no_mangle] pub unsafe extern fn do_exception_synchronous() {} -/* -#[naked] -#[no_mangle] -pub unsafe extern fn do_exception_synchronous() { - #[inline(never)] - unsafe fn inner(stack: &mut InterruptStack) -> usize { - let exception_code = (stack.esr_el1 & (0x3f << 26)) >> 26; - if exception_code != 0b010101 { - println!("do_exception_synchronous: Non-SVC!!!"); - loop {} - } else { - println!("do_exception_synchronous: SVC: x8: 0x{:016x}", stack.scratch.x8); - } - - llvm_asm!("nop": : : : "volatile"); - llvm_asm!("nop": : : : "volatile"); - llvm_asm!("nop": : : : "volatile"); - llvm_asm!("nop": : : : "volatile"); - - let fp; - llvm_asm!("" : "={fp}"(fp) : : : "volatile"); - - syscall::syscall( - stack.scratch.x8, - stack.scratch.x0, - stack.scratch.x1, - stack.scratch.x2, - stack.scratch.x3, - stack.scratch.x4, - fp, - stack - ) - } - - llvm_asm!("str x0, [sp, #-8]! - str x1, [sp, #-8]! - str x2, [sp, #-8]! - str x3, [sp, #-8]! - str x4, [sp, #-8]! - str x5, [sp, #-8]! - str x6, [sp, #-8]! - str x7, [sp, #-8]! - str x8, [sp, #-8]! - str x9, [sp, #-8]! - str x10, [sp, #-8]! - str x11, [sp, #-8]! - str x12, [sp, #-8]! - str x13, [sp, #-8]! - str x14, [sp, #-8]! - str x15, [sp, #-8]! - str x16, [sp, #-8]! - str x17, [sp, #-8]! - str x18, [sp, #-8]! - str x19, [sp, #-8]! - str x20, [sp, #-8]! - str x21, [sp, #-8]! - str x22, [sp, #-8]! - str x23, [sp, #-8]! - str x24, [sp, #-8]! - str x25, [sp, #-8]! - str x26, [sp, #-8]! - str x27, [sp, #-8]! - str x28, [sp, #-8]! - str x29, [sp, #-8]! - str x30, [sp, #-8]! - - mrs x18, sp_el0 - str x18, [sp, #-8]! - - mrs x18, esr_el1 - str x18, [sp, #-8]! - - mrs x18, spsr_el1 - str x18, [sp, #-8]! - - mrs x18, tpidrro_el0 - str x18, [sp, #-8]! - - mrs x18, tpidr_el0 - str x18, [sp, #-8]! - - str x18, [sp, #-8]! 
- - mrs x18, elr_el1 - str x18, [sp, #-8]!" - : : : : "volatile"); - - let sp: usize; - llvm_asm!("" : "={sp}"(sp) : : : "volatile"); - llvm_asm!("mov x29, sp" : : : : "volatile"); - - let a = inner(&mut *(sp as *mut InterruptStack)); - - llvm_asm!("" : : "{x0}"(a) : : "volatile"); - - llvm_asm!("ldr x18, [sp], #8 - msr elr_el1, x18 - - ldr x18, [sp], #8 - - ldr x18, [sp], #8 - msr tpidr_el0, x18 - - ldr x18, [sp], #8 - msr tpidrro_el0, x18 - - ldr x18, [sp], #8 - msr spsr_el1, x18 - - ldr x18, [sp], #8 - msr esr_el1, x18 - - ldr x18, [sp], #8 - msr sp_el0, x18 - - ldr x30, [sp], #8 - ldr x29, [sp], #8 - ldr x28, [sp], #8 - ldr x27, [sp], #8 - ldr x26, [sp], #8 - ldr x25, [sp], #8 - ldr x24, [sp], #8 - ldr x23, [sp], #8 - ldr x22, [sp], #8 - ldr x21, [sp], #8 - ldr x20, [sp], #8 - ldr x19, [sp], #8 - ldr x18, [sp], #8 - ldr x17, [sp], #8 - ldr x16, [sp], #8 - ldr x15, [sp], #8 - ldr x14, [sp], #8 - ldr x13, [sp], #8 - ldr x12, [sp], #8 - ldr x11, [sp], #8 - ldr x10, [sp], #8 - ldr x9, [sp], #8 - ldr x8, [sp], #8 - ldr x7, [sp], #8 - ldr x6, [sp], #8 - ldr x5, [sp], #8 - ldr x4, [sp], #8 - ldr x3, [sp], #8 - ldr x2, [sp], #8 - ldr x1, [sp], #8 - add sp, sp, #8" /* Skip over x0 - it's got the retval of inner already */ - : : : : "volatile"); - - llvm_asm!("eret" :::: "volatile"); -} -*/ - #[allow(dead_code)] #[repr(packed)] pub struct SyscallStack { @@ -285,45 +62,6 @@ macro_rules! 
with_exception_stack { }} } -#[no_mangle] -pub unsafe extern "C" fn __inner_syscall_instruction(stack: *mut InterruptStack) { - with_exception_stack!(|stack| { - // Set a restore point for clone - let fp; - asm!("mov {}, fp", out(reg) fp); - - let scratch = &stack.scratch; - syscall::syscall(scratch.x8, scratch.x0, scratch.x1, scratch.x2, scratch.x3, scratch.x4, fp, stack) - }); -} - -function!(syscall_instruction => { - " - nop - ", - - // Push context registers - push_preserved!(), - push_scratch!(), - push_special!(), - - // TODO: Map PTI - - // Call inner function - "mov x0, sp\n", - "bl __inner_syscall_instruction\n", - - // TODO: Unmap PTI - - // Pop context registers - pop_special!(), - pop_scratch!(), - pop_preserved!(), - - // Return - "eret\n", -}); - function!(clone_ret => { "ldp x29, x30, [sp], #16\n", "mov sp, x29\n", From 3afa0f0895e2dd7aec6bd6a020d9cde8904de628 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Wed, 27 Jan 2021 17:17:11 +0000 Subject: [PATCH 37/55] aarch64: Basic Floating-point/SIMD support --- src/context/arch/aarch64.rs | 114 ++++++++++++++++++++++++++++++++---- 1 file changed, 101 insertions(+), 13 deletions(-) diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs index 3af07fd..338e763 100644 --- a/src/context/arch/aarch64.rs +++ b/src/context/arch/aarch64.rs @@ -2,6 +2,7 @@ use core::mem; use core::sync::atomic::{AtomicBool, AtomicUsize, ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT, Ordering}; use crate::device::cpu::registers::{control_regs, tlb}; +use crate::syscall::FloatRegisters; /// This must be used by the kernel to ensure that context switches are done atomically /// Compare and exchange this to true when beginning a context switch on any CPU @@ -17,9 +18,10 @@ pub struct Context { ttbr1_el1: usize, /* Pointer to P4 translation table for this Context */ tpidr_el0: usize, /* Pointer to TLS region for this Context */ tpidrro_el0: usize, /* Pointer to TLS (read-only) region for this Context */ - rflags: usize, + 
spsr_el1: usize, esr_el1: usize, - padding: usize, + fx_loadable: bool, + fx_address: usize, sp: usize, /* Stack Pointer (x31) */ lr: usize, /* Link Register (x30) */ fp: usize, /* Frame pointer Register (x29) */ @@ -46,8 +48,6 @@ pub struct Context { x8: usize, /* Indirect location Register */ } -static CONTEXT_COUNT: AtomicUsize = ATOMIC_USIZE_INIT; - impl Context { pub fn new() -> Context { Context { @@ -57,9 +57,10 @@ impl Context { ttbr1_el1: 0, tpidr_el0: 0, tpidrro_el0: 0, - rflags: 0, /* spsr_el1 */ + spsr_el1: 0, esr_el1: 0, - padding: 0xbeef0000 | CONTEXT_COUNT.fetch_add(1, Ordering::SeqCst), + fx_loadable: false, + fx_address: 0, sp: 0, lr: 0, fp: 0, @@ -95,9 +96,6 @@ impl Context { self.ttbr1_el1 } - pub fn set_fx(&mut self, _address: usize) { - } - pub fn set_page_utable(&mut self, address: usize) { self.ttbr0_el1 = address; } @@ -150,6 +148,44 @@ impl Context { value } + pub fn get_fx_regs(&self) -> Option { + if !self.fx_loadable { + return None; + } + let mut regs = unsafe { *(self.fx_address as *const FloatRegisters) }; + let mut new_st = regs.fp_simd_regs; + regs.fp_simd_regs = new_st; + Some(regs) + } + + pub fn set_fx_regs(&mut self, mut new: FloatRegisters) -> bool { + if !self.fx_loadable { + return false; + } + + { + let old = unsafe { &*(self.fx_address as *const FloatRegisters) }; + let old_st = new.fp_simd_regs; + let mut new_st = new.fp_simd_regs; + for (new_st, old_st) in new_st.iter_mut().zip(&old_st) { + *new_st = *old_st; + } + new.fp_simd_regs = new_st; + + // Make sure we don't use `old` from now on + } + + unsafe { + *(self.fx_address as *mut FloatRegisters) = new; + } + true + } + + pub fn set_fx(&mut self, address: usize) { + self.fx_address = address; + } + + pub fn dump(&self) { println!("elr_el1: 0x{:016x}", self.elr_el1); println!("sp_el0: 0x{:016x}", self.sp_el0); @@ -157,9 +193,8 @@ impl Context { println!("ttbr1_el1: 0x{:016x}", self.ttbr1_el1); println!("tpidr_el0: 0x{:016x}", self.tpidr_el0); println!("tpidrro_el0: 
0x{:016x}", self.tpidrro_el0); - println!("rflags: 0x{:016x}", self.rflags); + println!("spsr_el1: 0x{:016x}", self.spsr_el1); println!("esr_el1: 0x{:016x}", self.esr_el1); - println!("padding: 0x{:016x}", self.padding); println!("sp: 0x{:016x}", self.sp); println!("lr: 0x{:016x}", self.lr); println!("fp: 0x{:016x}", self.fp); @@ -190,6 +225,59 @@ impl Context { #[inline(never)] #[naked] pub unsafe fn switch_to(&mut self, next: &mut Context) { + let mut float_regs = self.fx_address as *mut FloatRegisters; + asm!( + "stp q0, q1, [{0}, #16 * 0]", + "stp q2, q3, [{0}, #16 * 2]", + "stp q4, q5, [{0}, #16 * 4]", + "stp q6, q7, [{0}, #16 * 6]", + "stp q8, q9, [{0}, #16 * 8]", + "stp q10, q11, [{0}, #16 * 10]", + "stp q12, q13, [{0}, #16 * 12]", + "stp q14, q15, [{0}, #16 * 14]", + "stp q16, q17, [{0}, #16 * 16]", + "stp q18, q19, [{0}, #16 * 18]", + "stp q20, q21, [{0}, #16 * 20]", + "stp q22, q23, [{0}, #16 * 22]", + "stp q24, q25, [{0}, #16 * 24]", + "stp q26, q27, [{0}, #16 * 26]", + "stp q28, q29, [{0}, #16 * 28]", + "stp q30, q31, [{0}, #16 * 30]", + "mrs {1}, fpcr", + "mrs {2}, fpsr", + in(reg) (&(*(float_regs)).fp_simd_regs), + out(reg) ((*(float_regs)).fpcr), + out(reg) ((*(float_regs)).fpsr) + ); + + self.fx_loadable = true; + + if next.fx_loadable { + asm!( + "ldp q0, q1, [{0}, #16 * 0]", + "ldp q2, q3, [{0}, #16 * 2]", + "ldp q4, q5, [{0}, #16 * 4]", + "ldp q6, q7, [{0}, #16 * 6]", + "ldp q8, q9, [{0}, #16 * 8]", + "ldp q10, q11, [{0}, #16 * 10]", + "ldp q12, q13, [{0}, #16 * 12]", + "ldp q14, q15, [{0}, #16 * 14]", + "ldp q16, q17, [{0}, #16 * 16]", + "ldp q18, q19, [{0}, #16 * 18]", + "ldp q20, q21, [{0}, #16 * 20]", + "ldp q22, q23, [{0}, #16 * 22]", + "ldp q24, q25, [{0}, #16 * 24]", + "ldp q26, q27, [{0}, #16 * 26]", + "ldp q28, q29, [{0}, #16 * 28]", + "ldp q30, q31, [{0}, #16 * 30]", + "msr fpcr, {1}", + "msr fpsr, {2}", + in(reg) (&(*(float_regs)).fp_simd_regs), + in(reg) ((*(float_regs)).fpcr), + in(reg) ((*(float_regs)).fpsr) + ); + } + 
self.ttbr0_el1 = control_regs::ttbr0_el1() as usize; if next.ttbr0_el1 != self.ttbr0_el1 { control_regs::ttbr0_el1_write(next.ttbr0_el1 as u64); @@ -277,8 +365,8 @@ impl Context { llvm_asm!("mrs $0, tpidrro_el0" : "=r"(self.tpidrro_el0) : : "memory" : "volatile"); llvm_asm!("msr tpidrro_el0, $0" : : "r"(next.tpidrro_el0) : "memory" : "volatile"); - llvm_asm!("mrs $0, spsr_el1" : "=r"(self.rflags) : : "memory" : "volatile"); - llvm_asm!("msr spsr_el1, $0" : : "r"(next.rflags) : "memory" : "volatile"); + llvm_asm!("mrs $0, spsr_el1" : "=r"(self.spsr_el1) : : "memory" : "volatile"); + llvm_asm!("msr spsr_el1, $0" : : "r"(next.spsr_el1) : "memory" : "volatile"); llvm_asm!("mrs $0, esr_el1" : "=r"(self.esr_el1) : : "memory" : "volatile"); llvm_asm!("msr esr_el1, $0" : : "r"(next.esr_el1) : "memory" : "volatile"); From 1462fe8638331ec26436c08905325d21c645b14a Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Wed, 27 Jan 2021 17:17:59 +0000 Subject: [PATCH 38/55] aarch64: context: Align with x86_64 code --- src/context/arch/aarch64.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs index 338e763..a8af274 100644 --- a/src/context/arch/aarch64.rs +++ b/src/context/arch/aarch64.rs @@ -1,5 +1,5 @@ use core::mem; -use core::sync::atomic::{AtomicBool, AtomicUsize, ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT, Ordering}; +use core::sync::atomic::{AtomicBool, Ordering}; use crate::device::cpu::registers::{control_regs, tlb}; use crate::syscall::FloatRegisters; @@ -8,7 +8,7 @@ use crate::syscall::FloatRegisters; /// Compare and exchange this to true when beginning a context switch on any CPU /// The `Context::switch_to` function will set it back to false, allowing other CPU's to switch /// This must be done, as no locks can be held on the stack during switch -pub static CONTEXT_SWITCH_LOCK: AtomicBool = ATOMIC_BOOL_INIT; +pub static CONTEXT_SWITCH_LOCK: AtomicBool = AtomicBool::new(false); #[derive(Clone, 
Debug)] pub struct Context { From 4dbfaf3ec108ca07b00ef6296d4fb1e63efca29b Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Wed, 27 Jan 2021 17:19:37 +0000 Subject: [PATCH 39/55] Nit: Add missing close brace in code comment --- src/syscall/process.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 5e7dd27..df1b24d 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -519,7 +519,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { // Update the pointer to the InterruptStack to reflect the new process' // stack. (Without this the pointer would be InterruptStack on the parent - // process' stack. + // process' stack). *(new_sp as *mut u64) = new_sp as u64 + istack_offset; // Update tpidr_el0 in the new process' InterruptStack From a06636b77fd0fb335512164bf7b451d2e1247f43 Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Wed, 27 Jan 2021 10:44:52 -0700 Subject: [PATCH 40/55] Update syscall --- syscall | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syscall b/syscall index 3029f09..bfdd0bb 160000 --- a/syscall +++ b/syscall @@ -1 +1 @@ -Subproject commit 3029f094a49711ae70f50e0aa25beb894d65885c +Subproject commit bfdd0bb8d64c078192e44e83719a8aa4b3bf85a1 From afca6ab31ca8fcf27557c45672afb5a125bdb274 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Thu, 28 Jan 2021 16:50:07 +0000 Subject: [PATCH 41/55] aarch64: Fix incorrect FP save/restore --- src/context/arch/aarch64.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/context/arch/aarch64.rs b/src/context/arch/aarch64.rs index a8af274..2b850a7 100644 --- a/src/context/arch/aarch64.rs +++ b/src/context/arch/aarch64.rs @@ -225,7 +225,7 @@ impl Context { #[inline(never)] #[naked] pub unsafe fn switch_to(&mut self, next: &mut Context) { - let mut float_regs = self.fx_address as *mut FloatRegisters; + let mut float_regs = &mut *(self.fx_address as *mut 
FloatRegisters); asm!( "stp q0, q1, [{0}, #16 * 0]", "stp q2, q3, [{0}, #16 * 2]", @@ -245,14 +245,15 @@ impl Context { "stp q30, q31, [{0}, #16 * 30]", "mrs {1}, fpcr", "mrs {2}, fpsr", - in(reg) (&(*(float_regs)).fp_simd_regs), - out(reg) ((*(float_regs)).fpcr), - out(reg) ((*(float_regs)).fpsr) + in(reg) &mut float_regs.fp_simd_regs, + out(reg) float_regs.fpcr, + out(reg) float_regs.fpsr ); self.fx_loadable = true; if next.fx_loadable { + let mut float_regs = &mut *(next.fx_address as *mut FloatRegisters); asm!( "ldp q0, q1, [{0}, #16 * 0]", "ldp q2, q3, [{0}, #16 * 2]", @@ -272,9 +273,9 @@ impl Context { "ldp q30, q31, [{0}, #16 * 30]", "msr fpcr, {1}", "msr fpsr, {2}", - in(reg) (&(*(float_regs)).fp_simd_regs), - in(reg) ((*(float_regs)).fpcr), - in(reg) ((*(float_regs)).fpsr) + in(reg) &mut float_regs.fp_simd_regs, + in(reg) float_regs.fpcr, + in(reg) float_regs.fpsr ); } From 1e10cac3e1d951258920a6d7aed36343fe162e64 Mon Sep 17 00:00:00 2001 From: Robin Randhawa Date: Thu, 28 Jan 2021 16:51:50 +0000 Subject: [PATCH 42/55] aarch64: Increase storage for FP context to consider AArch64's needs Brute-forcing this at present. Would be better to wrap this conditionally for the architecture. 
--- src/context/list.rs | 2 +- src/context/mod.rs | 2 +- src/syscall/process.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/context/list.rs b/src/context/list.rs index 47c6c9f..704642e 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -75,7 +75,7 @@ impl ContextList { let context_lock = self.new_context()?; { let mut context = context_lock.write(); - let mut fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) }; + let mut fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(1024, 16)) as *mut [u8; 1024]) }; for b in fx.iter_mut() { *b = 0; } diff --git a/src/context/mod.rs b/src/context/mod.rs index 6c29328..a19c428 100644 --- a/src/context/mod.rs +++ b/src/context/mod.rs @@ -55,7 +55,7 @@ pub fn init() { let mut contexts = contexts_mut(); let context_lock = contexts.new_context().expect("could not initialize first context"); let mut context = context_lock.write(); - let mut fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) }; + let mut fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(1024, 16)) as *mut [u8; 1024]) }; for b in fx.iter_mut() { *b = 0; } diff --git a/src/syscall/process.rs b/src/syscall/process.rs index df1b24d..9adad28 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -87,7 +87,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { arch = context.arch.clone(); if let Some(ref fx) = context.kfx { - let mut new_fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) }; + let mut new_fx = unsafe { Box::from_raw(crate::ALLOCATOR.alloc(Layout::from_size_align_unchecked(1024, 16)) as *mut [u8; 1024]) }; for (new_b, b) in new_fx.iter_mut().zip(fx.iter()) { *new_b = *b; } From b26c3e0ae9602bd0c27f8b1899f0db8ab027fd52 Mon Sep 17 00:00:00 
2001 From: Jeremy Soller Date: Sat, 13 Feb 2021 11:10:21 -0700 Subject: [PATCH 43/55] Make context name a RwLock --- src/arch/x86_64/interrupt/syscall.rs | 2 +- src/context/context.rs | 8 ++++---- src/context/switch.rs | 2 +- src/lib.rs | 2 +- src/scheme/proc.rs | 2 +- src/scheme/sys/block.rs | 2 +- src/scheme/sys/context.rs | 2 +- src/scheme/sys/exe.rs | 2 +- src/scheme/sys/iostat.rs | 2 +- src/scheme/sys/syscall.rs | 2 +- src/syscall/mod.rs | 10 +++++----- src/syscall/process.rs | 10 +++++----- 12 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/arch/x86_64/interrupt/syscall.rs b/src/arch/x86_64/interrupt/syscall.rs index 847d3af..82beba8 100644 --- a/src/arch/x86_64/interrupt/syscall.rs +++ b/src/arch/x86_64/interrupt/syscall.rs @@ -95,7 +95,7 @@ interrupt_stack!(syscall, |stack| { let context = contexts.current(); if let Some(current) = context { let current = current.read(); - let name = current.name.lock(); + let name = current.name.read(); println!("Warning: Context {} used deprecated `int 0x80` construct", core::str::from_utf8(&name).unwrap_or("(invalid utf8)")); } else { println!("Warning: Unknown context used deprecated `int 0x80` construct"); diff --git a/src/context/context.rs b/src/context/context.rs index 1304613..15163e5 100644 --- a/src/context/context.rs +++ b/src/context/context.rs @@ -5,7 +5,7 @@ use alloc::collections::VecDeque; use core::alloc::{GlobalAlloc, Layout}; use core::cmp::Ordering; use core::mem; -use spin::Mutex; +use spin::{Mutex, RwLock}; use crate::arch::{interrupt::InterruptStack, paging::PAGE_SIZE}; use crate::common::unique::Unique; @@ -120,7 +120,7 @@ pub struct ContextSnapshot { impl ContextSnapshot { //TODO: Should this accept &mut Context to ensure name/files will not change? 
pub fn new(context: &Context) -> Self { - let name = context.name.lock().clone(); + let name = context.name.read().clone(); let mut files = Vec::new(); for descriptor_opt in context.files.lock().iter() { let description = if let Some(descriptor) = descriptor_opt { @@ -230,7 +230,7 @@ pub struct Context { /// User grants pub grants: Arc>, /// The name of the context - pub name: Arc>>, + pub name: Arc>>, /// The current working directory pub cwd: Arc>>, /// The open files in the scheme @@ -287,7 +287,7 @@ impl Context { sigstack: None, tls: None, grants: Arc::new(Mutex::new(UserGrants::default())), - name: Arc::new(Mutex::new(Vec::new().into_boxed_slice())), + name: Arc::new(RwLock::new(Vec::new().into_boxed_slice())), cwd: Arc::new(Mutex::new(Vec::new())), files: Arc::new(Mutex::new(Vec::new())), actions: Arc::new(Mutex::new(vec![( diff --git a/src/context/switch.rs b/src/context/switch.rs index 634aff3..2423d55 100644 --- a/src/context/switch.rs +++ b/src/context/switch.rs @@ -13,7 +13,7 @@ unsafe fn update(context: &mut Context, cpu_id: usize) { // Take ownership if not already owned if context.cpu_id == None { context.cpu_id = Some(cpu_id); - // println!("{}: take {} {}", cpu_id, context.id, ::core::str::from_utf8_unchecked(&context.name.lock())); + // println!("{}: take {} {}", cpu_id, context.id, ::core::str::from_utf8_unchecked(&context.name.read())); } // Restore from signal, must only be done from another context to avoid overwriting the stack! 
diff --git a/src/lib.rs b/src/lib.rs index aad157f..a18fb27 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -279,7 +279,7 @@ pub extern fn ksignal(signal: usize) { let contexts = context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); - info!("NAME {}", unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }); + info!("NAME {}", unsafe { ::core::str::from_utf8_unchecked(&context.name.read()) }); } } diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs index 68dbf81..99d19e3 100644 --- a/src/scheme/proc.rs +++ b/src/scheme/proc.rs @@ -236,7 +236,7 @@ impl Scheme for ProcScheme { data = match operation { Operation::Memory => OperationData::Memory(MemData::default()), Operation::Trace => OperationData::Trace(TraceData::default()), - Operation::Static(_) => OperationData::Static(StaticData::new(target.name.lock().clone())), + Operation::Static(_) => OperationData::Static(StaticData::new(target.name.read().clone())), _ => OperationData::Other, }; diff --git a/src/scheme/sys/block.rs b/src/scheme/sys/block.rs index 2143016..ff9d094 100644 --- a/src/scheme/sys/block.rs +++ b/src/scheme/sys/block.rs @@ -15,7 +15,7 @@ pub fn resource() -> Result> { let contexts = context::contexts(); for (id, context_lock) in contexts.iter() { let context = context_lock.read(); - rows.push((*id, context.name.lock().clone(), context.status_reason)); + rows.push((*id, context.name.read().clone(), context.status_reason)); } } diff --git a/src/scheme/sys/context.rs b/src/scheme/sys/context.rs index 2c94960..df457c8 100644 --- a/src/scheme/sys/context.rs +++ b/src/scheme/sys/context.rs @@ -107,7 +107,7 @@ pub fn resource() -> Result> { format!("{} B", memory) }; - let name_bytes = context.name.lock(); + let name_bytes = context.name.read(); let name = str::from_utf8(&name_bytes).unwrap_or(""); string.push_str(&format!("{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<8}{:<8}{}\n", diff --git a/src/scheme/sys/exe.rs 
b/src/scheme/sys/exe.rs index e4b1a6a..69e2261 100644 --- a/src/scheme/sys/exe.rs +++ b/src/scheme/sys/exe.rs @@ -8,7 +8,7 @@ pub fn resource() -> Result> { let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - let name = context.name.lock(); + let name = context.name.read(); name.clone().into_vec() }; Ok(name) diff --git a/src/scheme/sys/iostat.rs b/src/scheme/sys/iostat.rs index e066387..3aac16e 100644 --- a/src/scheme/sys/iostat.rs +++ b/src/scheme/sys/iostat.rs @@ -16,7 +16,7 @@ pub fn resource() -> Result> { let contexts = context::contexts(); for (id, context_lock) in contexts.iter() { let context = context_lock.read(); - rows.push((*id, context.name.lock().clone(), context.files.lock().clone())); + rows.push((*id, context.name.read().clone(), context.files.lock().clone())); } } diff --git a/src/scheme/sys/syscall.rs b/src/scheme/sys/syscall.rs index 4d2eea4..c458d4e 100644 --- a/src/scheme/sys/syscall.rs +++ b/src/scheme/sys/syscall.rs @@ -16,7 +16,7 @@ pub fn resource() -> Result> { let contexts = context::contexts(); for (id, context_lock) in contexts.iter() { let context = context_lock.read(); - rows.push((*id, context.name.lock().clone(), context.syscall.clone())); + rows.push((*id, context.name.read().clone(), context.syscall.clone())); } } diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs index 779ac68..eec7300 100644 --- a/src/syscall/mod.rs +++ b/src/syscall/mod.rs @@ -82,7 +82,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u let contexts = crate::context::contexts(); let current = contexts.current().unwrap(); let current = current.read(); - let name = current.name.lock(); + let name = current.name.read(); println!("{:?} using deprecated fmap(...) 
call", core::str::from_utf8(&name)); } file_op(a, fd, c, d) @@ -92,7 +92,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u let contexts = crate::context::contexts(); let current = contexts.current().unwrap(); let current = current.read(); - let name = current.name.lock(); + let name = current.name.read(); println!("{:?} using deprecated funmap(...) call", core::str::from_utf8(&name)); } funmap_old(b) @@ -208,7 +208,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); - let name_raw = context.name.lock(); + let name_raw = context.name.read(); let name = unsafe { core::str::from_utf8_unchecked(&name_raw) }; if name.contains("redoxfs") { if a == SYS_CLOCK_GETTIME || a == SYS_YIELD { @@ -230,7 +230,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); - print!("{} ({}): ", unsafe { core::str::from_utf8_unchecked(&context.name.lock()) }, context.id.into()); + print!("{} ({}): ", unsafe { core::str::from_utf8_unchecked(&context.name.read()) }, context.id.into()); } println!("{}", debug::format_call(a, b, c, d, e, f)); @@ -265,7 +265,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); - print!("{} ({}): ", unsafe { core::str::from_utf8_unchecked(&context.name.lock()) }, context.id.into()); + print!("{} ({}): ", unsafe { core::str::from_utf8_unchecked(&context.name.read()) }, context.id.into()); } print!("{} = ", debug::format_call(a, b, c, d, e, f)); diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 9adad28..b034358 100644 --- 
a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -5,7 +5,7 @@ use alloc::vec::Vec; use core::alloc::{GlobalAlloc, Layout}; use core::ops::DerefMut; use core::{intrinsics, mem}; -use spin::Mutex; +use spin::{RwLock, Mutex}; use crate::context::file::FileDescriptor; use crate::context::{ContextId, WaitpidKey}; @@ -244,7 +244,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { if flags.contains(CLONE_VM) { name = Arc::clone(&context.name); } else { - name = Arc::new(Mutex::new(context.name.lock().clone())); + name = Arc::new(RwLock::new(context.name.read().clone())); } if flags.contains(CLONE_FS) { @@ -608,7 +608,7 @@ fn empty(context: &mut context::Context, reaping: bool) { let grants = mem::replace(&mut *grants, UserGrants::default()); for grant in grants.inner.into_iter() { if reaping { - println!("{}: {}: Grant should not exist: {:?}", context.id.into(), unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, grant); + println!("{}: {}: Grant should not exist: {:?}", context.id.into(), unsafe { ::core::str::from_utf8_unchecked(&context.name.read()) }, grant); let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) }; let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET))); @@ -652,7 +652,7 @@ fn fexec_noreturn( ptrace::regs_for(&context).map(|s| s.is_singlestep()).unwrap_or(false) }; - context.name = Arc::new(Mutex::new(name)); + context.name = Arc::new(RwLock::new(name)); empty(&mut context, false); @@ -974,7 +974,7 @@ pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]> println!( "{}: {}: fexec failed to execute {}: {}", context.id.into(), - unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, + unsafe { ::core::str::from_utf8_unchecked(&context.name.read()) }, fd.into(), err ); From a9bee0bbdcdef4ced83f6afff9c32d539923dc5c Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Sat, 13 Feb 
2021 12:16:47 -0700 Subject: [PATCH 44/55] Require UTF-8 for context name --- src/arch/x86_64/interrupt/syscall.rs | 3 +- src/context/context.rs | 25 ++++++++++------- src/context/switch.rs | 2 +- src/lib.rs | 2 +- src/scheme/proc.rs | 4 ++- src/scheme/sys/block.rs | 3 +- src/scheme/sys/context.rs | 6 +--- src/scheme/sys/exe.rs | 8 ++++-- src/scheme/sys/iostat.rs | 2 +- src/scheme/sys/syscall.rs | 3 +- src/syscall/mod.rs | 13 ++++----- src/syscall/process.rs | 42 +++++++++++++++++----------- 12 files changed, 62 insertions(+), 51 deletions(-) diff --git a/src/arch/x86_64/interrupt/syscall.rs b/src/arch/x86_64/interrupt/syscall.rs index 82beba8..9c5364e 100644 --- a/src/arch/x86_64/interrupt/syscall.rs +++ b/src/arch/x86_64/interrupt/syscall.rs @@ -95,8 +95,7 @@ interrupt_stack!(syscall, |stack| { let context = contexts.current(); if let Some(current) = context { let current = current.read(); - let name = current.name.read(); - println!("Warning: Context {} used deprecated `int 0x80` construct", core::str::from_utf8(&name).unwrap_or("(invalid utf8)")); + println!("Warning: Context {} used deprecated `int 0x80` construct", *current.name.read()); } else { println!("Warning: Unknown context used deprecated `int 0x80` construct"); } diff --git a/src/context/context.rs b/src/context/context.rs index 15163e5..f2da71a 100644 --- a/src/context/context.rs +++ b/src/context/context.rs @@ -1,10 +1,15 @@ -use alloc::sync::Arc; -use alloc::boxed::Box; -use alloc::vec::Vec; -use alloc::collections::VecDeque; -use core::alloc::{GlobalAlloc, Layout}; -use core::cmp::Ordering; -use core::mem; +use alloc::{ + boxed::Box, + collections::VecDeque, + string::String, + sync::Arc, + vec::Vec, +}; +use core::{ + alloc::{GlobalAlloc, Layout}, + cmp::Ordering, + mem, +}; use spin::{Mutex, RwLock}; use crate::arch::{interrupt::InterruptStack, paging::PAGE_SIZE}; @@ -112,7 +117,7 @@ pub struct ContextSnapshot { pub syscall: Option<(usize, usize, usize, usize, usize, usize)>, // Clone fields 
//TODO: is there a faster way than allocation? - pub name: Box<[u8]>, + pub name: Box, pub files: Vec>, // pub cwd: Box<[u8]>, } @@ -230,7 +235,7 @@ pub struct Context { /// User grants pub grants: Arc>, /// The name of the context - pub name: Arc>>, + pub name: Arc>>, /// The current working directory pub cwd: Arc>>, /// The open files in the scheme @@ -287,7 +292,7 @@ impl Context { sigstack: None, tls: None, grants: Arc::new(Mutex::new(UserGrants::default())), - name: Arc::new(RwLock::new(Vec::new().into_boxed_slice())), + name: Arc::new(RwLock::new(String::new().into_boxed_str())), cwd: Arc::new(Mutex::new(Vec::new())), files: Arc::new(Mutex::new(Vec::new())), actions: Arc::new(Mutex::new(vec![( diff --git a/src/context/switch.rs b/src/context/switch.rs index 2423d55..67b0832 100644 --- a/src/context/switch.rs +++ b/src/context/switch.rs @@ -13,7 +13,7 @@ unsafe fn update(context: &mut Context, cpu_id: usize) { // Take ownership if not already owned if context.cpu_id == None { context.cpu_id = Some(cpu_id); - // println!("{}: take {} {}", cpu_id, context.id, ::core::str::from_utf8_unchecked(&context.name.read())); + // println!("{}: take {} {}", cpu_id, context.id, *context.name.read()); } // Restore from signal, must only be done from another context to avoid overwriting the stack! 
diff --git a/src/lib.rs b/src/lib.rs index a18fb27..ec36059 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -279,7 +279,7 @@ pub extern fn ksignal(signal: usize) { let contexts = context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); - info!("NAME {}", unsafe { ::core::str::from_utf8_unchecked(&context.name.read()) }); + info!("NAME {}", *context.name.read()); } } diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs index 99d19e3..60a7395 100644 --- a/src/scheme/proc.rs +++ b/src/scheme/proc.rs @@ -236,7 +236,9 @@ impl Scheme for ProcScheme { data = match operation { Operation::Memory => OperationData::Memory(MemData::default()), Operation::Trace => OperationData::Trace(TraceData::default()), - Operation::Static(_) => OperationData::Static(StaticData::new(target.name.read().clone())), + Operation::Static(_) => OperationData::Static(StaticData::new( + target.name.read().clone().into() + )), _ => OperationData::Other, }; diff --git a/src/scheme/sys/block.rs b/src/scheme/sys/block.rs index ff9d094..2d58e16 100644 --- a/src/scheme/sys/block.rs +++ b/src/scheme/sys/block.rs @@ -1,7 +1,6 @@ use alloc::string::String; use alloc::vec::Vec; use core::fmt::Write; -use core::str; use crate::context; use crate::syscall::error::Result; @@ -21,7 +20,7 @@ pub fn resource() -> Result> { for row in rows.iter() { let id: usize = row.0.into(); - let name = str::from_utf8(&row.1).unwrap_or("."); + let name = &row.1; let _ = writeln!(string, "{}: {}", id, name); diff --git a/src/scheme/sys/context.rs b/src/scheme/sys/context.rs index df457c8..26e3ae0 100644 --- a/src/scheme/sys/context.rs +++ b/src/scheme/sys/context.rs @@ -1,6 +1,5 @@ use alloc::string::String; use alloc::vec::Vec; -use core::str; use crate::context; use crate::syscall::error::Result; @@ -107,9 +106,6 @@ pub fn resource() -> Result> { format!("{} B", memory) }; - let name_bytes = context.name.read(); - let name = str::from_utf8(&name_bytes).unwrap_or(""); - 
string.push_str(&format!("{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<8}{:<8}{}\n", context.id.into(), context.pgid.into(), @@ -124,7 +120,7 @@ pub fn resource() -> Result> { cpu_string, ticks_string, memory_string, - name)); + *context.name.read())); } } diff --git a/src/scheme/sys/exe.rs b/src/scheme/sys/exe.rs index 69e2261..43aaf35 100644 --- a/src/scheme/sys/exe.rs +++ b/src/scheme/sys/exe.rs @@ -1,4 +1,7 @@ -use alloc::vec::Vec; +use alloc::{ + boxed::Box, + vec::Vec, +}; use crate::context; use crate::syscall::error::{Error, ESRCH, Result}; @@ -9,7 +12,8 @@ pub fn resource() -> Result> { let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); let name = context.name.read(); - name.clone().into_vec() + let name_bytes: Box<[u8]> = name.clone().into(); + name_bytes.into_vec() }; Ok(name) } diff --git a/src/scheme/sys/iostat.rs b/src/scheme/sys/iostat.rs index 3aac16e..6a73227 100644 --- a/src/scheme/sys/iostat.rs +++ b/src/scheme/sys/iostat.rs @@ -22,7 +22,7 @@ pub fn resource() -> Result> { for row in rows.iter() { let id: usize = row.0.into(); - let name = str::from_utf8(&row.1).unwrap_or("."); + let name = &row.1; let _ = writeln!(string, "{}: {}", id, name); for (fd, f) in row.2.iter().enumerate() { diff --git a/src/scheme/sys/syscall.rs b/src/scheme/sys/syscall.rs index c458d4e..a199998 100644 --- a/src/scheme/sys/syscall.rs +++ b/src/scheme/sys/syscall.rs @@ -1,7 +1,6 @@ use alloc::string::String; use alloc::vec::Vec; use core::fmt::Write; -use core::str; use crate::context; use crate::syscall; @@ -22,7 +21,7 @@ pub fn resource() -> Result> { for row in rows.iter() { let id: usize = row.0.into(); - let name = str::from_utf8(&row.1).unwrap_or("."); + let name = &row.1; let _ = writeln!(string, "{}: {}", id, name); diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs index eec7300..9f8139b 100644 --- a/src/syscall/mod.rs +++ b/src/syscall/mod.rs @@ -82,8 +82,7 @@ pub fn syscall(a: usize, b: usize, c: 
usize, d: usize, e: usize, f: usize, bp: u let contexts = crate::context::contexts(); let current = contexts.current().unwrap(); let current = current.read(); - let name = current.name.read(); - println!("{:?} using deprecated fmap(...) call", core::str::from_utf8(&name)); + println!("{:?} using deprecated fmap(...) call", *current.name.read()); } file_op(a, fd, c, d) }, @@ -92,8 +91,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u let contexts = crate::context::contexts(); let current = contexts.current().unwrap(); let current = current.read(); - let name = current.name.read(); - println!("{:?} using deprecated funmap(...) call", core::str::from_utf8(&name)); + println!("{:?} using deprecated funmap(...) call", *current.name.read()); } funmap_old(b) }, @@ -208,8 +206,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); - let name_raw = context.name.read(); - let name = unsafe { core::str::from_utf8_unchecked(&name_raw) }; + let name = context.name.read(); if name.contains("redoxfs") { if a == SYS_CLOCK_GETTIME || a == SYS_YIELD { false @@ -230,7 +227,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); - print!("{} ({}): ", unsafe { core::str::from_utf8_unchecked(&context.name.read()) }, context.id.into()); + print!("{} ({}): ", *context.name.read(), context.id.into()); } println!("{}", debug::format_call(a, b, c, d, e, f)); @@ -265,7 +262,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u let contexts = crate::context::contexts(); if let Some(context_lock) = contexts.current() { let context = context_lock.read(); - print!("{} ({}): ", unsafe { 
core::str::from_utf8_unchecked(&context.name.read()) }, context.id.into()); + print!("{} ({}): ", *context.name.read(), context.id.into()); } print!("{} = ", debug::format_call(a, b, c, d, e, f)); diff --git a/src/syscall/process.rs b/src/syscall/process.rs index b034358..6b15779 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -1,7 +1,10 @@ -use alloc::boxed::Box; -use alloc::collections::BTreeSet; -use alloc::sync::Arc; -use alloc::vec::Vec; +use alloc::{ + boxed::Box, + collections::BTreeSet, + string::String, + sync::Arc, + vec::Vec, +}; use core::alloc::{GlobalAlloc, Layout}; use core::ops::DerefMut; use core::{intrinsics, mem}; @@ -608,7 +611,7 @@ fn empty(context: &mut context::Context, reaping: bool) { let grants = mem::replace(&mut *grants, UserGrants::default()); for grant in grants.inner.into_iter() { if reaping { - println!("{}: {}: Grant should not exist: {:?}", context.id.into(), unsafe { ::core::str::from_utf8_unchecked(&context.name.read()) }, grant); + println!("{}: {}: Grant should not exist: {:?}", context.id.into(), *context.name.read(), grant); let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) }; let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET))); @@ -632,7 +635,7 @@ impl Drop for ExecFile { fn fexec_noreturn( setuid: Option, setgid: Option, - name: Box<[u8]>, + name: Box, data: Box<[u8]>, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]>]>, @@ -893,7 +896,7 @@ fn fexec_noreturn( unsafe { usermode(entry, sp, 0, singlestep) } } -pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]>]>, name_override_opt: Option>, auxv: Option>) -> Result { +pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]>]>, name_override_opt: Option>, auxv: Option>) -> Result { let (uid, gid) = { let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; 
@@ -902,7 +905,7 @@ pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]> }; let mut stat: Stat; - let mut name: Vec; + let name: String; let mut data: Vec; { let file = ExecFile(fd); @@ -926,11 +929,18 @@ pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]> } if let Some(name_override) = name_override_opt { - name = Vec::from(name_override); + name = String::from(name_override); } else { - name = vec![0; 4096]; - let len = syscall::file_op_mut_slice(syscall::number::SYS_FPATH, file.0, &mut name)?; - name.truncate(len); + let mut name_bytes = vec![0; 4096]; + let len = syscall::file_op_mut_slice(syscall::number::SYS_FPATH, file.0, &mut name_bytes)?; + name_bytes.truncate(len); + name = match String::from_utf8(name_bytes) { + Ok(ok) => ok, + Err(_err) => { + //TODO: print error? + return Err(Error::new(EINVAL)); + } + }; } //TODO: Only read elf header, not entire file. Then read required segments @@ -974,7 +984,7 @@ pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]> println!( "{}: {}: fexec failed to execute {}: {}", context.id.into(), - unsafe { ::core::str::from_utf8_unchecked(&context.name.read()) }, + *context.name.read(), fd.into(), err ); @@ -1026,8 +1036,8 @@ pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]> let mut args_vec = Vec::from(args); //TODO: pass file handle in auxv - let name_override = name.into_boxed_slice(); - args_vec[0] = name_override.clone(); + let name_override = name.into_boxed_str(); + args_vec[0] = name_override.clone().into(); // Drop variables, since fexec_kernel probably won't return drop(elf); @@ -1060,7 +1070,7 @@ pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]> // This is the point of no return, quite literaly. Any checks for validity need // to be done before, and appropriate errors returned. Otherwise, we have nothing // to return to. 
- fexec_noreturn(setuid, setgid, name.into_boxed_slice(), data.into_boxed_slice(), args, vars, auxv.into_boxed_slice()); + fexec_noreturn(setuid, setgid, name.into_boxed_str(), data.into_boxed_slice(), args, vars, auxv.into_boxed_slice()); } pub fn fexec(fd: FileHandle, arg_ptrs: &[[usize; 2]], var_ptrs: &[[usize; 2]]) -> Result { From c7aba8fdfdd88a5849f48900960d27bb7ca9f249 Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Sat, 13 Feb 2021 12:24:19 -0700 Subject: [PATCH 45/55] Switch Context::cwd to using RwLock --- src/context/context.rs | 6 +++--- src/syscall/fs.rs | 4 ++-- src/syscall/process.rs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/context/context.rs b/src/context/context.rs index f2da71a..2068d39 100644 --- a/src/context/context.rs +++ b/src/context/context.rs @@ -237,7 +237,7 @@ pub struct Context { /// The name of the context pub name: Arc>>, /// The current working directory - pub cwd: Arc>>, + pub cwd: Arc>>, /// The open files in the scheme pub files: Arc>>>, /// Signal actions @@ -293,7 +293,7 @@ impl Context { tls: None, grants: Arc::new(Mutex::new(UserGrants::default())), name: Arc::new(RwLock::new(String::new().into_boxed_str())), - cwd: Arc::new(Mutex::new(Vec::new())), + cwd: Arc::new(RwLock::new(Vec::new())), files: Arc::new(Mutex::new(Vec::new())), actions: Arc::new(Mutex::new(vec![( SigAction { @@ -315,7 +315,7 @@ impl Context { /// "bar:/foo" will be used directly, as it is already absolute pub fn canonicalize(&self, path: &[u8]) -> Vec { let mut canon = if path.iter().position(|&b| b == b':').is_none() { - let cwd = self.cwd.lock(); + let cwd = self.cwd.read(); let mut canon = if !path.starts_with(b"/") { let mut c = cwd.clone(); diff --git a/src/syscall/fs.rs b/src/syscall/fs.rs index 197442d..780f869 100644 --- a/src/syscall/fs.rs +++ b/src/syscall/fs.rs @@ -66,7 +66,7 @@ pub fn chdir(path: &[u8]) -> Result { let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = 
context_lock.read(); let canonical = context.canonicalize(path); - *context.cwd.lock() = canonical; + *context.cwd.write() = canonical; Ok(0) } else { Err(Error::new(ENOTDIR)) @@ -78,7 +78,7 @@ pub fn getcwd(buf: &mut [u8]) -> Result { let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - let cwd = context.cwd.lock(); + let cwd = context.cwd.read(); let mut i = 0; while i < buf.len() && i < cwd.len() { buf[i] = cwd[i]; diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 6b15779..4f0cbac 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -253,7 +253,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { if flags.contains(CLONE_FS) { cwd = Arc::clone(&context.cwd); } else { - cwd = Arc::new(Mutex::new(context.cwd.lock().clone())); + cwd = Arc::new(RwLock::new(context.cwd.read().clone())); } if flags.contains(CLONE_FILES) { From 83dea72a5068eb0f3f984f5df07584c3f590fa38 Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Sat, 13 Feb 2021 12:57:53 -0700 Subject: [PATCH 46/55] Switch Context::files to RwLock --- src/context/context.rs | 14 +++++++------- src/event.rs | 2 +- src/scheme/sys/iostat.rs | 2 +- src/scheme/user.rs | 4 ++-- src/syscall/fs.rs | 2 +- src/syscall/process.rs | 10 +++++----- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/context/context.rs b/src/context/context.rs index 2068d39..fccd6b0 100644 --- a/src/context/context.rs +++ b/src/context/context.rs @@ -127,7 +127,7 @@ impl ContextSnapshot { pub fn new(context: &Context) -> Self { let name = context.name.read().clone(); let mut files = Vec::new(); - for descriptor_opt in context.files.lock().iter() { + for descriptor_opt in context.files.read().iter() { let description = if let Some(descriptor) = descriptor_opt { let description = descriptor.description.read(); Some(FileDescription { @@ -239,7 +239,7 @@ pub struct Context { /// The current working 
directory pub cwd: Arc>>, /// The open files in the scheme - pub files: Arc>>>, + pub files: Arc>>>, /// Signal actions pub actions: Arc>>, /// The pointer to the user-space registers, saved after certain @@ -294,7 +294,7 @@ impl Context { grants: Arc::new(Mutex::new(UserGrants::default())), name: Arc::new(RwLock::new(String::new().into_boxed_str())), cwd: Arc::new(RwLock::new(Vec::new())), - files: Arc::new(Mutex::new(Vec::new())), + files: Arc::new(RwLock::new(Vec::new())), actions: Arc::new(Mutex::new(vec![( SigAction { sa_handler: unsafe { mem::transmute(SIG_DFL) }, @@ -416,7 +416,7 @@ impl Context { /// Add a file to the lowest available slot greater than or equal to min. /// Return the file descriptor number or None if no slot was found pub fn add_file_min(&self, file: FileDescriptor, min: usize) -> Option { - let mut files = self.files.lock(); + let mut files = self.files.write(); for (i, file_option) in files.iter_mut().enumerate() { if file_option.is_none() && i >= min { *file_option = Some(file); @@ -439,7 +439,7 @@ impl Context { /// Get a file pub fn get_file(&self, i: FileHandle) -> Option { - let files = self.files.lock(); + let files = self.files.read(); if i.into() < files.len() { files[i.into()].clone() } else { @@ -450,7 +450,7 @@ impl Context { /// Insert a file with a specific handle number. 
This is used by dup2 /// Return the file descriptor number or None if the slot was not empty, or i was invalid pub fn insert_file(&self, i: FileHandle, file: FileDescriptor) -> Option { - let mut files = self.files.lock(); + let mut files = self.files.write(); if i.into() < super::CONTEXT_MAX_FILES { while i.into() >= files.len() { files.push(None); @@ -469,7 +469,7 @@ impl Context { /// Remove a file // TODO: adjust files vector to smaller size if possible pub fn remove_file(&self, i: FileHandle) -> Option { - let mut files = self.files.lock(); + let mut files = self.files.write(); if i.into() < files.len() { files[i.into()].take() } else { diff --git a/src/event.rs b/src/event.rs index f221bc2..73e2de1 100644 --- a/src/event.rs +++ b/src/event.rs @@ -35,7 +35,7 @@ impl EventQueue { let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - let files = context.files.lock(); + let files = context.files.read(); match files.get(event.id).ok_or(Error::new(EBADF))? 
{ Some(file) => file.clone(), None => return Err(Error::new(EBADF)) diff --git a/src/scheme/sys/iostat.rs b/src/scheme/sys/iostat.rs index 6a73227..fe0d65f 100644 --- a/src/scheme/sys/iostat.rs +++ b/src/scheme/sys/iostat.rs @@ -16,7 +16,7 @@ pub fn resource() -> Result> { let contexts = context::contexts(); for (id, context_lock) in contexts.iter() { let context = context_lock.read(); - rows.push((*id, context.name.read().clone(), context.files.lock().clone())); + rows.push((*id, context.name.read().clone(), context.files.read().clone())); } } diff --git a/src/scheme/user.rs b/src/scheme/user.rs index 8ba0786..c28b8a4 100644 --- a/src/scheme/user.rs +++ b/src/scheme/user.rs @@ -349,7 +349,7 @@ impl Scheme for UserScheme { // TODO: Faster, cleaner mechanism to get descriptor let scheme = inner.scheme_id.load(Ordering::SeqCst); let mut desc_res = Err(Error::new(EBADF)); - for context_file_opt in context.files.lock().iter() { + for context_file_opt in context.files.read().iter() { if let Some(context_file) = context_file_opt { let (context_scheme, context_number) = { let desc = context_file.description.read(); @@ -402,7 +402,7 @@ impl Scheme for UserScheme { // TODO: Faster, cleaner mechanism to get descriptor let scheme = inner.scheme_id.load(Ordering::SeqCst); let mut desc_res = Err(Error::new(EBADF)); - for context_file_opt in context.files.lock().iter() { + for context_file_opt in context.files.read().iter() { if let Some(context_file) = context_file_opt { let (context_scheme, context_number) = { let desc = context_file.description.read(); diff --git a/src/syscall/fs.rs b/src/syscall/fs.rs index 780f869..dd19ea4 100644 --- a/src/syscall/fs.rs +++ b/src/syscall/fs.rs @@ -377,7 +377,7 @@ pub fn fcntl(fd: FileHandle, cmd: usize, arg: usize) -> Result { let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - let mut files = context.files.lock(); + let mut files = context.files.write(); match 
*files.get_mut(fd.into()).ok_or(Error::new(EBADF))? { Some(ref mut file) => match cmd { F_GETFD => { diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 4f0cbac..262ffff 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -259,7 +259,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { if flags.contains(CLONE_FILES) { files = Arc::clone(&context.files); } else { - files = Arc::new(Mutex::new(context.files.lock().clone())); + files = Arc::new(RwLock::new(context.files.read().clone())); } if flags.contains(CLONE_SIGHAND) { @@ -272,7 +272,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { // If not cloning files, dup to get a new number from scheme // This has to be done outside the context lock to prevent deadlocks if !flags.contains(CLONE_FILES) { - for (_fd, file_opt) in files.lock().iter_mut().enumerate() { + for (_fd, file_opt) in files.write().iter_mut().enumerate() { let new_file_opt = if let Some(ref file) = *file_opt { Some(FileDescriptor { description: Arc::clone(&file.description), @@ -866,7 +866,7 @@ fn fexec_noreturn( (vfork, context.ppid, files) }; - for (_fd, file_opt) in files.lock().iter_mut().enumerate() { + for (_fd, file_opt) in files.write().iter_mut().enumerate() { let mut cloexec = false; if let Some(ref file) = *file_opt { if file.cloexec { @@ -1108,12 +1108,12 @@ pub fn exit(status: usize) -> ! 
{ let pid = { let mut context = context_lock.write(); { - let mut lock = context.files.lock(); + let mut lock = context.files.write(); if Arc::strong_count(&context.files) == 1 { mem::swap(lock.deref_mut(), &mut close_files); } } - context.files = Arc::new(Mutex::new(Vec::new())); + context.files = Arc::new(RwLock::new(Vec::new())); context.id }; From 41bea0086fbf8efff82bf917b5cc51eac005e44c Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Sat, 13 Feb 2021 13:01:20 -0700 Subject: [PATCH 47/55] Switch Context::actions to RwLock --- src/context/context.rs | 4 ++-- src/context/signal.rs | 2 +- src/ptrace.rs | 4 ++-- src/syscall/process.rs | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/context/context.rs b/src/context/context.rs index fccd6b0..5926a16 100644 --- a/src/context/context.rs +++ b/src/context/context.rs @@ -241,7 +241,7 @@ pub struct Context { /// The open files in the scheme pub files: Arc>>>, /// Signal actions - pub actions: Arc>>, + pub actions: Arc>>, /// The pointer to the user-space registers, saved after certain /// interrupts. 
This pointer is somewhere inside kstack, and the /// kstack address at the time of creation is the first element in @@ -295,7 +295,7 @@ impl Context { name: Arc::new(RwLock::new(String::new().into_boxed_str())), cwd: Arc::new(RwLock::new(Vec::new())), files: Arc::new(RwLock::new(Vec::new())), - actions: Arc::new(Mutex::new(vec![( + actions: Arc::new(RwLock::new(vec![( SigAction { sa_handler: unsafe { mem::transmute(SIG_DFL) }, sa_mask: [0; 2], diff --git a/src/context/signal.rs b/src/context/signal.rs index 05d3ff7..bc5add1 100644 --- a/src/context/signal.rs +++ b/src/context/signal.rs @@ -17,7 +17,7 @@ pub extern "C" fn signal_handler(sig: usize) { let contexts = contexts(); let context_lock = contexts.current().expect("context::signal_handler not inside of context"); let context = context_lock.read(); - let actions = context.actions.lock(); + let actions = context.actions.read(); actions[sig] }; diff --git a/src/ptrace.rs b/src/ptrace.rs index aeea28f..6f0e741 100644 --- a/src/ptrace.rs +++ b/src/ptrace.rs @@ -404,7 +404,7 @@ pub unsafe fn regs_for(context: &Context) -> Option<&InterruptStack> { None => None, Some((_, _, ref kstack, signum)) => { let is_user_handled = { - let actions = context.actions.lock(); + let actions = context.actions.read(); signal::is_user_handled(actions[signum as usize].0.sa_handler) }; if is_user_handled { @@ -425,7 +425,7 @@ pub unsafe fn regs_for_mut(context: &mut Context) -> Option<&mut InterruptStack> None => None, Some((_, _, ref mut kstack, signum)) => { let is_user_handled = { - let actions = context.actions.lock(); + let actions = context.actions.read(); signal::is_user_handled(actions[signum as usize].0.sa_handler) }; if is_user_handled { diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 262ffff..94d3766 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -265,7 +265,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { if flags.contains(CLONE_SIGHAND) { actions = 
Arc::clone(&context.actions); } else { - actions = Arc::new(Mutex::new(context.actions.lock().clone())); + actions = Arc::new(RwLock::new(context.actions.read().clone())); } } @@ -849,7 +849,7 @@ fn fexec_noreturn( drop(args); drop(vars); - context.actions = Arc::new(Mutex::new(vec![( + context.actions = Arc::new(RwLock::new(vec![( SigAction { sa_handler: unsafe { mem::transmute(SIG_DFL) }, sa_mask: [0; 2], @@ -1414,7 +1414,7 @@ pub fn sigaction(sig: usize, act_opt: Option<&SigAction>, oldact_opt: Option<&mu let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - let mut actions = context.actions.lock(); + let mut actions = context.actions.write(); if let Some(oldact) = oldact_opt { *oldact = actions[sig].0; From 8fcd375bd9f76b71f1575ea9c69defaae913c02a Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Sat, 13 Feb 2021 13:06:13 -0700 Subject: [PATCH 48/55] Switch Context::grants to RwLock --- src/context/context.rs | 6 +++--- src/scheme/memory.rs | 2 +- src/scheme/sys/context.rs | 2 +- src/scheme/user.rs | 4 ++-- src/syscall/driver.rs | 4 ++-- src/syscall/fs.rs | 4 ++-- src/syscall/process.rs | 14 +++++++------- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/context/context.rs b/src/context/context.rs index 5926a16..8e7c3b8 100644 --- a/src/context/context.rs +++ b/src/context/context.rs @@ -10,7 +10,7 @@ use core::{ cmp::Ordering, mem, }; -use spin::{Mutex, RwLock}; +use spin::RwLock; use crate::arch::{interrupt::InterruptStack, paging::PAGE_SIZE}; use crate::common::unique::Unique; @@ -233,7 +233,7 @@ pub struct Context { /// User Thread local storage pub tls: Option, /// User grants - pub grants: Arc>, + pub grants: Arc>, /// The name of the context pub name: Arc>>, /// The current working directory @@ -291,7 +291,7 @@ impl Context { stack: None, sigstack: None, tls: None, - grants: Arc::new(Mutex::new(UserGrants::default())), + grants: 
Arc::new(RwLock::new(UserGrants::default())), name: Arc::new(RwLock::new(String::new().into_boxed_str())), cwd: Arc::new(RwLock::new(Vec::new())), files: Arc::new(RwLock::new(Vec::new())), diff --git a/src/scheme/memory.rs b/src/scheme/memory.rs index 5fc4e55..9ee45c1 100644 --- a/src/scheme/memory.rs +++ b/src/scheme/memory.rs @@ -40,7 +40,7 @@ impl Scheme for MemoryScheme { let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - let mut grants = context.grants.lock(); + let mut grants = context.grants.write(); let region = grants.find_free_at(VirtualAddress::new(map.address), map.size, map.flags)?.round(); diff --git a/src/scheme/sys/context.rs b/src/scheme/sys/context.rs index 26e3ae0..3002f55 100644 --- a/src/scheme/sys/context.rs +++ b/src/scheme/sys/context.rs @@ -90,7 +90,7 @@ pub fn resource() -> Result> { if let Some(ref sigstack) = context.sigstack { memory += sigstack.size(); } - for grant in context.grants.lock().iter() { + for grant in context.grants.read().iter() { if grant.is_owned() { memory += grant.size(); } diff --git a/src/scheme/user.rs b/src/scheme/user.rs index c28b8a4..a0e3046 100644 --- a/src/scheme/user.rs +++ b/src/scheme/user.rs @@ -126,7 +126,7 @@ impl UserInner { let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) }; let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET))); - let mut grants = context.grants.lock(); + let mut grants = context.grants.write(); let from_address = round_down_pages(address); let offset = address - from_address; @@ -157,7 +157,7 @@ impl UserInner { let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_utable()) }; let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET))); - let mut grants = context.grants.lock(); + let mut grants = context.grants.write(); if let 
Some(region) = grants.contains(VirtualAddress::new(address)).map(Region::from) { grants.take(®ion).unwrap().unmap_inactive(&mut new_table, &mut temporary_page); diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs index f6e39b6..045b3ed 100644 --- a/src/syscall/driver.rs +++ b/src/syscall/driver.rs @@ -80,7 +80,7 @@ pub fn inner_physmap(physical_address: usize, size: usize, flags: PhysmapFlags) let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - let mut grants = context.grants.lock(); + let mut grants = context.grants.write(); let from_address = (physical_address/4096) * 4096; let offset = physical_address - from_address; @@ -135,7 +135,7 @@ pub fn inner_physunmap(virtual_address: usize) -> Result { let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - let mut grants = context.grants.lock(); + let mut grants = context.grants.write(); if let Some(region) = grants.contains(VirtualAddress::new(virtual_address)).map(Region::from) { grants.take(®ion).unwrap().unmap(); diff --git a/src/syscall/fs.rs b/src/syscall/fs.rs index dd19ea4..bd4684a 100644 --- a/src/syscall/fs.rs +++ b/src/syscall/fs.rs @@ -455,7 +455,7 @@ pub fn funmap_old(virtual_address: usize) -> Result { let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - let mut grants = context.grants.lock(); + let mut grants = context.grants.write(); if let Some(region) = grants.contains(VirtualAddress::new(virtual_address)).map(Region::from) { let mut grant = grants.take(®ion).unwrap(); @@ -503,7 +503,7 @@ pub fn funmap(virtual_address: usize, length: usize) -> Result { let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - let mut grants = context.grants.lock(); + let mut grants = context.grants.write(); let conflicting: Vec = grants.conflicts(requested).map(Region::from).collect(); diff --git a/src/syscall/process.rs 
b/src/syscall/process.rs index 94d3766..d91d049 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -8,7 +8,7 @@ use alloc::{ use core::alloc::{GlobalAlloc, Layout}; use core::ops::DerefMut; use core::{intrinsics, mem}; -use spin::{RwLock, Mutex}; +use spin::RwLock; use crate::context::file::FileDescriptor; use crate::context::{ContextId, WaitpidKey}; @@ -237,11 +237,11 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { grants = Arc::clone(&context.grants); } else { let mut grants_set = UserGrants::default(); - for grant in context.grants.lock().iter() { + for grant in context.grants.read().iter() { let start = VirtualAddress::new(grant.start_address().data() + crate::USER_TMP_GRANT_OFFSET - crate::USER_GRANT_OFFSET); grants_set.insert(grant.secret_clone(start)); } - grants = Arc::new(Mutex::new(grants_set)); + grants = Arc::new(RwLock::new(grants_set)); } if flags.contains(CLONE_VM) { @@ -288,7 +288,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { // If not cloning virtual memory, use fmap to re-obtain every grant where possible if !flags.contains(CLONE_VM) { - let mut grants = grants.lock(); + let mut grants = grants.write(); let mut to_remove = BTreeSet::new(); @@ -420,7 +420,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { context.image = image; // Copy grant mapping - if ! grants.lock().is_empty() { + if ! 
grants.read().is_empty() { let frame = active_utable.p4()[crate::USER_GRANT_PML4].pointed_frame().expect("user grants not mapped"); let flags = active_utable.p4()[crate::USER_GRANT_PML4].flags(); active_utable.with(&mut new_utable, &mut temporary_upage, |mapper| { @@ -466,7 +466,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { // Move grants { - let mut grants = grants.lock(); + let mut grants = grants.write(); let old_grants = mem::replace(&mut *grants, UserGrants::default()); for mut grant in old_grants.inner.into_iter() { @@ -606,7 +606,7 @@ fn empty(context: &mut context::Context, reaping: bool) { drop(context.tls.take()); } - let mut grants = context.grants.lock(); + let mut grants = context.grants.write(); if Arc::strong_count(&context.grants) == 1 { let grants = mem::replace(&mut *grants, UserGrants::default()); for grant in grants.inner.into_iter() { From d331f72f2a51fa577072f24bc2587829fd87368b Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Sun, 14 Feb 2021 13:45:03 -0700 Subject: [PATCH 49/55] Use UTF-8 for all paths --- Cargo.lock | 36 +++++++++++----------- Cargo.toml | 2 +- rmm | 2 +- src/context/context.rs | 40 ++++++++++++------------ src/lib.rs | 12 ++++---- src/scheme/acpi.rs | 5 ++- src/scheme/debug.rs | 2 +- src/scheme/event.rs | 2 +- src/scheme/initfs.rs | 5 ++- src/scheme/irq.rs | 5 ++- src/scheme/itimer.rs | 6 ++-- src/scheme/live.rs | 2 +- src/scheme/memory.rs | 2 +- src/scheme/mod.rs | 59 +++++++++++++++++++----------------- src/scheme/proc.rs | 10 +++--- src/scheme/root.rs | 40 ++++++++++++------------ src/scheme/serio.rs | 5 ++- src/scheme/sys/mod.rs | 46 ++++++++++++++-------------- src/scheme/sys/scheme.rs | 2 +- src/scheme/sys/scheme_num.rs | 2 +- src/scheme/time.rs | 6 ++-- src/scheme/user.rs | 42 ++++++++++++++++--------- src/syscall/fs.rs | 42 +++++++++++++------------ src/syscall/mod.rs | 12 ++++---- src/syscall/privilege.rs | 4 +-- src/syscall/process.rs | 8 +++-- src/syscall/validate.rs | 9 +++++- syscall | 
2 +- 28 files changed, 217 insertions(+), 193 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d887aa2..8121fd8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,17 +17,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "byteorder" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cc" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cfg-if" -version = "0.1.10" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -35,7 +35,7 @@ name = "fdt" version = "0.1.0" source = "git+https://gitlab.redox-os.org/thomhuds/fdt.git#baca9b0070c281dc99521ee901efcb10e5f84218" dependencies = [ - "byteorder 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.4.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -49,19 +49,19 @@ dependencies = [ [[package]] name = "kernel" -version = "0.1.54" +version = "0.2.5" dependencies = [ "bitfield 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.67 (registry+https://github.com/rust-lang/crates.io-index)", "fdt 0.1.0 (git+https://gitlab.redox-os.org/thomhuds/fdt.git)", "goblin 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "linked_list_allocator 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)", "paste 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", 
"raw-cpuid 8.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.2.4", + "redox_syscall 0.2.7", "rmm 0.1.0", "rustc-cfg 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-demangle 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -96,10 +96,10 @@ dependencies = [ [[package]] name = "log" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -135,7 +135,7 @@ version = "7.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.67 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -145,13 +145,13 @@ version = "8.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.67 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "redox_syscall" -version = "0.2.4" +version = "0.2.7" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -241,15 +241,15 @@ dependencies = [ "checksum bit_field 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4" "checksum bitfield 0.13.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46afbd2983a5d5a7bd740ccb198caf5b82f45c40c09c0eed36052d91cb92e719" "checksum 
bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -"checksum byteorder 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" -"checksum cc 1.0.66 (registry+https://github.com/rust-lang/crates.io-index)" = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" -"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +"checksum byteorder 1.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +"checksum cc 1.0.67 (registry+https://github.com/rust-lang/crates.io-index)" = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +"checksum cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" "checksum fdt 0.1.0 (git+https://gitlab.redox-os.org/thomhuds/fdt.git)" = "" "checksum goblin 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d20fd25aa456527ce4f544271ae4fea65d2eda4a6561ea56f39fb3ee4f7e3884" "checksum linked_list_allocator 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "47de1a43fad0250ee197e9e124e5b5deab3d7b39d4428ae8a6d741ceb340c362" "checksum linked_list_allocator 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)" = "822add9edb1860698b79522510da17bef885171f75aa395cff099d770c609c24" "checksum lock_api 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" -"checksum log 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +"checksum log 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)" = 
"51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" "checksum paste 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" "checksum paste-impl 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" "checksum plain 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" diff --git a/Cargo.toml b/Cargo.toml index 77990ef..8eaecf6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kernel" -version = "0.1.54" +version = "0.2.5" build = "build.rs" edition = "2018" diff --git a/rmm b/rmm index 132d91d..7fd1218 160000 --- a/rmm +++ b/rmm @@ -1 +1 @@ -Subproject commit 132d91d3aaa624d1bc8709555a64ff289f7d5e4f +Subproject commit 7fd1218465b58bd88ecdaeaf63683019ad74ed41 diff --git a/src/context/context.rs b/src/context/context.rs index 8e7c3b8..5d6b444 100644 --- a/src/context/context.rs +++ b/src/context/context.rs @@ -1,7 +1,7 @@ use alloc::{ boxed::Box, collections::VecDeque, - string::String, + string::{String, ToString}, sync::Arc, vec::Vec, }; @@ -237,7 +237,7 @@ pub struct Context { /// The name of the context pub name: Arc>>, /// The current working directory - pub cwd: Arc>>, + pub cwd: Arc>, /// The open files in the scheme pub files: Arc>>>, /// Signal actions @@ -293,7 +293,7 @@ impl Context { tls: None, grants: Arc::new(RwLock::new(UserGrants::default())), name: Arc::new(RwLock::new(String::new().into_boxed_str())), - cwd: Arc::new(RwLock::new(Vec::new())), + cwd: Arc::new(RwLock::new(String::new())), files: Arc::new(RwLock::new(Vec::new())), actions: Arc::new(RwLock::new(vec![( SigAction { @@ -313,35 +313,34 @@ impl Context { /// This function will turn "foo" into "scheme:/path/foo" /// "/foo" will turn into "scheme:/foo" /// "bar:/foo" will be used directly, as it is already absolute - pub fn 
canonicalize(&self, path: &[u8]) -> Vec { - let mut canon = if path.iter().position(|&b| b == b':').is_none() { + pub fn canonicalize(&self, path: &str) -> String { + let mut canon = if path.find(':').is_none() { let cwd = self.cwd.read(); - let mut canon = if !path.starts_with(b"/") { + let mut canon = if !path.starts_with('/') { let mut c = cwd.clone(); - if ! c.ends_with(b"/") { - c.push(b'/'); + if ! c.ends_with('/') { + c.push('/'); } c } else { - cwd[..cwd.iter().position(|&b| b == b':').map_or(1, |i| i + 1)].to_vec() + cwd[..cwd.find(':').map_or(1, |i| i + 1)].to_string() }; - canon.extend_from_slice(&path); + canon.push_str(&path); canon } else { - path.to_vec() + path.to_string() }; // NOTE: assumes the scheme does not include anything like "../" or "./" let mut result = { - let parts = canon.split(|&c| c == b'/') - .filter(|&part| part != b".") + let parts = canon.split('/') .rev() .scan(0, |nskip, part| { - if part == b"." { + if part == "." { Some(None) - } else if part == b".." { + } else if part == ".." 
{ *nskip += 1; Some(None) } else if *nskip > 0 { @@ -357,18 +356,17 @@ impl Context { parts .iter() .rev() - .fold(Vec::new(), |mut vec, &part| { - vec.extend_from_slice(part); - vec.push(b'/'); - vec + .fold(String::new(), |mut string, &part| { + string.push_str(part); + string.push('/'); + string }) }; result.pop(); // remove extra '/' // replace with the root of the scheme if it's empty if result.is_empty() { - let pos = canon.iter() - .position(|&b| b == b':') + let pos = canon.find(':') .map_or(canon.len(), |p| p + 1); canon.truncate(pos); canon diff --git a/src/lib.rs b/src/lib.rs index ec36059..0770027 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -168,24 +168,24 @@ static mut INIT_ENV: &[u8] = &[]; /// Initialize userspace by running the initfs:bin/init process /// This function will also set the CWD to initfs:bin and open debug: as stdio pub extern fn userspace_init() { - let path = b"initfs:/bin/init"; + let path = "initfs:/bin/init"; let env = unsafe { INIT_ENV }; - if let Err(err) = syscall::chdir(b"initfs:") { + if let Err(err) = syscall::chdir("initfs:") { info!("Failed to enter initfs ({}).", err); info!("Perhaps the kernel was compiled with an incorrect INITFS_FOLDER \ environment variable value?"); panic!("Unexpected error while trying to enter initfs:."); } - assert_eq!(syscall::open(b"debug:", syscall::flag::O_RDONLY).map(FileHandle::into), Ok(0)); - assert_eq!(syscall::open(b"debug:", syscall::flag::O_WRONLY).map(FileHandle::into), Ok(1)); - assert_eq!(syscall::open(b"debug:", syscall::flag::O_WRONLY).map(FileHandle::into), Ok(2)); + assert_eq!(syscall::open("debug:", syscall::flag::O_RDONLY).map(FileHandle::into), Ok(0)); + assert_eq!(syscall::open("debug:", syscall::flag::O_WRONLY).map(FileHandle::into), Ok(1)); + assert_eq!(syscall::open("debug:", syscall::flag::O_WRONLY).map(FileHandle::into), Ok(2)); let fd = syscall::open(path, syscall::flag::O_RDONLY).expect("failed to open init"); let mut args = Vec::new(); - 
args.push(path.to_vec().into_boxed_slice()); + args.push(path.as_bytes().to_vec().into_boxed_slice()); let mut vars = Vec::new(); for var in env.split(|b| *b == b'\n') { diff --git a/src/scheme/acpi.rs b/src/scheme/acpi.rs index 9fe6020..f5fb2fc 100644 --- a/src/scheme/acpi.rs +++ b/src/scheme/acpi.rs @@ -217,13 +217,12 @@ fn serialize_table_filename( } impl Scheme for AcpiScheme { - fn open(&self, path: &[u8], flags: usize, opener_uid: u32, _opener_gid: u32) -> Result { + fn open(&self, path: &str, flags: usize, opener_uid: u32, _opener_gid: u32) -> Result { if opener_uid != 0 { return Err(Error::new(EACCES)); } - let path_str = str::from_utf8(path).or(Err(Error::new(ENOENT)))?; - let path_str = path_str.trim_start_matches('/'); + let path_str = path.trim_start_matches('/'); // TODO: Use some kind of component iterator. diff --git a/src/scheme/debug.rs b/src/scheme/debug.rs index 6b82441..b470953 100644 --- a/src/scheme/debug.rs +++ b/src/scheme/debug.rs @@ -57,7 +57,7 @@ impl DebugScheme { } impl Scheme for DebugScheme { - fn open(&self, path: &[u8], flags: usize, uid: u32, _gid: u32) -> Result { + fn open(&self, path: &str, flags: usize, uid: u32, _gid: u32) -> Result { if uid != 0 { return Err(Error::new(EPERM)); } diff --git a/src/scheme/event.rs b/src/scheme/event.rs index e7ea1ff..3ae5015 100644 --- a/src/scheme/event.rs +++ b/src/scheme/event.rs @@ -9,7 +9,7 @@ use crate::syscall::scheme::Scheme; pub struct EventScheme; impl Scheme for EventScheme { - fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result { + fn open(&self, _path: &str, _flags: usize, _uid: u32, _gid: u32) -> Result { let id = next_queue_id(); queues_mut().insert(id, Arc::new(EventQueue::new(id))); diff --git a/src/scheme/initfs.rs b/src/scheme/initfs.rs index d5d6447..80a557d 100644 --- a/src/scheme/initfs.rs +++ b/src/scheme/initfs.rs @@ -41,9 +41,8 @@ impl InitFsScheme { } impl Scheme for InitFsScheme { - fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: 
u32) -> Result { - let path_utf8 = str::from_utf8(path).or(Err(Error::new(ENOENT)))?; - let path_trimmed = path_utf8.trim_matches('/'); + fn open(&self, path: &str, _flags: usize, _uid: u32, _gid: u32) -> Result { + let path_trimmed = path.trim_matches('/'); //Have to iterate to get the path without allocation for entry in self.files.iter() { diff --git a/src/scheme/irq.rs b/src/scheme/irq.rs index d5938d0..ee24f7d 100644 --- a/src/scheme/irq.rs +++ b/src/scheme/irq.rs @@ -139,11 +139,10 @@ const fn vector_to_irq(vector: u8) -> u8 { } impl Scheme for IrqScheme { - fn open(&self, path: &[u8], flags: usize, uid: u32, _gid: u32) -> Result { + fn open(&self, path: &str, flags: usize, uid: u32, _gid: u32) -> Result { if uid != 0 { return Err(Error::new(EACCES)) } - let path_str = str::from_utf8(path).or(Err(Error::new(ENOENT)))?; - let path_str = path_str.trim_start_matches('/'); + let path_str = path.trim_start_matches('/'); let handle: Handle = if path_str.is_empty() { if flags & O_DIRECTORY == 0 && flags & O_STAT == 0 { return Err(Error::new(EISDIR)) } diff --git a/src/scheme/itimer.rs b/src/scheme/itimer.rs index 92ca1ab..11bc559 100644 --- a/src/scheme/itimer.rs +++ b/src/scheme/itimer.rs @@ -23,10 +23,8 @@ impl ITimerScheme { } impl Scheme for ITimerScheme { - fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result { - let path_str = str::from_utf8(path).or(Err(Error::new(ENOENT)))?; - - let clock = path_str.parse::().or(Err(Error::new(ENOENT)))?; + fn open(&self, path: &str, _flags: usize, _uid: u32, _gid: u32) -> Result { + let clock = path.parse::().or(Err(Error::new(ENOENT)))?; match clock { CLOCK_REALTIME => (), diff --git a/src/scheme/live.rs b/src/scheme/live.rs index c66c860..678845e 100644 --- a/src/scheme/live.rs +++ b/src/scheme/live.rs @@ -52,7 +52,7 @@ impl DiskScheme { } impl Scheme for DiskScheme { - fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result { + fn open(&self, _path: &str, _flags: usize, _uid: 
u32, _gid: u32) -> Result { let id = self.next_id.fetch_add(1, Ordering::SeqCst); self.handles.write().insert(id, Handle { path: b"0", diff --git a/src/scheme/memory.rs b/src/scheme/memory.rs index 9ee45c1..2991921 100644 --- a/src/scheme/memory.rs +++ b/src/scheme/memory.rs @@ -15,7 +15,7 @@ impl MemoryScheme { } } impl Scheme for MemoryScheme { - fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result { + fn open(&self, _path: &str, _flags: usize, _uid: u32, _gid: u32) -> Result { Ok(0) } diff --git a/src/scheme/mod.rs b/src/scheme/mod.rs index 6638e34..55d1cb9 100644 --- a/src/scheme/mod.rs +++ b/src/scheme/mod.rs @@ -6,10 +6,13 @@ //! The kernel validates paths and file descriptors before they are passed to schemes, //! also stripping the scheme identifier of paths if necessary. -use alloc::sync::Arc; -use alloc::boxed::Box; -use alloc::collections::BTreeMap; -use alloc::vec::Vec; +use alloc::{ + boxed::Box, + collections::BTreeMap, + string::ToString, + sync::Arc, + vec::Vec, +}; use core::sync::atomic::AtomicUsize; use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -92,11 +95,11 @@ int_like!(SchemeId, AtomicSchemeId, usize, AtomicUsize); int_like!(FileHandle, AtomicFileHandle, usize, AtomicUsize); pub struct SchemeIter<'a> { - inner: Option<::alloc::collections::btree_map::Iter<'a, Box<[u8]>, SchemeId>> + inner: Option<::alloc::collections::btree_map::Iter<'a, Box, SchemeId>> } impl<'a> Iterator for SchemeIter<'a> { - type Item = (&'a Box<[u8]>, &'a SchemeId); + type Item = (&'a Box, &'a SchemeId); fn next(&mut self) -> Option { self.inner.as_mut().and_then(|iter| iter.next()) @@ -106,7 +109,7 @@ impl<'a> Iterator for SchemeIter<'a> { /// Scheme list type pub struct SchemeList { map: BTreeMap>, - names: BTreeMap, SchemeId>>, + names: BTreeMap, SchemeId>>, next_ns: usize, next_id: usize } @@ -133,7 +136,7 @@ impl SchemeList { //TODO: Only memory: is in the null namespace right now. 
It should be removed when //anonymous mmap's are implemented - self.insert(ns, Box::new(*b"memory"), |_| Arc::new(MemoryScheme::new())).unwrap(); + self.insert(ns, "memory", |_| Arc::new(MemoryScheme::new())).unwrap(); } /// Initialize a new namespace @@ -142,12 +145,12 @@ impl SchemeList { self.next_ns += 1; self.names.insert(ns, BTreeMap::new()); - self.insert(ns, Box::new(*b""), |scheme_id| Arc::new(RootScheme::new(ns, scheme_id))).unwrap(); - self.insert(ns, Box::new(*b"event"), |_| Arc::new(EventScheme)).unwrap(); - self.insert(ns, Box::new(*b"itimer"), |_| Arc::new(ITimerScheme::new())).unwrap(); - self.insert(ns, Box::new(*b"memory"), |_| Arc::new(MemoryScheme::new())).unwrap(); - self.insert(ns, Box::new(*b"sys"), |_| Arc::new(SysScheme::new())).unwrap(); - self.insert(ns, Box::new(*b"time"), |scheme_id| Arc::new(TimeScheme::new(scheme_id))).unwrap(); + self.insert(ns, "", |scheme_id| Arc::new(RootScheme::new(ns, scheme_id))).unwrap(); + self.insert(ns, "event", |_| Arc::new(EventScheme)).unwrap(); + self.insert(ns, "itimer", |_| Arc::new(ITimerScheme::new())).unwrap(); + self.insert(ns, "memory", |_| Arc::new(MemoryScheme::new())).unwrap(); + self.insert(ns, "sys", |_| Arc::new(SysScheme::new())).unwrap(); + self.insert(ns, "time", |scheme_id| Arc::new(TimeScheme::new(scheme_id))).unwrap(); ns } @@ -159,23 +162,23 @@ impl SchemeList { // These schemes should only be available on the root #[cfg(feature = "acpi")] { - self.insert(ns, Box::new(*b"acpi"), |_| Arc::new(AcpiScheme::new())).unwrap(); + self.insert(ns, "acpi", |_| Arc::new(AcpiScheme::new())).unwrap(); } - self.insert(ns, Box::new(*b"debug"), |scheme_id| Arc::new(DebugScheme::new(scheme_id))).unwrap(); - self.insert(ns, Box::new(*b"initfs"), |_| Arc::new(InitFsScheme::new())).unwrap(); - self.insert(ns, Box::new(*b"irq"), |scheme_id| Arc::new(IrqScheme::new(scheme_id))).unwrap(); - self.insert(ns, Box::new(*b"proc"), |scheme_id| Arc::new(ProcScheme::new(scheme_id))).unwrap(); - self.insert(ns, 
Box::new(*b"serio"), |scheme_id| Arc::new(SerioScheme::new(scheme_id))).unwrap(); + self.insert(ns, "debug", |scheme_id| Arc::new(DebugScheme::new(scheme_id))).unwrap(); + self.insert(ns, "initfs", |_| Arc::new(InitFsScheme::new())).unwrap(); + self.insert(ns, "irq", |scheme_id| Arc::new(IrqScheme::new(scheme_id))).unwrap(); + self.insert(ns, "proc", |scheme_id| Arc::new(ProcScheme::new(scheme_id))).unwrap(); + self.insert(ns, "serio", |scheme_id| Arc::new(SerioScheme::new(scheme_id))).unwrap(); #[cfg(feature = "live")] { - self.insert(ns, Box::new(*b"disk/live"), |_| Arc::new(self::live::DiskScheme::new())).unwrap(); + self.insert(ns, "disk/live", |_| Arc::new(self::live::DiskScheme::new())).unwrap(); } // Pipe is special and needs to be in the root namespace - self.insert(ns, Box::new(*b"pipe"), |scheme_id| Arc::new(PipeScheme::new(scheme_id))).unwrap(); + self.insert(ns, "pipe", |scheme_id| Arc::new(PipeScheme::new(scheme_id))).unwrap(); } - pub fn make_ns(&mut self, from: SchemeNamespace, names: &[&[u8]]) -> Result { + pub fn make_ns(&mut self, from: SchemeNamespace, names: &[&str]) -> Result { // Create an empty namespace let to = self.new_ns(); @@ -188,7 +191,7 @@ impl SchemeList { }; if let Some(ref mut names) = self.names.get_mut(&to) { - assert!(names.insert(name.to_vec().into_boxed_slice(), id).is_none()); + assert!(names.insert(name.to_string().into_boxed_str(), id).is_none()); } else { panic!("scheme namespace not found"); } @@ -212,7 +215,7 @@ impl SchemeList { self.map.get(&id) } - pub fn get_name(&self, ns: SchemeNamespace, name: &[u8]) -> Option<(SchemeId, &Arc)> { + pub fn get_name(&self, ns: SchemeNamespace, name: &str) -> Option<(SchemeId, &Arc)> { if let Some(names) = self.names.get(&ns) { if let Some(&id) = names.get(name) { return self.get(id).map(|scheme| (id, scheme)); @@ -222,11 +225,11 @@ impl SchemeList { } /// Create a new scheme. 
- pub fn insert(&mut self, ns: SchemeNamespace, name: Box<[u8]>, scheme_fn: F) -> Result + pub fn insert(&mut self, ns: SchemeNamespace, name: &str, scheme_fn: F) -> Result where F: Fn(SchemeId) -> Arc { if let Some(names) = self.names.get(&ns) { - if names.contains_key(&name) { + if names.contains_key(name) { return Err(Error::new(EEXIST)); } } @@ -252,7 +255,7 @@ impl SchemeList { assert!(self.map.insert(id, scheme).is_none()); if let Some(ref mut names) = self.names.get_mut(&ns) { - assert!(names.insert(name, id).is_none()); + assert!(names.insert(name.to_string().into_boxed_str(), id).is_none()); } else { // Nonexistent namespace, posssibly null namespace return Err(Error::new(ENODEV)); diff --git a/src/scheme/proc.rs b/src/scheme/proc.rs index 60a7395..a842641 100644 --- a/src/scheme/proc.rs +++ b/src/scheme/proc.rs @@ -24,6 +24,7 @@ use core::{ cmp, mem, slice, + str, sync::atomic::{AtomicUsize, Ordering}, }; use spin::RwLock; @@ -208,8 +209,7 @@ impl ProcScheme { } impl Scheme for ProcScheme { - fn open(&self, path: &[u8], flags: usize, uid: u32, gid: u32) -> Result { - let path = core::str::from_utf8(path).map_err(|_| Error::new(EINVAL))?; + fn open(&self, path: &str, flags: usize, uid: u32, gid: u32) -> Result { let mut parts = path.splitn(2, '/'); let pid = parts.next() .and_then(|s| s.parse().ok()) @@ -310,8 +310,10 @@ impl Scheme for ProcScheme { handle.info }; - let mut path = format!("{}/", info.pid.into()).into_bytes(); - path.extend_from_slice(buf); + let buf_str = str::from_utf8(buf).map_err(|_| Error::new(EINVAL))?; + + let mut path = format!("{}/", info.pid.into()); + path.push_str(buf_str); let (uid, gid) = { let contexts = context::contexts(); diff --git a/src/scheme/root.rs b/src/scheme/root.rs index ca5e789..248c4b5 100644 --- a/src/scheme/root.rs +++ b/src/scheme/root.rs @@ -1,7 +1,10 @@ -use alloc::sync::Arc; -use alloc::boxed::Box; -use alloc::collections::BTreeMap; -use alloc::vec::Vec; +use alloc::{ + boxed::Box, + collections::BTreeMap, 
+ string::ToString, + sync::Arc, + vec::Vec, +}; use core::str; use core::sync::atomic::{AtomicUsize, Ordering}; use spin::{Mutex, RwLock}; @@ -67,9 +70,8 @@ impl RootScheme { } impl Scheme for RootScheme { - fn open(&self, path: &[u8], flags: usize, uid: u32, _gid: u32) -> Result { - let path_utf8 = str::from_utf8(path).or(Err(Error::new(ENOENT)))?; - let path_trimmed = path_utf8.trim_matches('/'); + fn open(&self, path: &str, flags: usize, uid: u32, _gid: u32) -> Result { + let path = path.trim_matches('/'); //TODO: Make this follow standards for flags and errors if flags & O_CREAT == O_CREAT { @@ -83,10 +85,10 @@ impl Scheme for RootScheme { let id = self.next_id.fetch_add(1, Ordering::SeqCst); let inner = { - let path_box = path_trimmed.as_bytes().to_vec().into_boxed_slice(); + let path_box = path.to_string().into_boxed_str(); let mut schemes = scheme::schemes_mut(); - let inner = Arc::new(UserInner::new(self.scheme_id, id, path_box.clone(), flags, context)); - schemes.insert(self.scheme_ns, path_box, |scheme_id| { + let inner = Arc::new(UserInner::new(self.scheme_id, id, path_box, flags, context)); + schemes.insert(self.scheme_ns, path, |scheme_id| { inner.scheme_id.store(scheme_id, Ordering::SeqCst); Arc::new(UserScheme::new(Arc::downgrade(&inner))) })?; @@ -99,7 +101,7 @@ impl Scheme for RootScheme { } else { Err(Error::new(EACCES)) } - } else if path_trimmed.is_empty() { + } else if path.is_empty() { let scheme_ns = { let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; @@ -111,7 +113,7 @@ impl Scheme for RootScheme { { let schemes = scheme::schemes(); for (name, _scheme_id) in schemes.iter_name(scheme_ns) { - data.extend_from_slice(name); + data.extend_from_slice(name.as_bytes()); data.push(b'\n'); } } @@ -126,7 +128,7 @@ impl Scheme for RootScheme { Ok(id) } else { let inner = Arc::new( - path_trimmed.as_bytes().to_vec().into_boxed_slice() + path.as_bytes().to_vec().into_boxed_slice() ); let id = 
self.next_id.fetch_add(1, Ordering::SeqCst); @@ -135,9 +137,8 @@ impl Scheme for RootScheme { } } - fn unlink(&self, path: &[u8], uid: u32, _gid: u32) -> Result { - let path_utf8 = str::from_utf8(path).or(Err(Error::new(ENOENT)))?; - let path_trimmed = path_utf8.trim_matches('/'); + fn unlink(&self, path: &str, uid: u32, _gid: u32) -> Result { + let path = path.trim_matches('/'); if uid == 0 { let inner = { @@ -145,7 +146,7 @@ impl Scheme for RootScheme { handles.iter().find_map(|(_id, handle)| { match handle { Handle::Scheme(inner) => { - if path_trimmed.as_bytes() == inner.name.as_ref() { + if path == inner.name.as_ref() { return Some(inner.clone()); } }, @@ -257,9 +258,10 @@ impl Scheme for RootScheme { match handle { Handle::Scheme(inner) => { + let name = inner.name.as_bytes(); let mut j = 0; - while i < buf.len() && j < inner.name.len() { - buf[i] = inner.name[j]; + while i < buf.len() && j < name.len() { + buf[i] = name[j]; i += 1; j += 1; } diff --git a/src/scheme/serio.rs b/src/scheme/serio.rs index f5b79ed..0a74742 100644 --- a/src/scheme/serio.rs +++ b/src/scheme/serio.rs @@ -60,13 +60,12 @@ impl SerioScheme { } impl Scheme for SerioScheme { - fn open(&self, path: &[u8], flags: usize, uid: u32, _gid: u32) -> Result { + fn open(&self, path: &str, flags: usize, uid: u32, _gid: u32) -> Result { if uid != 0 { return Err(Error::new(EPERM)); } - let index = str::from_utf8(path) - .or(Err(Error::new(ENOENT)))? 
+ let index = path .parse::() .or(Err(Error::new(ENOENT)))?; if index >= INPUT.len() { diff --git a/src/scheme/sys/mod.rs b/src/scheme/sys/mod.rs index ef7947b..da6576c 100644 --- a/src/scheme/sys/mod.rs +++ b/src/scheme/sys/mod.rs @@ -23,7 +23,7 @@ mod syscall; mod uname; struct Handle { - path: &'static [u8], + path: &'static str, data: Vec, mode: u16, seek: usize @@ -34,26 +34,26 @@ type SysFn = dyn Fn() -> Result> + Send + Sync; /// System information scheme pub struct SysScheme { next_id: AtomicUsize, - files: BTreeMap<&'static [u8], Box>, + files: BTreeMap<&'static str, Box>, handles: RwLock> } impl SysScheme { pub fn new() -> SysScheme { - let mut files: BTreeMap<&'static [u8], Box> = BTreeMap::new(); + let mut files: BTreeMap<&'static str, Box> = BTreeMap::new(); - files.insert(b"block", Box::new(block::resource)); - files.insert(b"context", Box::new(context::resource)); - files.insert(b"cpu", Box::new(cpu::resource)); - files.insert(b"exe", Box::new(exe::resource)); - files.insert(b"iostat", Box::new(iostat::resource)); - files.insert(b"log", Box::new(log::resource)); - files.insert(b"scheme", Box::new(scheme::resource)); - files.insert(b"scheme_num", Box::new(scheme_num::resource)); - files.insert(b"syscall", Box::new(syscall::resource)); - files.insert(b"uname", Box::new(uname::resource)); + files.insert("block", Box::new(block::resource)); + files.insert("context", Box::new(context::resource)); + files.insert("cpu", Box::new(cpu::resource)); + files.insert("exe", Box::new(exe::resource)); + files.insert("iostat", Box::new(iostat::resource)); + files.insert("log", Box::new(log::resource)); + files.insert("scheme", Box::new(scheme::resource)); + files.insert("scheme_num", Box::new(scheme_num::resource)); + files.insert("syscall", Box::new(syscall::resource)); + files.insert("uname", Box::new(uname::resource)); #[cfg(target_arch = "x86_64")] - files.insert(b"spurious_irq", Box::new(irq::spurious_irq_resource)); + files.insert("spurious_irq", 
Box::new(irq::spurious_irq_resource)); SysScheme { next_id: AtomicUsize::new(0), @@ -64,22 +64,21 @@ impl SysScheme { } impl Scheme for SysScheme { - fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result { - let path_utf8 = str::from_utf8(path).or(Err(Error::new(ENOENT)))?; - let path_trimmed = path_utf8.trim_matches('/'); + fn open(&self, path: &str, _flags: usize, _uid: u32, _gid: u32) -> Result { + let path = path.trim_matches('/'); - if path_trimmed.is_empty() { + if path.is_empty() { let mut data = Vec::new(); for entry in self.files.iter() { if ! data.is_empty() { data.push(b'\n'); } - data.extend_from_slice(entry.0); + data.extend_from_slice(entry.0.as_bytes()); } let id = self.next_id.fetch_add(1, Ordering::SeqCst); self.handles.write().insert(id, Handle { - path: b"", + path: "", data, mode: MODE_DIR | 0o444, seek: 0 @@ -88,7 +87,7 @@ impl Scheme for SysScheme { } else { //Have to iterate to get the path without allocation for entry in self.files.iter() { - if entry.0 == &path_trimmed.as_bytes() { + if entry.0 == &path { let id = self.next_id.fetch_add(1, Ordering::SeqCst); let data = entry.1()?; self.handles.write().insert(id, Handle { @@ -139,9 +138,10 @@ impl Scheme for SysScheme { i += 1; } + let path = handle.path.as_bytes(); let mut j = 0; - while i < buf.len() && j < handle.path.len() { - buf[i] = handle.path[j]; + while i < buf.len() && j < path.len() { + buf[i] = path[j]; i += 1; j += 1; } diff --git a/src/scheme/sys/scheme.rs b/src/scheme/sys/scheme.rs index f97c6e3..e0a367a 100644 --- a/src/scheme/sys/scheme.rs +++ b/src/scheme/sys/scheme.rs @@ -16,7 +16,7 @@ pub fn resource() -> Result> { let schemes = scheme::schemes(); for (name, _scheme_id) in schemes.iter_name(scheme_ns) { - data.extend_from_slice(name); + data.extend_from_slice(name.as_bytes()); data.push(b'\n'); } diff --git a/src/scheme/sys/scheme_num.rs b/src/scheme/sys/scheme_num.rs index fbd85ac..37e5649 100644 --- a/src/scheme/sys/scheme_num.rs +++ 
b/src/scheme/sys/scheme_num.rs @@ -17,7 +17,7 @@ pub fn resource() -> Result> { let schemes = scheme::schemes(); for (name, &scheme_id) in schemes.iter_name(scheme_ns) { data.extend_from_slice(format!("{:>4}: ", scheme_id.into()).as_bytes()); - data.extend_from_slice(name); + data.extend_from_slice(name.as_bytes()); data.push(b'\n'); } diff --git a/src/scheme/time.rs b/src/scheme/time.rs index 2ff548d..bc0143b 100644 --- a/src/scheme/time.rs +++ b/src/scheme/time.rs @@ -28,10 +28,8 @@ impl TimeScheme { } impl Scheme for TimeScheme { - fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result { - let path_str = str::from_utf8(path).or(Err(Error::new(ENOENT)))?; - - let clock = path_str.parse::().or(Err(Error::new(ENOENT)))?; + fn open(&self, path: &str, _flags: usize, _uid: u32, _gid: u32) -> Result { + let clock = path.parse::().or(Err(Error::new(ENOENT)))?; match clock { CLOCK_REALTIME => (), diff --git a/src/scheme/user.rs b/src/scheme/user.rs index a0e3046..4dab187 100644 --- a/src/scheme/user.rs +++ b/src/scheme/user.rs @@ -23,7 +23,7 @@ use crate::syscall::scheme::Scheme; pub struct UserInner { root_id: SchemeId, handle_id: usize, - pub name: Box<[u8]>, + pub name: Box, pub flags: usize, pub scheme_id: AtomicSchemeId, next_id: AtomicU64, @@ -36,7 +36,7 @@ pub struct UserInner { } impl UserInner { - pub fn new(root_id: SchemeId, handle_id: usize, name: Box<[u8]>, flags: usize, context: Weak>) -> UserInner { + pub fn new(root_id: SchemeId, handle_id: usize, name: Box, flags: usize, context: Weak>) -> UserInner { UserInner { root_id, handle_id, @@ -103,13 +103,27 @@ impl UserInner { /// Map a readable structure to the scheme's userspace and return the /// pointer pub fn capture(&self, buf: &[u8]) -> Result { - UserInner::capture_inner(&self.context, 0, buf.as_ptr() as usize, buf.len(), PROT_READ, None).map(|addr| addr.data()) + UserInner::capture_inner( + &self.context, + 0, + buf.as_ptr() as usize, + buf.len(), + PROT_READ, + None + ).map(|addr| 
addr.data()) } /// Map a writeable structure to the scheme's userspace and return the /// pointer pub fn capture_mut(&self, buf: &mut [u8]) -> Result { - UserInner::capture_inner(&self.context, 0, buf.as_mut_ptr() as usize, buf.len(), PROT_WRITE, None).map(|addr| addr.data()) + UserInner::capture_inner( + &self.context, + 0, + buf.as_mut_ptr() as usize, + buf.len(), + PROT_WRITE, + None + ).map(|addr| addr.data()) } fn capture_inner(context_weak: &Weak>, to_address: usize, address: usize, size: usize, flags: MapFlags, desc_opt: Option) @@ -257,33 +271,33 @@ impl UserScheme { } impl Scheme for UserScheme { - fn open(&self, path: &[u8], flags: usize, _uid: u32, _gid: u32) -> Result { + fn open(&self, path: &str, flags: usize, _uid: u32, _gid: u32) -> Result { let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?; - let address = inner.capture(path)?; + let address = inner.capture(path.as_bytes())?; let result = inner.call(SYS_OPEN, address, path.len(), flags); let _ = inner.release(address); result } - fn chmod(&self, path: &[u8], mode: u16, _uid: u32, _gid: u32) -> Result { + fn chmod(&self, path: &str, mode: u16, _uid: u32, _gid: u32) -> Result { let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?; - let address = inner.capture(path)?; + let address = inner.capture(path.as_bytes())?; let result = inner.call(SYS_CHMOD, address, path.len(), mode as usize); let _ = inner.release(address); result } - fn rmdir(&self, path: &[u8], _uid: u32, _gid: u32) -> Result { + fn rmdir(&self, path: &str, _uid: u32, _gid: u32) -> Result { let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?; - let address = inner.capture(path)?; + let address = inner.capture(path.as_bytes())?; let result = inner.call(SYS_RMDIR, address, path.len(), 0); let _ = inner.release(address); result } - fn unlink(&self, path: &[u8], _uid: u32, _gid: u32) -> Result { + fn unlink(&self, path: &str, _uid: u32, _gid: u32) -> Result { let inner = 
self.inner.upgrade().ok_or(Error::new(ENODEV))?; - let address = inner.capture(path)?; + let address = inner.capture(path.as_bytes())?; let result = inner.call(SYS_UNLINK, address, path.len(), 0); let _ = inner.release(address); result @@ -513,9 +527,9 @@ impl Scheme for UserScheme { result } - fn frename(&self, file: usize, path: &[u8], _uid: u32, _gid: u32) -> Result { + fn frename(&self, file: usize, path: &str, _uid: u32, _gid: u32) -> Result { let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?; - let address = inner.capture(path)?; + let address = inner.capture(path.as_bytes())?; let result = inner.call(SYS_FRENAME, file, address, path.len()); let _ = inner.release(address); result diff --git a/src/syscall/fs.rs b/src/syscall/fs.rs index bd4684a..a271e5d 100644 --- a/src/syscall/fs.rs +++ b/src/syscall/fs.rs @@ -1,6 +1,7 @@ //! Filesystem syscalls use alloc::sync::Arc; use alloc::vec::Vec; +use core::str; use core::sync::atomic::Ordering; use spin::RwLock; @@ -55,7 +56,7 @@ pub fn file_op_mut_slice(a: usize, fd: FileHandle, slice: &mut [u8]) -> Result Result { +pub fn chdir(path: &str) -> Result { let fd = open(path, O_RDONLY | O_DIRECTORY)?; let mut stat = Stat::default(); let stat_res = file_op_mut_slice(syscall::number::SYS_FSTAT, fd, &mut stat); @@ -79,16 +80,17 @@ pub fn getcwd(buf: &mut [u8]) -> Result { let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); let cwd = context.cwd.read(); + let cwd_bytes = cwd.as_bytes(); let mut i = 0; - while i < buf.len() && i < cwd.len() { - buf[i] = cwd[i]; + while i < buf.len() && i < cwd_bytes.len() { + buf[i] = cwd_bytes[i]; i += 1; } Ok(i) } /// Open syscall -pub fn open(path: &[u8], flags: usize) -> Result { +pub fn open(path: &str, flags: usize) -> Result { let (mut path_canon, uid, gid, scheme_ns, umask) = { let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; @@ -103,7 +105,7 @@ pub fn open(path: &[u8], 
flags: usize) -> Result { for _level in 0..32 { // XXX What should the limit be? //println!(" level {} = {:?}", _level, ::core::str::from_utf8(&path_canon)); - let mut parts = path_canon.splitn(2, |&b| b == b':'); + let mut parts = path_canon.splitn(2, ':'); let scheme_name_opt = parts.next(); let reference_opt = parts.next(); @@ -114,7 +116,7 @@ pub fn open(path: &[u8], flags: usize) -> Result { let (scheme_id, scheme) = schemes.get_name(scheme_ns, scheme_name).ok_or(Error::new(ENODEV))?; (scheme_id, Arc::clone(&scheme)) }; - let reference = reference_opt.unwrap_or(b""); + let reference = reference_opt.unwrap_or(""); let file_id = match scheme.open(reference, flags, uid, gid) { Ok(ok) => ok, Err(err) => if err.errno == EXDEV { @@ -128,10 +130,12 @@ pub fn open(path: &[u8], flags: usize) -> Result { let count = res?; + let buf_str = str::from_utf8(&buf[..count]).map_err(|_| Error::new(EINVAL))?; + let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; let context = context_lock.read(); - path_canon = context.canonicalize(&buf[..count]); + path_canon = context.canonicalize(buf_str); continue; } else { @@ -196,7 +200,7 @@ pub fn pipe2(fds: &mut [usize], flags: usize) -> Result { } /// chmod syscall -pub fn chmod(path: &[u8], mode: u16) -> Result { +pub fn chmod(path: &str, mode: u16) -> Result { let (path_canon, uid, gid, scheme_ns) = { let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; @@ -204,7 +208,7 @@ pub fn chmod(path: &[u8], mode: u16) -> Result { (context.canonicalize(path), context.euid, context.egid, context.ens) }; - let mut parts = path_canon.splitn(2, |&b| b == b':'); + let mut parts = path_canon.splitn(2, ':'); let scheme_name_opt = parts.next(); let reference_opt = parts.next(); @@ -214,11 +218,11 @@ pub fn chmod(path: &[u8], mode: u16) -> Result { let (_scheme_id, scheme) = schemes.get_name(scheme_ns, scheme_name).ok_or(Error::new(ENODEV))?; 
Arc::clone(&scheme) }; - scheme.chmod(reference_opt.unwrap_or(b""), mode, uid, gid) + scheme.chmod(reference_opt.unwrap_or(""), mode, uid, gid) } /// rmdir syscall -pub fn rmdir(path: &[u8]) -> Result { +pub fn rmdir(path: &str) -> Result { let (path_canon, uid, gid, scheme_ns) = { let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; @@ -226,7 +230,7 @@ pub fn rmdir(path: &[u8]) -> Result { (context.canonicalize(path), context.euid, context.egid, context.ens) }; - let mut parts = path_canon.splitn(2, |&b| b == b':'); + let mut parts = path_canon.splitn(2, ':'); let scheme_name_opt = parts.next(); let reference_opt = parts.next(); @@ -236,11 +240,11 @@ pub fn rmdir(path: &[u8]) -> Result { let (_scheme_id, scheme) = schemes.get_name(scheme_ns, scheme_name).ok_or(Error::new(ENODEV))?; Arc::clone(&scheme) }; - scheme.rmdir(reference_opt.unwrap_or(b""), uid, gid) + scheme.rmdir(reference_opt.unwrap_or(""), uid, gid) } /// Unlink syscall -pub fn unlink(path: &[u8]) -> Result { +pub fn unlink(path: &str) -> Result { let (path_canon, uid, gid, scheme_ns) = { let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; @@ -248,7 +252,7 @@ pub fn unlink(path: &[u8]) -> Result { (context.canonicalize(path), context.euid, context.egid, context.ens) }; - let mut parts = path_canon.splitn(2, |&b| b == b':'); + let mut parts = path_canon.splitn(2, ':'); let scheme_name_opt = parts.next(); let reference_opt = parts.next(); @@ -258,7 +262,7 @@ pub fn unlink(path: &[u8]) -> Result { let (_scheme_id, scheme) = schemes.get_name(scheme_ns, scheme_name).ok_or(Error::new(ENODEV))?; Arc::clone(&scheme) }; - scheme.unlink(reference_opt.unwrap_or(b""), uid, gid) + scheme.unlink(reference_opt.unwrap_or(""), uid, gid) } /// Close syscall @@ -409,7 +413,7 @@ pub fn fcntl(fd: FileHandle, cmd: usize, arg: usize) -> Result { } } -pub fn frename(fd: FileHandle, path: &[u8]) -> Result { +pub fn frename(fd: 
FileHandle, path: &str) -> Result { let file = { let contexts = context::contexts(); let context_lock = contexts.current().ok_or(Error::new(ESRCH))?; @@ -424,7 +428,7 @@ pub fn frename(fd: FileHandle, path: &[u8]) -> Result { (context.canonicalize(path), context.euid, context.egid, context.ens) }; - let mut parts = path_canon.splitn(2, |&b| b == b':'); + let mut parts = path_canon.splitn(2, ':'); let scheme_name_opt = parts.next(); let reference_opt = parts.next(); @@ -438,7 +442,7 @@ pub fn frename(fd: FileHandle, path: &[u8]) -> Result { let description = file.description.read(); if scheme_id == description.scheme { - scheme.frename(description.number, reference_opt.unwrap_or(b""), uid, gid) + scheme.frename(description.number, reference_opt.unwrap_or(""), uid, gid) } else { Err(Error::new(EXDEV)) } diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs index 9f8139b..2287e25 100644 --- a/src/syscall/mod.rs +++ b/src/syscall/mod.rs @@ -75,7 +75,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u SYS_DUP2 => dup2(fd, FileHandle::from(c), validate_slice(d as *const u8, e)?).map(FileHandle::into), SYS_FCNTL => fcntl(fd, c, d), SYS_FEXEC => fexec(fd, validate_slice(c as *const [usize; 2], d)?, validate_slice(e as *const [usize; 2], f)?), - SYS_FRENAME => frename(fd, validate_slice(c as *const u8, d)?), + SYS_FRENAME => frename(fd, validate_str(c as *const u8, d)?), SYS_FUNMAP => funmap(b, c), SYS_FMAP_OLD => { { @@ -100,10 +100,10 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u } }, SYS_CLASS_PATH => match a { - SYS_OPEN => open(validate_slice(b as *const u8, c)?, d).map(FileHandle::into), - SYS_CHMOD => chmod(validate_slice(b as *const u8, c)?, d as u16), - SYS_RMDIR => rmdir(validate_slice(b as *const u8, c)?), - SYS_UNLINK => unlink(validate_slice(b as *const u8, c)?), + SYS_OPEN => open(validate_str(b as *const u8, c)?, d).map(FileHandle::into), + SYS_CHMOD => chmod(validate_str(b as *const u8, 
c)?, d as u16), + SYS_RMDIR => rmdir(validate_str(b as *const u8, c)?), + SYS_UNLINK => unlink(validate_str(b as *const u8, c)?), _ => Err(Error::new(ENOSYS)) }, _ => match a { @@ -145,7 +145,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u SYS_EXIT => exit((b & 0xFF) << 8), SYS_KILL => kill(ContextId::from(b), c), SYS_WAITPID => waitpid(ContextId::from(b), c, WaitFlags::from_bits_truncate(d)).map(ContextId::into), - SYS_CHDIR => chdir(validate_slice(b as *const u8, c)?), + SYS_CHDIR => chdir(validate_str(b as *const u8, c)?), SYS_IOPL => iopl(b, stack), SYS_GETCWD => getcwd(validate_slice_mut(b as *mut u8, c)?), SYS_GETEGID => getegid(), diff --git a/src/syscall/privilege.rs b/src/syscall/privilege.rs index 2ad14d5..bd07278 100644 --- a/src/syscall/privilege.rs +++ b/src/syscall/privilege.rs @@ -3,7 +3,7 @@ use alloc::vec::Vec; use crate::context; use crate::scheme::{self, SchemeNamespace}; use crate::syscall::error::*; -use crate::syscall::validate::validate_slice; +use crate::syscall::validate::validate_str; pub fn getegid() -> Result { let contexts = context::contexts(); @@ -50,7 +50,7 @@ pub fn getuid() -> Result { pub fn mkns(name_ptrs: &[[usize; 2]]) -> Result { let mut names = Vec::new(); for name_ptr in name_ptrs { - names.push(validate_slice(name_ptr[0] as *const u8, name_ptr[1])?); + names.push(validate_str(name_ptr[0] as *const u8, name_ptr[1])?); } let (uid, from) = { diff --git a/src/syscall/process.rs b/src/syscall/process.rs index d91d049..3710001 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -7,7 +7,7 @@ use alloc::{ }; use core::alloc::{GlobalAlloc, Layout}; use core::ops::DerefMut; -use core::{intrinsics, mem}; +use core::{intrinsics, mem, str}; use spin::RwLock; use crate::context::file::FileDescriptor; @@ -1030,9 +1030,11 @@ pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]> } interp.truncate(i); - println!(" interpreter: {:?}", 
::core::str::from_utf8(&interp)); + let interp_str = str::from_utf8(&interp).map_err(|_| Error::new(EINVAL))?; - let interp_fd = super::fs::open(&interp, super::flag::O_RDONLY | super::flag::O_CLOEXEC)?; + println!(" interpreter: {}", interp_str); + + let interp_fd = super::fs::open(interp_str, super::flag::O_RDONLY | super::flag::O_CLOEXEC)?; let mut args_vec = Vec::from(args); //TODO: pass file handle in auxv diff --git a/src/syscall/validate.rs b/src/syscall/validate.rs index 5806a53..346dcd0 100644 --- a/src/syscall/validate.rs +++ b/src/syscall/validate.rs @@ -1,4 +1,4 @@ -use core::{mem, slice}; +use core::{mem, slice, str}; use crate::paging::{ActivePageTable, PageTableType, Page, VirtualAddress, VirtualAddressType}; use crate::paging::entry::EntryFlags; @@ -51,3 +51,10 @@ pub fn validate_slice_mut(ptr: *mut T, len: usize) -> Result<&'static mut [T] Ok(unsafe { slice::from_raw_parts_mut(ptr, len) }) } } + +/// Convert a pointer and length to str, if valid +//TODO: Mark unsafe +pub fn validate_str(ptr: *const u8, len: usize) -> Result<&'static str> { + let slice = validate_slice(ptr, len)?; + str::from_utf8(slice).map_err(|_| Error::new(EINVAL)) +} diff --git a/syscall b/syscall index bfdd0bb..87e913d 160000 --- a/syscall +++ b/syscall @@ -1 +1 @@ -Subproject commit bfdd0bb8d64c078192e44e83719a8aa4b3bf85a1 +Subproject commit 87e913d991810374688f3284154d9216ffa8f73e From 37e6951501213d2aae2570a1486e500b27b0bbc1 Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Wed, 28 Apr 2021 20:59:52 -0600 Subject: [PATCH 50/55] Print CPU and PID when exception occurs --- src/arch/aarch64/interrupt/exception.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/arch/aarch64/interrupt/exception.rs b/src/arch/aarch64/interrupt/exception.rs index 01d0ef3..2995389 100644 --- a/src/arch/aarch64/interrupt/exception.rs +++ b/src/arch/aarch64/interrupt/exception.rs @@ -1,4 +1,6 @@ use crate::{ + context, + cpu_id, interrupt::stack_trace, syscall, syscall::flag::*, @@ -31,6 
+33,7 @@ exception_stack!(synchronous_exception_at_el0, |stack| { println!("FATAL: Not an SVC induced synchronous exception"); stack.dump(); stack_trace(); + println!("CPU {}, PID {:?}", cpu_id(), context::context_id()); loop {} } From 73c77d756ddf5ed17417a7a0d6084d19785d3939 Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Wed, 28 Apr 2021 21:15:21 -0600 Subject: [PATCH 51/55] Print context name in exception --- src/arch/aarch64/interrupt/exception.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/arch/aarch64/interrupt/exception.rs b/src/arch/aarch64/interrupt/exception.rs index 2995389..b481591 100644 --- a/src/arch/aarch64/interrupt/exception.rs +++ b/src/arch/aarch64/interrupt/exception.rs @@ -33,7 +33,19 @@ exception_stack!(synchronous_exception_at_el0, |stack| { println!("FATAL: Not an SVC induced synchronous exception"); stack.dump(); stack_trace(); + println!("CPU {}, PID {:?}", cpu_id(), context::context_id()); + + // This could deadlock, but at this point we are going to halt anyways + { + let contexts = context::contexts(); + if let Some(context_lock) = contexts.current() { + let context = context_lock.read(); + println!("NAME: {}", *context.name.read()); + } + } + + // Halt loop {} } From af17eeec3ab2ebb6ae48d50253fb38a2352240fc Mon Sep 17 00:00:00 2001 From: 4lDO2 <4lDO2@protonmail.com> Date: Sat, 20 Feb 2021 17:14:39 +0100 Subject: [PATCH 52/55] Give schemes a dangling address for empty slices. This allows schemes to avoid checking the length against zero before constructing a slice from pointer+len that the kernel gave. Additionally, the address is now non-canonical on x86, meaning that userspace will fail instead of continuing with UB, if they would ever forget to check the length. 
--- src/scheme/user.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/scheme/user.rs b/src/scheme/user.rs index 4dab187..1372ae2 100644 --- a/src/scheme/user.rs +++ b/src/scheme/user.rs @@ -131,7 +131,18 @@ impl UserInner { // TODO: More abstractions over grant creation! if size == 0 { - return Ok(VirtualAddress::new(0)); + // NOTE: Rather than returning NULL, we return a dummy dangling address, that is also + // non-canonical on x86. This means that scheme handlers do not need to check the + // length before creating a Rust slice (which cannot have NULL as address regardless of + // the length; this actually made nulld think that an empty path was invalid UTF-8 + // because of enum layout optimization), independent of whatever alignment this slice + // will have. Additionally, they would generate a general protection fault immediately + // if they ever tried to access this dangling address. + + // Set the most significant bit. + let dangling: usize = 1 << (core::mem::size_of::() * 8 - 1); + + return Ok(VirtualAddress::new(dangling)); } let context_lock = context_weak.upgrade().ok_or(Error::new(ESRCH))?; From ad39568fe903c64229938f82bbc48d9919ba5e6a Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Sun, 2 May 2021 19:53:31 -0600 Subject: [PATCH 53/55] Fix unwrapping stack when there are kernel addresses (and there always are) --- src/arch/aarch64/interrupt/trace.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/arch/aarch64/interrupt/trace.rs b/src/arch/aarch64/interrupt/trace.rs index 37eb101..ca1d472 100644 --- a/src/arch/aarch64/interrupt/trace.rs +++ b/src/arch/aarch64/interrupt/trace.rs @@ -12,18 +12,23 @@ pub unsafe fn stack_trace() { println!("TRACE: {:>016x}", fp); //Maximum 64 frames - let active_table = ActivePageTable::new(PageTableType::User); + let active_ktable = ActivePageTable::new(PageTableType::Kernel); + let active_utable = ActivePageTable::new(PageTableType::User); + let 
in_kernel_or_user_table = |ptr| { + active_ktable.translate(VirtualAddress::new(ptr)).is_some() || + active_utable.translate(VirtualAddress::new(ptr)).is_some() + }; for _frame in 0..64 { if let Some(pc_fp) = fp.checked_add(mem::size_of::()) { - if active_table.translate(VirtualAddress::new(fp)).is_some() && active_table.translate(VirtualAddress::new(pc_fp)).is_some() { + if in_kernel_or_user_table(fp) && in_kernel_or_user_table(pc_fp) { let pc = *(pc_fp as *const usize); if pc == 0 { println!(" {:>016x}: EMPTY RETURN", fp); break; } - println!(" {:>016x}: {:>016x}", fp, pc); + println!(" FP {:>016x}: PC {:>016x}", fp, pc); fp = *(fp as *const usize); -// symbol_trace(pc); + //TODO symbol_trace(pc); } else { println!(" {:>016x}: GUARD PAGE", fp); break; From dfdb562e6b04daa1906295379e9c88031c910f9f Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Mon, 3 May 2021 20:33:31 -0600 Subject: [PATCH 54/55] Use RMM PhysicalAddress --- src/arch/aarch64/paging/mod.rs | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/src/arch/aarch64/paging/mod.rs b/src/arch/aarch64/paging/mod.rs index 43c8d13..e0e5384 100644 --- a/src/arch/aarch64/paging/mod.rs +++ b/src/arch/aarch64/paging/mod.rs @@ -12,6 +12,8 @@ use self::entry::{EntryFlags, TableDescriptorFlags}; use self::mapper::{Mapper, MapperFlushAll, MapperType}; use self::temporary_page::TemporaryPage; +pub use rmm::PhysicalAddress; + pub mod entry; pub mod mapper; pub mod table; @@ -374,20 +376,6 @@ impl InactivePageTable { } } -/// A physical address. -#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] -pub struct PhysicalAddress(usize); - -impl PhysicalAddress { - pub fn new(address: usize) -> Self { - PhysicalAddress(address) - } - - pub fn data(&self) -> usize { - self.0 - } -} - /// A virtual address. 
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] pub struct VirtualAddress(usize); From 17c261553b3bba705cd5ec7625f5bde9bacbc2dd Mon Sep 17 00:00:00 2001 From: Jeremy Soller Date: Mon, 3 May 2021 20:43:18 -0600 Subject: [PATCH 55/55] Fixes for building x86_64 --- src/arch/x86_64/consts.rs | 3 +++ src/arch/x86_64/interrupt/trace.rs | 4 +-- src/arch/x86_64/paging/mod.rs | 43 ++++++++++++++++++++++++++---- src/arch/x86_64/rmm.rs | 13 ++++----- src/context/arch/x86_64.rs | 4 +-- src/context/list.rs | 1 + src/syscall/process.rs | 18 +++++++++---- 7 files changed, 66 insertions(+), 20 deletions(-) diff --git a/src/arch/x86_64/consts.rs b/src/arch/x86_64/consts.rs index 5409caf..8f46749 100644 --- a/src/arch/x86_64/consts.rs +++ b/src/arch/x86_64/consts.rs @@ -21,6 +21,9 @@ /// Size of kernel heap pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB + /// Offset of temporary mapping for misc kernel bring-up actions + pub const KERNEL_TMP_MISC_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE; + /// Offset to kernel percpu variables //TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE; pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000; diff --git a/src/arch/x86_64/interrupt/trace.rs b/src/arch/x86_64/interrupt/trace.rs index f14d198..4b224d7 100644 --- a/src/arch/x86_64/interrupt/trace.rs +++ b/src/arch/x86_64/interrupt/trace.rs @@ -2,7 +2,7 @@ use core::{mem, str}; use goblin::elf::sym; use rustc_demangle::demangle; -use crate::paging::{ActivePageTable, VirtualAddress}; +use crate::paging::{ActivePageTable, PageTableType, VirtualAddress}; /// Get a stack trace //TODO: Check for stack being mapped before dereferencing @@ -13,7 +13,7 @@ pub unsafe fn stack_trace() { println!("TRACE: {:>016X}", rbp); //Maximum 64 frames - let active_table = ActivePageTable::new(); + let active_table = ActivePageTable::new(PageTableType::User); for _frame in 0..64 { if let Some(rip_rbp) = 
rbp.checked_add(mem::size_of::()) { if active_table.translate(VirtualAddress::new(rbp)).is_some() && active_table.translate(VirtualAddress::new(rip_rbp)).is_some() { diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs index 1b509fe..cbad3e0 100644 --- a/src/arch/x86_64/paging/mod.rs +++ b/src/arch/x86_64/paging/mod.rs @@ -12,7 +12,7 @@ use self::entry::EntryFlags; use self::mapper::{Mapper, MapperFlushAll}; use self::temporary_page::TemporaryPage; -pub use rmm::{PhysicalAddress, VirtualAddress}; +pub use rmm::PhysicalAddress; pub mod entry; pub mod mapper; @@ -186,7 +186,7 @@ pub unsafe fn init( init_pat(); - let mut active_table = ActivePageTable::new_unlocked(); + let mut active_table = ActivePageTable::new_unlocked(PageTableType::User); let flush_all = map_tss(cpu_id, &mut active_table); flush_all.flush(&mut active_table); @@ -200,7 +200,7 @@ pub unsafe fn init_ap( ) -> usize { init_pat(); - let mut active_table = ActivePageTable::new_unlocked(); + let mut active_table = ActivePageTable::new_unlocked(PageTableType::User); let mut new_table = InactivePageTable::from_address(bsp_table); @@ -227,6 +227,11 @@ pub struct ActivePageTable { locked: bool, } +pub enum PageTableType { + User, + Kernel +} + impl Deref for ActivePageTable { type Target = Mapper; @@ -242,7 +247,7 @@ impl DerefMut for ActivePageTable { } impl ActivePageTable { - pub unsafe fn new() -> ActivePageTable { + pub unsafe fn new(_table_type: PageTableType) -> ActivePageTable { page_table_lock(); ActivePageTable { mapper: Mapper::new(), @@ -250,7 +255,7 @@ impl ActivePageTable { } } - pub unsafe fn new_unlocked() -> ActivePageTable { + pub unsafe fn new_unlocked(_table_type: PageTableType) -> ActivePageTable { ActivePageTable { mapper: Mapper::new(), locked: false, @@ -376,6 +381,34 @@ impl InactivePageTable { } } +/// A virtual address. 
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] +pub struct VirtualAddress(usize); + +#[derive(Debug, PartialEq)] +pub enum VirtualAddressType { + User, + Kernel +} + +impl VirtualAddress { + pub fn new(address: usize) -> Self { + VirtualAddress(address) + } + + pub fn data(&self) -> usize { + self.0 + } + + pub fn get_type(&self) -> VirtualAddressType { + if ((self.0 >> 48) & 0xffff) == 0xffff { + VirtualAddressType::Kernel + } else { + VirtualAddressType::User + } + } +} + /// Page #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub struct Page { diff --git a/src/arch/x86_64/rmm.rs b/src/arch/x86_64/rmm.rs index 3cc1905..1a5ad28 100644 --- a/src/arch/x86_64/rmm.rs +++ b/src/arch/x86_64/rmm.rs @@ -8,6 +8,7 @@ use rmm::{ FrameCount, FrameUsage, MemoryArea, + PageFlags, PageMapper, PhysicalAddress, VirtualAddress, @@ -27,7 +28,7 @@ extern "C" { static mut __rodata_end: u8; } -unsafe fn page_flags(virt: VirtualAddress) -> usize { +unsafe fn page_flags(virt: VirtualAddress) -> PageFlags { let virt_addr = virt.data(); // Test for being inside a region @@ -40,13 +41,13 @@ unsafe fn page_flags(virt: VirtualAddress) -> usize { if in_section!(text) { // Remap text read-only, execute - 0 + PageFlags::new().write(false).execute(true) } else if in_section!(rodata) { // Remap rodata read-only, no execute - A::ENTRY_FLAG_NO_EXEC + PageFlags::new().write(false).execute(false) } else { - // Remap everything else writable, no execute - A::ENTRY_FLAG_WRITABLE | A::ENTRY_FLAG_NO_EXEC + // Remap everything else read-write, no execute + PageFlags::new().write(true).execute(false) } } @@ -101,7 +102,7 @@ unsafe fn inner(areas: &'static [MemoryArea], kernel_base: usize, kerne //TODO: remove backwards compatible recursive mapping mapper.table().set_entry(511, rmm::PageEntry::new( - mapper.table().phys().data() | A::ENTRY_FLAG_WRITABLE | A::ENTRY_FLAG_PRESENT | A::ENTRY_FLAG_NO_EXEC + mapper.table().phys().data() | A::ENTRY_FLAG_READWRITE | 
A::ENTRY_FLAG_PRESENT | A::ENTRY_FLAG_NO_EXEC )); println!("Table: {:X}", mapper.table().phys().data()); diff --git a/src/context/arch/x86_64.rs b/src/context/arch/x86_64.rs index 64b0268..8c20ca8 100644 --- a/src/context/arch/x86_64.rs +++ b/src/context/arch/x86_64.rs @@ -54,7 +54,7 @@ impl Context { } } - pub fn get_page_table(&mut self) -> usize { + pub fn get_page_utable(&mut self) -> usize { self.cr3 } @@ -102,7 +102,7 @@ impl Context { self.fx = address; } - pub fn set_page_table(&mut self, address: usize) { + pub fn set_page_utable(&mut self, address: usize) { self.cr3 = address; } diff --git a/src/context/list.rs b/src/context/list.rs index 704642e..68fa204 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -97,6 +97,7 @@ impl ContextList { } context.arch.set_page_utable(unsafe { ActivePageTable::new(PageTableType::User).address() }); + #[cfg(target_arch = "aarch64")] context.arch.set_page_ktable(unsafe { ActivePageTable::new(PageTableType::Kernel).address() }); context.arch.set_fx(fx.as_ptr() as usize); context.arch.set_stack(stack.as_ptr() as usize + offset); diff --git a/src/syscall/process.rs b/src/syscall/process.rs index 3710001..b79bb39 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -363,14 +363,22 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table"); InactivePageTable::new(frame, &mut active_utable, &mut temporary_upage) }; + context.arch.set_page_utable(unsafe { new_utable.address() }); + #[cfg(target_arch = "aarch64")] let mut new_ktable = { - let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table"); - InactivePageTable::new(frame, &mut active_ktable, &mut temporary_kpage) + let mut new_ktable = { + let frame = allocate_frames(1).expect("no more frames in syscall::clone new_table"); + InactivePageTable::new(frame, &mut active_ktable, &mut temporary_kpage) + }; + 
context.arch.set_page_ktable(unsafe { new_ktable.address() }); + new_ktable }; - context.arch.set_page_utable(unsafe { new_utable.address() }); - context.arch.set_page_ktable(unsafe { new_ktable.address() }); + #[cfg(target_arch = "x86_64")] + let mut new_ktable = unsafe { + InactivePageTable::from_address(new_utable.address()) + }; // Copy kernel image mapping { @@ -531,7 +539,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result { } } } - + // Setup user TLS if let Some(mut tls) = tls_opt { // Copy TLS mapping