Fixes for compiling aarch64

Jeremy Soller
2022-07-29 18:06:53 -06:00
parent 897cd4c9f4
commit e99cbcf5d2
28 changed files with 496 additions and 798 deletions

Cargo.lock

@@ -75,6 +75,7 @@ dependencies = [
"linked_list_allocator 0.9.1",
"log",
"memoffset",
"paste",
"raw-cpuid",
"redox_syscall",
"rmm",
@@ -131,6 +132,12 @@ dependencies = [
"autocfg",
]
[[package]]
name = "paste"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc"
[[package]]
name = "plain"
version = "0.2.3"


@@ -37,6 +37,7 @@ default-features = false
[target.'cfg(target_arch = "aarch64")'.dependencies]
byteorder = { version = "1", default-features = false }
fdt = { git = "https://gitlab.redox-os.org/thomhuds/fdt.git", default-features = false }
paste = "1.0.7"
[target.'cfg(any(target_arch = "x86", target_arch = "x86_64"))'.dependencies]
raw-cpuid = "10.2.0"


@@ -53,6 +53,9 @@
pub const USER_OFFSET: usize = 0;
pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK)/PML4_SIZE;
/// End offset of the user image, i.e. kernel start
pub const USER_END_OFFSET: usize = 256 * PML4_SIZE;
/// Offset to user TCB
pub const USER_TCB_OFFSET: usize = 0xB000_0000;


@@ -4,7 +4,7 @@ use crate::device::cpu::registers::{control_regs};
pub mod registers;
bitfield! {
bitfield::bitfield! {
pub struct MachineId(u32);
get_implementer, _: 31, 24;
get_variant, _: 23, 20;


@@ -1,5 +1,7 @@
//! Functions to read and write control registers.
use core::arch::asm;
bitflags! {
pub struct MairEl1: u64 {
const DEVICE_MEMORY = 0x00;
@@ -10,76 +12,76 @@ bitflags! {
pub unsafe fn ttbr0_el1() -> u64 {
let ret: u64;
llvm_asm!("mrs $0, ttbr0_el1" : "=r" (ret));
asm!("mrs {}, ttbr0_el1", out(reg) ret);
ret
}
pub unsafe fn ttbr0_el1_write(val: u64) {
llvm_asm!("msr ttbr0_el1, $0" :: "r" (val) : "memory");
asm!("msr ttbr0_el1, {}", in(reg) val);
}
pub unsafe fn ttbr1_el1() -> u64 {
let ret: u64;
llvm_asm!("mrs $0, ttbr1_el1" : "=r" (ret));
asm!("mrs {}, ttbr1_el1", out(reg) ret);
ret
}
pub unsafe fn ttbr1_el1_write(val: u64) {
llvm_asm!("msr ttbr1_el1, $0" :: "r" (val) : "memory");
asm!("msr ttbr1_el1, {}", in(reg) val);
}
pub unsafe fn mair_el1() -> MairEl1 {
let ret: u64;
llvm_asm!("mrs $0, mair_el1" : "=r" (ret));
asm!("mrs {}, mair_el1", out(reg) ret);
MairEl1::from_bits_truncate(ret)
}
pub unsafe fn mair_el1_write(val: MairEl1) {
llvm_asm!("msr mair_el1, $0" :: "r" (val.bits()) : "memory");
asm!("msr mair_el1, {}", in(reg) val.bits());
}
pub unsafe fn tpidr_el0_write(val: u64) {
llvm_asm!("msr tpidr_el0, $0" :: "r" (val) : "memory");
asm!("msr tpidr_el0, {}", in(reg) val);
}
pub unsafe fn tpidr_el1_write(val: u64) {
llvm_asm!("msr tpidr_el1, $0" :: "r" (val) : "memory");
asm!("msr tpidr_el1, {}", in(reg) val);
}
pub unsafe fn esr_el1() -> u32 {
let ret: u32;
llvm_asm!("mrs $0, esr_el1" : "=r" (ret));
asm!("mrs {}, esr_el1", out(reg) ret);
ret
}
pub unsafe fn cntfreq_el0() -> u32 {
let ret: u32;
llvm_asm!("mrs $0, cntfrq_el0" : "=r" (ret));
asm!("mrs {}, cntfrq_el0", out(reg) ret);
ret
}
pub unsafe fn tmr_ctrl() -> u32 {
let ret: u32;
llvm_asm!("mrs $0, cntp_ctl_el0" : "=r" (ret));
asm!("mrs {}, cntp_ctl_el0", out(reg) ret);
ret
}
pub unsafe fn tmr_ctrl_write(val: u32) {
llvm_asm!("msr cntp_ctl_el0, $0" :: "r" (val) : "memory");
asm!("msr cntp_ctl_el0, {}", in(reg) val);
}
pub unsafe fn tmr_tval() -> u32 {
let ret: u32;
llvm_asm!("mrs $0, cntp_tval_el0" : "=r" (ret));
asm!("mrs {}, cntp_tval_el0", out(reg) ret);
ret
}
pub unsafe fn tmr_tval_write(val: u32) {
llvm_asm!("msr cntp_tval_el0, $0" :: "r" (val) : "memory");
asm!("msr cntp_tval_el0, {}", in(reg) val);
}
pub unsafe fn midr() -> u32 {
let ret: u32;
llvm_asm!("mrs $0, midr_el1" : "=r" (ret));
asm!("mrs {}, midr_el1", out(reg) ret);
ret
}
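Every change in this file is the same mechanical migration: the `llvm_asm!` macro (with its `: "=r" (ret)` constraint lists) was removed from nightly Rust, and the replacement is the stable `asm!` from `core::arch`, using `out(reg)`/`in(reg)` operands. The explicit `"memory"` clobbers also disappear, because `asm!` conservatively assumes memory side effects unless `options(nomem)` is given. A side-by-side sketch of the pattern (any system register works the same way):

    use core::arch::asm;

    unsafe fn example(val: u64) -> u64 {
        let ret: u64;
        // Before: llvm_asm!("mrs $0, ttbr0_el1" : "=r" (ret));
        asm!("mrs {}, ttbr0_el1", out(reg) ret);
        // Before: llvm_asm!("msr ttbr0_el1, $0" :: "r" (val) : "memory");
        asm!("msr ttbr0_el1, {}", in(reg) val);
        ret
    }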


@@ -1,9 +1,11 @@
//! Functions to flush the translation lookaside buffer (TLB).
use core::arch::asm;
pub unsafe fn flush(_addr: usize) {
llvm_asm!("tlbi vmalle1is");
asm!("tlbi vmalle1is");
}
pub unsafe fn flush_all() {
llvm_asm!("tlbi vmalle1is");
asm!("tlbi vmalle1is");
}


@@ -1,7 +1,7 @@
use core::intrinsics::{volatile_load, volatile_store};
use crate::memory::Frame;
use crate::paging::{ActivePageTable, PhysicalAddress, Page, PageFlags, TableKind, VirtualAddress};
use crate::paging::{KernelMapper, PhysicalAddress, Page, PageFlags, TableKind, VirtualAddress};
static GICD_CTLR: u32 = 0x000;
static GICD_TYPER: u32 = 0x004;
@@ -56,14 +56,18 @@ pub struct GicDistIf {
impl GicDistIf {
unsafe fn init(&mut self) {
// Map in the Distributor interface
let mut active_table = ActivePageTable::new(TableKind::Kernel);
let mut mapper = KernelMapper::lock();
let start_frame = Frame::containing_address(PhysicalAddress::new(0x08000000));
let end_frame = Frame::containing_address(PhysicalAddress::new(0x08000000 + 0x10000 - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::KERNEL_DEVMAP_OFFSET));
let result = active_table.map_to(page, frame, PageFlags::new().write(true));
result.flush();
mapper
.get_mut()
.expect("failed to access KernelMapper for mapping GIC distributor")
.map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true))
.expect("failed to map GIC distributor")
.flush();
}
self.address = crate::KERNEL_DEVMAP_OFFSET + 0x08000000;
@@ -73,8 +77,12 @@ impl GicDistIf {
let end_frame = Frame::containing_address(PhysicalAddress::new(0x08010000 + 0x10000 - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::KERNEL_DEVMAP_OFFSET));
let result = active_table.map_to(page, frame, PageFlags::new().write(true));
result.flush();
mapper
.get_mut()
.expect("failed to access KernelMapper for mapping GIC interface")
.map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true))
.expect("failed to map GIC interface")
.flush();
}
GIC_CPU_IF.address = crate::KERNEL_DEVMAP_OFFSET + 0x08010000;


@@ -1,5 +1,3 @@
use crate::paging::ActivePageTable;
pub mod cpu;
pub mod gic;
pub mod generic_timer;
@@ -7,7 +5,7 @@ pub mod serial;
pub mod rtc;
pub mod uart_pl011;
pub unsafe fn init(_active_table: &mut ActivePageTable) {
pub unsafe fn init() {
println!("GIC INIT");
gic::init();
println!("GIT INIT");


@@ -1,7 +1,7 @@
use core::intrinsics::{volatile_load, volatile_store};
use crate::memory::Frame;
use crate::paging::{ActivePageTable, PhysicalAddress, Page, PageFlags, TableKind, VirtualAddress};
use crate::paging::{KernelMapper, PhysicalAddress, Page, PageFlags, TableKind, VirtualAddress};
use crate::time;
static RTC_DR: u32 = 0x000;
@@ -28,15 +28,19 @@ struct Pl031rtc {
impl Pl031rtc {
unsafe fn init(&mut self) {
let mut active_table = ActivePageTable::new(TableKind::Kernel);
let mut mapper = KernelMapper::lock();
let start_frame = Frame::containing_address(PhysicalAddress::new(0x09010000));
let end_frame = Frame::containing_address(PhysicalAddress::new(0x09010000 + 0x1000 - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().data() + crate::KERNEL_DEVMAP_OFFSET));
let result = active_table.map_to(page, frame, PageFlags::new().write(true));
result.flush();
mapper
.get_mut()
.expect("failed to access KernelMapper for mapping RTC")
.map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true))
.expect("failed to map RTC")
.flush();
}
self.address = crate::KERNEL_DEVMAP_OFFSET + 0x09010000;


@@ -5,7 +5,7 @@ use crate::device::uart_pl011::SerialPort;
use crate::init::device_tree;
use crate::memory::Frame;
use crate::paging::mapper::PageFlushAll;
use crate::paging::{ActivePageTable, Page, PageFlags, PhysicalAddress, TableKind, VirtualAddress};
use crate::paging::{KernelMapper, Page, PageFlags, PhysicalAddress, TableKind, VirtualAddress};
pub static COM1: Mutex<Option<SerialPort>> = Mutex::new(None);


@@ -23,7 +23,7 @@ fn root_cell_sz(dt: &fdt::DeviceTree) -> Option<(u32, u32)> {
fn memory_ranges(dt: &fdt::DeviceTree, address_cells: usize, size_cells: usize, ranges: &mut [(usize, usize); 10]) -> usize {
let memory_node = dt.find_node("/memory").unwrap();
let (memory_node, _memory_cells) = dt.find_node("/memory").unwrap();
let reg = memory_node.properties().find(|p| p.name.contains("reg")).unwrap();
let chunk_sz = (address_cells + size_cells) * 4;
let chunk_count = (reg.data.len() / chunk_sz);
@@ -51,7 +51,7 @@ pub fn diag_uart_range(dtb_base: usize, dtb_size: usize) -> Option<(usize, usize
let data = unsafe { slice::from_raw_parts(dtb_base as *const u8, dtb_size) };
let dt = fdt::DeviceTree::new(data).unwrap();
let chosen_node = dt.find_node("/chosen").unwrap();
let (chosen_node, _chosen_cells) = dt.find_node("/chosen").unwrap();
let stdout_path = chosen_node.properties().find(|p| p.name.contains("stdout-path")).unwrap();
let uart_node_name = core::str::from_utf8(stdout_path.data).unwrap()
.split('/')
@@ -92,7 +92,7 @@ pub fn fill_env_data(dtb_base: usize, dtb_size: usize, env_base: usize) -> usize
let data = unsafe { slice::from_raw_parts(dtb_base as *const u8, dtb_size) };
let dt = fdt::DeviceTree::new(data).unwrap();
let chosen_node = dt.find_node("/chosen").unwrap();
let (chosen_node, _chosen_cells) = dt.find_node("/chosen").unwrap();
if let Some(bootargs) = chosen_node.properties().find(|p| p.name.contains("bootargs")) {
let bootargs_len = bootargs.data.len();


@@ -1,3 +1,5 @@
use core::arch::asm;
use crate::{
context,
cpu_id,
@@ -25,9 +27,6 @@ exception_stack!(synchronous_exception_at_el1_with_spx, |stack| {
exception_stack!(synchronous_exception_at_el0, |stack| {
with_exception_stack!(|stack| {
let fp;
asm!("mov {}, fp", out(reg) fp);
let exception_code = (stack.iret.esr_el1 & (0x3f << 26)) >> 26;
if exception_code != 0b010101 {
println!("FATAL: Not an SVC induced synchronous exception");
@@ -50,7 +49,7 @@ exception_stack!(synchronous_exception_at_el0, |stack| {
}
let scratch = &stack.scratch;
syscall::syscall(scratch.x8, scratch.x0, scratch.x1, scratch.x2, scratch.x3, scratch.x4, fp, stack)
syscall::syscall(scratch.x8, scratch.x0, scratch.x1, scratch.x2, scratch.x3, scratch.x4, stack)
});
});


@@ -174,7 +174,7 @@ impl InterruptStack {
#[macro_export]
macro_rules! aarch64_asm {
($($strings:expr,)+) => {
global_asm!(concat!(
core::arch::global_asm!(concat!(
$($strings),+,
));
};
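The body changes because `global_asm!` is no longer injected at the crate root on current nightlies; it has to be named through its `core::arch` path. A hypothetical invocation of the macro, for illustration only:

    aarch64_asm!(
        ".global example_symbol\n",
        "example_symbol:\n",
        "    ret\n",
    );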


@@ -1,5 +1,7 @@
//! Interrupt instructions
use core::arch::asm;
#[macro_use]
pub mod handler;
@@ -14,13 +16,13 @@ pub use self::trace::stack_trace;
/// Clear interrupts
#[inline(always)]
pub unsafe fn disable() {
llvm_asm!("msr daifset, #2");
asm!("msr daifset, #2");
}
/// Set interrupts
#[inline(always)]
pub unsafe fn enable() {
llvm_asm!("msr daifclr, #2");
asm!("msr daifclr, #2");
}
/// Set interrupts and halt
@@ -28,8 +30,8 @@ pub unsafe fn enable() {
/// Performing enable followed by halt is not guaranteed to be atomic, use this instead!
#[inline(always)]
pub unsafe fn enable_and_halt() {
llvm_asm!("msr daifclr, #2");
llvm_asm!("wfi");
asm!("msr daifclr, #2");
asm!("wfi");
}
/// Set interrupts and nop
@@ -37,21 +39,21 @@ pub unsafe fn enable_and_halt() {
/// Simply enabling interrupts does not guarantee that they will trigger, use this instead!
#[inline(always)]
pub unsafe fn enable_and_nop() {
llvm_asm!("msr daifclr, #2");
llvm_asm!("nop");
asm!("msr daifclr, #2");
asm!("nop");
}
/// Halt instruction
#[inline(always)]
pub unsafe fn halt() {
llvm_asm!("wfi");
asm!("wfi");
}
/// Pause instruction
/// Safe because it is similar to a NOP, and has no memory effects
#[inline(always)]
pub fn pause() {
unsafe { llvm_asm!("nop") };
unsafe { asm!("nop") };
}
pub fn available_irqs_iter(cpu_id: usize) -> impl Iterator<Item = u8> + 'static {


@@ -1,16 +1,18 @@
use core::mem;
use core::{arch::asm, mem};
use goblin::elf::sym;
use crate::paging::{ActivePageTable, TableKind, VirtualAddress};
use crate::paging::{KernelMapper, TableKind, VirtualAddress};
/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
#[inline(never)]
pub unsafe fn stack_trace() {
let mut fp: usize;
llvm_asm!("" : "={fp}"(fp) : : : "volatile");
asm!("mov {}, fp", out(reg) fp);
println!("TRACE: {:>016x}", fp);
/*TODO: implement using rmm
//Maximum 64 frames
let active_ktable = ActivePageTable::new(TableKind::Kernel);
let active_utable = ActivePageTable::new(TableKind::User);
@@ -37,6 +39,7 @@ pub unsafe fn stack_trace() {
println!(" {:>016x}: fp OVERFLOW", fp);
}
}
*/
}
///
/// Get a symbol


@@ -29,3 +29,5 @@ pub mod stop;
/// Early init support
pub mod init;
pub use ::rmm::AArch64Arch as CurrentRmmArch;


@@ -1,174 +1,23 @@
use core::mem;
use core::ptr::Unique;
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::memory::{allocate_frames, deallocate_frames, Frame};
use super::{ActivePageTable, Page, PAGE_SIZE, PageFlags, PhysicalAddress, TableKind, VirtualAddress};
use super::entry::{EntryFlags, PageDescriptorFlags};
use super::table::{self, Table, Level4};
use super::RmmA;
pub use rmm::{PageFlush, PageFlushAll};
pub use rmm::{Flusher, PageFlush, PageFlushAll};
#[derive(Debug)]
pub struct Mapper {
p4: Unique<Table<Level4>>,
pub table_kind: TableKind
pub struct InactiveFlusher { _inner: () }
impl InactiveFlusher {
// TODO: cpu id
pub fn new() -> Self { Self { _inner: () } }
}
impl Mapper {
/// Create a new page table
pub unsafe fn new(table_kind: TableKind) -> Mapper {
match table_kind {
TableKind::User => Mapper { p4: Unique::new_unchecked(table::U4), table_kind },
TableKind::Kernel => Mapper { p4: Unique::new_unchecked(table::P4), table_kind }
}
}
pub fn p4(&self) -> &Table<Level4> {
unsafe { self.p4.as_ref() }
}
pub fn p4_mut(&mut self) -> &mut Table<Level4> {
unsafe { self.p4.as_mut() }
}
/// Map a page to a frame
pub fn map_to(&mut self, page: Page, frame: Frame, flags: PageFlags<RmmA>) -> PageFlush<RmmA> {
let p3 = self.p4_mut().next_table_create(page.p4_index());
let p2 = p3.next_table_create(page.p3_index());
let p1 = p2.next_table_create(page.p2_index());
assert!(p1[page.p1_index()].is_unused(),
"{:X}: Set to {:X}: {:?}, requesting {:X}: {:?}",
page.start_address().data(),
p1[page.p1_index()].address().data(), p1[page.p1_index()].flags(),
frame.start_address().data(), flags);
p1.increment_entry_count();
p1[page.p1_index()].set(frame, flags);
PageFlush::new(page.start_address())
}
/// Map a page to the next free frame
pub fn map(&mut self, page: Page, flags: PageFlags<RmmA>) -> PageFlush<RmmA> {
let frame = allocate_frames(1).expect("out of frames");
self.map_to(page, frame, flags)
}
/// Update flags for a page
pub fn remap(&mut self, page: Page, flags: PageFlags<RmmA>) -> PageFlush<RmmA> {
let p3 = self.p4_mut().next_table_mut(page.p4_index()).expect("failed to remap: no p3");
let p2 = p3.next_table_mut(page.p3_index()).expect("failed to remap: no p2");
let p1 = p2.next_table_mut(page.p2_index()).expect("failed to remap: no p1");
let frame = p1[page.p1_index()].pointed_frame_at_l1().expect("failed to remap: not mapped");
p1[page.p1_index()].set(frame, flags);
PageFlush::new(page.start_address())
}
/// Identity map a frame
pub fn identity_map(&mut self, frame: Frame, flags: PageFlags<RmmA>) -> PageFlush<RmmA> {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().data()));
self.map_to(page, frame, flags)
}
fn unmap_inner(&mut self, page: Page, keep_parents: bool) -> Frame {
let frame;
let p4 = self.p4_mut();
if let Some(p3) = p4.next_table_mut(page.p4_index()) {
if let Some(p2) = p3.next_table_mut(page.p3_index()) {
if let Some(p1) = p2.next_table_mut(page.p2_index()) {
frame = if let Some(frame) = p1[page.p1_index()].pointed_frame_at_l1() {
frame
} else {
panic!("unmap_inner({:X}): frame not found", page.start_address().data())
};
p1.decrement_entry_count();
p1[page.p1_index()].set_unused();
if keep_parents || ! p1.is_unused() {
return frame;
}
} else {
panic!("unmap_inner({:X}): p1 not found", page.start_address().data());
}
if let Some(p1_frame) = p2[page.p2_index()].pointed_frame() {
//println!("unmap_inner: Free p1 {:?}", p1_frame);
p2.decrement_entry_count();
p2[page.p2_index()].set_unused();
deallocate_frames(p1_frame, 1);
} else {
panic!("unmap_inner({:X}): p1_frame not found", page.start_address().data());
}
if ! p2.is_unused() {
return frame;
}
} else {
panic!("unmap_inner({:X}): p2 not found", page.start_address().data());
}
if let Some(p2_frame) = p3[page.p3_index()].pointed_frame() {
//println!("unmap_inner: Free p2 {:?}", p2_frame);
p3.decrement_entry_count();
p3[page.p3_index()].set_unused();
deallocate_frames(p2_frame, 1);
} else {
panic!("unmap_inner({:X}): p2_frame not found", page.start_address().data());
}
if ! p3.is_unused() {
return frame;
}
} else {
panic!("unmap_inner({:X}): p3 not found", page.start_address().data());
}
if let Some(p3_frame) = p4[page.p4_index()].pointed_frame() {
//println!("unmap_inner: Free p3 {:?}", p3_frame);
p4.decrement_entry_count();
p4[page.p4_index()].set_unused();
deallocate_frames(p3_frame, 1);
} else {
panic!("unmap_inner({:X}): p3_frame not found", page.start_address().data());
}
frame
}
/// Unmap a page
pub fn unmap(&mut self, page: Page) -> PageFlush<RmmA> {
let frame = self.unmap_inner(page, false);
deallocate_frames(frame, 1);
PageFlush::new(page.start_address())
}
/// Unmap a page, return frame without free
pub fn unmap_return(&mut self, page: Page, keep_parents: bool) -> (PageFlush<RmmA>, Frame) {
let frame = self.unmap_inner(page, keep_parents);
(PageFlush::new(page.start_address()), frame)
}
pub fn translate_page(&self, page: Page) -> Option<Frame> {
self.p4().next_table(page.p4_index())
.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| p1[page.p1_index()].pointed_frame())
}
pub fn translate_page_flags(&self, page: Page) -> Option<PageFlags<RmmA>> {
self.p4().next_table(page.p4_index())
.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| Some(p1[page.p1_index()].flags()))
}
/// Translate a virtual address to a physical one
pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
let offset = virtual_address.data() % PAGE_SIZE;
self.translate_page(Page::containing_address(virtual_address))
.map(|frame| PhysicalAddress::new(frame.start_address().data() + offset))
impl Flusher<RmmA> for InactiveFlusher {
fn consume(&mut self, flush: PageFlush<RmmA>) {
// TODO: Push to TLB "mailbox" or tell it to reload CR3 if there are too many entries.
unsafe { flush.ignore(); }
}
}
impl Drop for InactiveFlusher {
fn drop(&mut self) {
ipi(IpiKind::Tlb, IpiTarget::Other);
}
}
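`InactiveFlusher` replaces the hand-rolled `Mapper`: page-table edits now go through rmm's `PageMapper`, which hands back `PageFlush` tokens, and a `Flusher` implementation decides what to do with them. For a table that is not currently active, each per-page flush can be ignored, and a single TLB-shootdown IPI is sent to the other CPUs when the flusher is dropped. A minimal sketch of the intended use (the address is hypothetical):

    use crate::paging::mapper::{Flusher, InactiveFlusher, PageFlush};
    use crate::paging::{RmmA, VirtualAddress};

    let mut flusher = InactiveFlusher::new();
    // Discard the per-page flush; the edited table is not the active one.
    flusher.consume(PageFlush::<RmmA>::new(VirtualAddress::new(0xdead_b000)));
    drop(flusher); // sends IpiKind::Tlb to the other CPUs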


@@ -2,65 +2,33 @@
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use core::{mem, ptr};
use core::ops::{Deref, DerefMut};
use spin::Mutex;
use crate::device::cpu::registers::{control_regs, tlb};
use crate::memory::{allocate_frames, Frame};
use self::mapper::{Mapper, PageFlushAll};
use self::temporary_page::TemporaryPage;
use self::entry::EntryFlags;
use self::mapper::PageFlushAll;
pub use rmm::{
AArch64Arch as RmmA,
Arch as RmmArch,
Flusher,
PageFlags,
PhysicalAddress,
TableKind,
VirtualAddress,
};
pub type PageMapper = rmm::PageMapper<RmmA, crate::arch::rmm::LockedAllocator>;
pub use crate::rmm::KernelMapper;
pub mod entry;
pub mod mapper;
pub mod table;
pub mod temporary_page;
/// Number of entries per page table
pub const ENTRY_COUNT: usize = 512;
pub const ENTRY_COUNT: usize = RmmA::PAGE_ENTRIES;
/// Size of pages
pub const PAGE_SIZE: usize = 4096;
//TODO: This is a rudimentary recursive mutex used to naively fix multi_core issues, replace it!
pub struct PageTableLock {
cpu_id: usize,
count: usize,
}
pub static PAGE_TABLE_LOCK: Mutex<PageTableLock> = Mutex::new(PageTableLock {
cpu_id: 0,
count: 0,
});
fn page_table_lock() {
let cpu_id = crate::cpu_id();
loop {
{
let mut lock = PAGE_TABLE_LOCK.lock();
if lock.count == 0 || lock.cpu_id == cpu_id {
lock.cpu_id = cpu_id;
lock.count += 1;
return;
}
}
crate::arch::interrupt::pause();
}
}
fn page_table_unlock() {
let mut lock = PAGE_TABLE_LOCK.lock();
lock.count -= 1;
}
pub const PAGE_SIZE: usize = RmmA::PAGE_SIZE;
/// Setup Memory Access Indirection Register
unsafe fn init_mair() {
@@ -73,8 +41,8 @@ unsafe fn init_mair() {
control_regs::mair_el1_write(val);
}
/// Map TSS
unsafe fn map_tss(cpu_id: usize) {
/// Map percpu
unsafe fn map_percpu(cpu_id: usize, mapper: &mut PageMapper) -> PageFlushAll<RmmA> {
extern "C" {
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
@@ -90,18 +58,18 @@ unsafe fn map_tss(cpu_id: usize) {
let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let mut mapper = crate::rmm::mapper_current();
let flush_all = PageFlushAll::new();
let mut flush_all = PageFlushAll::new();
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
let result = mapper.map(
page.start_address(),
PageFlags::new().write(true)
).expect("Failed to map TSS page");
PageFlags::new().write(true).custom_flag(EntryFlags::GLOBAL.bits(), cfg!(not(feature = "pti"))),
)
.expect("failed to allocate page table frames while mapping percpu");
flush_all.consume(result);
}
flush_all.flush();
flush_all
}
/// Copy tdata, clear tbss, set TCB self pointer
@@ -145,7 +113,7 @@ unsafe fn init_tcb(cpu_id: usize) -> usize {
/// Returns page table and thread control block offset
pub unsafe fn init(
cpu_id: usize,
) -> (ActivePageTable, usize) {
) -> usize {
extern "C" {
/// The starting byte of the text (code) data segment.
static mut __text_start: u8;
@@ -173,206 +141,32 @@ pub unsafe fn init(
static mut __bss_end: u8;
}
println!("INIT MAIR START");
init_mair();
println!("INIT MAIR COMPLETE");
println!("ACTIVE TABLE START");
let active_table = ActivePageTable::new_unlocked(TableKind::Kernel);
println!("ACTIVE TABLE COMPLETE");
let flush_all = map_percpu(cpu_id, KernelMapper::lock_manually(cpu_id).get_mut().expect("expected KernelMapper not to be locked re-entrant in paging::init"));
flush_all.flush();
println!("MAP TSS START");
map_tss(cpu_id);
println!("MAP TSS COMPLETE");
println!("INIT TCB START");
let tcb = init_tcb(cpu_id);
println!("INIT_TCB COMPLETE");
return (active_table, tcb);
return init_tcb(cpu_id);
}
pub unsafe fn init_ap(
cpu_id: usize,
bsp_table: usize,
bsp_table: &mut KernelMapper,
) -> usize {
init_mair();
let mut active_table = ActivePageTable::new_unlocked(TableKind::Kernel);
{
let flush_all = map_percpu(cpu_id, bsp_table.get_mut().expect("KernelMapper locked re-entrant for AP"));
let mut new_table = InactivePageTable::from_address(bsp_table);
// The flush can be ignored as this is not the active table. See later make_current().
flush_all.ignore();
};
// This switches the active table, which is setup by the bootloader, to a correct table
// setup by the lambda above. This will also flush the TLB
active_table.switch(new_table);
map_tss(cpu_id);
bsp_table.make_current();
init_tcb(cpu_id)
}
#[derive(Debug)]
pub struct ActivePageTable {
mapper: Mapper,
locked: bool,
}
impl Deref for ActivePageTable {
type Target = Mapper;
fn deref(&self) -> &Mapper {
&self.mapper
}
}
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper {
&mut self.mapper
}
}
impl ActivePageTable {
pub unsafe fn new(table_kind: TableKind) -> ActivePageTable {
page_table_lock();
ActivePageTable {
mapper: Mapper::new(table_kind),
locked: true,
}
}
pub unsafe fn new_unlocked(table_kind: TableKind) -> ActivePageTable {
ActivePageTable {
mapper: Mapper::new(table_kind),
locked: false,
}
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
let old_table: InactivePageTable;
match self.mapper.table_kind {
TableKind::User => {
old_table = InactivePageTable { frame: Frame::containing_address(PhysicalAddress::new(unsafe { control_regs::ttbr0_el1() } as usize)) };
unsafe { control_regs::ttbr0_el1_write(new_table.frame.start_address().data() as u64) };
},
TableKind::Kernel => {
old_table = InactivePageTable { frame: Frame::containing_address(PhysicalAddress::new(unsafe { control_regs::ttbr1_el1() } as usize)) };
unsafe { control_regs::ttbr1_el1_write(new_table.frame.start_address().data() as u64) };
}
}
unsafe { tlb::flush_all() };
old_table
}
pub fn flush(&mut self, page: Page) {
unsafe {
tlb::flush(page.start_address().data());
}
}
pub fn flush_all(&mut self) {
unsafe {
tlb::flush_all();
}
}
pub fn with<F>(&mut self, table: &mut InactivePageTable, temporary_page: &mut TemporaryPage, f: F)
where F: FnOnce(&mut Mapper)
{
{
let backup = Frame::containing_address(PhysicalAddress::new(unsafe {
match self.mapper.table_kind {
TableKind::User => control_regs::ttbr0_el1() as usize,
TableKind::Kernel => control_regs::ttbr1_el1() as usize,
}
}));
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(
backup.clone(),
PageFlags::new_table().write(true), //TODO: RISC-V will not like this
self,
);
// overwrite recursive mapping
self.p4_mut()[crate::RECURSIVE_PAGE_PML4].set(
table.frame.clone(),
PageFlags::new_table().write(true), //TODO: RISC-V will not like this
);
self.flush_all();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[crate::RECURSIVE_PAGE_PML4].set(
backup,
PageFlags::new_table().write(true), //TODO: RISC-V will not like this
);
self.flush_all();
}
temporary_page.unmap(self);
}
pub unsafe fn address(&self) -> usize {
match self.mapper.table_kind {
TableKind::User => control_regs::ttbr0_el1() as usize,
TableKind::Kernel => control_regs::ttbr1_el1() as usize,
}
}
}
impl Drop for ActivePageTable {
fn drop(&mut self) {
if self.locked {
page_table_unlock();
self.locked = false;
}
}
}
pub struct InactivePageTable {
frame: Frame,
}
impl InactivePageTable {
pub fn new(
frame: Frame,
active_table: &mut ActivePageTable,
temporary_page: &mut TemporaryPage,
) -> InactivePageTable {
{
let table = temporary_page.map_table_frame(
frame.clone(),
PageFlags::new_table().write(true), //TODO: RISC-V will not like this
active_table,
);
// now we are able to zero the table
table.zero();
// set up recursive mapping for the table
table[crate::RECURSIVE_PAGE_PML4].set(
frame.clone(),
PageFlags::new_table().write(true), //TODO: RISC-V will not like this
);
}
temporary_page.unmap(active_table);
InactivePageTable { frame: frame }
}
pub unsafe fn from_address(address: usize) -> InactivePageTable {
InactivePageTable {
frame: Frame::containing_address(PhysicalAddress::new(address)),
}
}
pub unsafe fn address(&self) -> usize {
self.frame.start_address().data()
}
}
/// Page
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
@@ -408,13 +202,19 @@ impl Page {
}
}
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
pub fn range_inclusive(start: Page, r#final: Page) -> PageIter {
PageIter { start, end: r#final.next() }
}
pub fn range_exclusive(start: Page, end: Page) -> PageIter {
PageIter { start, end }
}
pub fn next(self) -> Page {
self.next_by(1)
}
pub fn next_by(self, n: usize) -> Page {
Self {
number: self.number + 1,
number: self.number + n,
}
}
}
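`range_inclusive` now normalizes the bound: it stores `final.next()` as an exclusive `end`, which is what lets the iterator below compare with a plain `<`. A quick sketch of the resulting semantics, assuming 4 KiB pages:

    let first = Page::containing_address(VirtualAddress::new(0x1000));
    let last = Page::containing_address(VirtualAddress::new(0x3000));
    // Yields the pages at 0x1000, 0x2000 and 0x3000.
    assert_eq!(Page::range_inclusive(first, last).count(), 3);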
@@ -428,7 +228,7 @@ impl Iterator for PageIter {
type Item = Page;
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
if self.start < self.end {
let page = self.start;
self.start = self.start.next();
Some(page)
@@ -437,3 +237,12 @@ impl Iterator for PageIter {
}
}
}
/// Round down to the nearest multiple of page size
pub fn round_down_pages(number: usize) -> usize {
number - number % PAGE_SIZE
}
/// Round up to the nearest multiple of page size
pub fn round_up_pages(number: usize) -> usize {
round_down_pages(number + PAGE_SIZE - 1)
}
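A worked example of the two helpers, assuming the 4096-byte `PAGE_SIZE`:

    assert_eq!(round_down_pages(5000), 4096); // 5000 - (5000 % 4096)
    assert_eq!(round_up_pages(5000), 8192);   // round_down_pages(5000 + 4095)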


@@ -1,160 +0,0 @@
//! # Page table
//! Code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use crate::memory::allocate_frames;
use super::entry::{TableDescriptorFlags, Entry};
use super::ENTRY_COUNT;
pub const P4: *mut Table<Level4> = 0xffff_ffff_ffff_f000 as *mut _;
pub const U4: *mut Table<Level4> = 0x0000_ffff_ffff_f000 as *mut _;
const KSPACE_ADDR_MASK: usize = 0xffff_0000_0000_0000;
const USPACE_ADDR_MASK: usize = 0x0000_ffff_ffff_ffff;
pub trait TableLevel {}
pub enum Level4 {}
pub enum Level3 {}
pub enum Level2 {}
pub enum Level1 {}
impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}
pub trait HierarchicalLevel: TableLevel {
type NextLevel: TableLevel;
}
impl HierarchicalLevel for Level4 {
type NextLevel = Level3;
}
impl HierarchicalLevel for Level3 {
type NextLevel = Level2;
}
impl HierarchicalLevel for Level2 {
type NextLevel = Level1;
}
pub struct Table<L: TableLevel> {
entries: [Entry; ENTRY_COUNT],
level: PhantomData<L>,
}
impl<L> Table<L> where L: TableLevel {
pub fn is_unused(&self) -> bool {
if self.entry_count() > 0 {
return false;
}
true
}
pub fn zero(&mut self) {
for entry in self.entries.iter_mut() {
entry.set_zero();
}
}
/// Set number of entries in first table entry
/// FIXMES:
/// Only 1 bit per table entry seems to work. So we need 9 entries (!).
/// This is one reason why we need to have a non-recursive paging scheme.
/// These updates require memory barriers and TLB invalidations.
fn set_entry_count(&mut self, count: u64) {
debug_assert!(count <= ENTRY_COUNT as u64, "count can't be greater than ENTRY_COUNT");
self.entries[0].set_counter_bits((count >> 0) & 0x1);
self.entries[1].set_counter_bits((count >> 1) & 0x1);
self.entries[2].set_counter_bits((count >> 2) & 0x1);
self.entries[3].set_counter_bits((count >> 3) & 0x1);
self.entries[4].set_counter_bits((count >> 4) & 0x1);
self.entries[5].set_counter_bits((count >> 5) & 0x1);
self.entries[6].set_counter_bits((count >> 6) & 0x1);
self.entries[7].set_counter_bits((count >> 7) & 0x1);
self.entries[8].set_counter_bits((count >> 8) & 0x1);
}
/// Get number of entries from first table entry
fn entry_count(&self) -> u64 {
let mut count: u64 = (self.entries[0].counter_bits() & 0x1) << 0;
count |= (self.entries[1].counter_bits() & 0x1) << 1;
count |= (self.entries[2].counter_bits() & 0x1) << 2;
count |= (self.entries[3].counter_bits() & 0x1) << 3;
count |= (self.entries[4].counter_bits() & 0x1) << 4;
count |= (self.entries[5].counter_bits() & 0x1) << 5;
count |= (self.entries[6].counter_bits() & 0x1) << 6;
count |= (self.entries[7].counter_bits() & 0x1) << 7;
count |= (self.entries[8].counter_bits() & 0x1) << 8;
count
}
pub fn increment_entry_count(&mut self) {
let current_count = self.entry_count();
self.set_entry_count(current_count + 1);
}
pub fn decrement_entry_count(&mut self) {
let current_count = self.entry_count();
self.set_entry_count(current_count - 1);
}
}
impl<L> Table<L> where L: HierarchicalLevel {
pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
self.next_table_address(index).map(|address| unsafe { &*(address as *const _) })
}
pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
self.next_table_address(index).map(|address| unsafe { &mut *(address as *mut _) })
}
pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel> {
if self.next_table(index).is_none() {
let frame = allocate_frames(1).expect("no frames available");
self.increment_entry_count();
/* Allow users to go down the page table, implement permissions at the page level */
let mut perms = TableDescriptorFlags::VALID;
perms |= TableDescriptorFlags::TABLE;
self[index].page_table_entry_set(frame, perms);
self.next_table_mut(index).unwrap().zero();
}
self.next_table_mut(index).unwrap()
}
fn next_table_address(&self, index: usize) -> Option<usize> {
let entry_flags = self[index].page_table_entry_flags();
if entry_flags.contains(TableDescriptorFlags::VALID) {
let table_address = self as *const _ as usize;
if (table_address & KSPACE_ADDR_MASK) != 0 {
Some((table_address << 9) | (index << 12))
} else {
Some(((table_address << 9) | (index << 12)) & USPACE_ADDR_MASK)
}
} else {
None
}
}
}
impl<L> Index<usize> for Table<L> where L: TableLevel {
type Output = Entry;
fn index(&self, index: usize) -> &Entry {
&self.entries[index]
}
}
impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
fn index_mut(&mut self, index: usize) -> &mut Entry {
&mut self.entries[index]
}
}


@@ -1,42 +0,0 @@
//! Temporarily map a page
//! From [Phil Opp's Blog](http://os.phil-opp.com/remap-the-kernel.html)
use crate::memory::Frame;
use super::{ActivePageTable, Page, PageFlags, RmmA, VirtualAddress};
use super::table::{Table, Level1};
pub struct TemporaryPage {
page: Page,
}
impl TemporaryPage {
pub fn new(page: Page) -> TemporaryPage {
TemporaryPage { page }
}
pub fn start_address (&self) -> VirtualAddress {
self.page.start_address()
}
/// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page.
pub fn map(&mut self, frame: Frame, flags: PageFlags<RmmA>, active_table: &mut ActivePageTable) -> VirtualAddress {
assert!(active_table.translate_page(self.page).is_none(), "temporary page is already mapped");
let result = active_table.map_to(self.page, frame, flags);
result.flush();
self.page.start_address()
}
/// Maps the temporary page to the given page table frame in the active
/// table. Returns a reference to the now mapped table.
pub fn map_table_frame(&mut self, frame: Frame, flags: PageFlags<RmmA>, active_table: &mut ActivePageTable) -> &mut Table<Level1> {
unsafe { &mut *(self.map(frame, flags, active_table).data() as *mut Table<Level1>) }
}
/// Unmaps the temporary page in the active table.
pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
let (result, _frame) = active_table.unmap_return(self.page, true);
result.flush();
}
}


@@ -1,3 +1,9 @@
use core::{
cmp,
mem,
slice,
sync::atomic::{self, AtomicUsize, Ordering},
};
use rmm::{
KILOBYTE,
MEGABYTE,
@@ -11,12 +17,11 @@ use rmm::{
MemoryArea,
PageFlags,
PageMapper,
PageTable,
PhysicalAddress,
VirtualAddress,
};
use spin::Mutex;
use spin::{Mutex, MutexGuard};
extern "C" {
/// The starting byte of the text (code) data segment.
@@ -29,6 +34,25 @@ extern "C" {
static mut __rodata_end: u8;
}
// Keep synced with OsMemoryKind in bootloader
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[repr(u64)]
pub enum BootloaderMemoryKind {
Null = 0,
Free = 1,
Reclaim = 2,
Reserved = 3,
}
// Keep synced with OsMemoryEntry in bootloader
#[derive(Clone, Copy, Debug)]
#[repr(packed)]
pub struct BootloaderMemoryEntry {
pub base: u64,
pub size: u64,
pub kind: BootloaderMemoryKind,
}
unsafe fn page_flags<A: Arch>(virt: VirtualAddress) -> PageFlags<A> {
let virt_addr = virt.data();
@@ -52,68 +76,27 @@ unsafe fn page_flags<A: Arch>(virt: VirtualAddress) -> PageFlags<A> {
}
}
unsafe fn dump_tables<A: Arch>(table: PageTable<A>) {
let level = table.level();
for i in 0..A::PAGE_ENTRIES {
if let Some(entry) = table.entry(i) {
if entry.present() {
let base = table.entry_base(i).unwrap();
let address = entry.address();
let flags = entry.flags();
for level in level..A::PAGE_LEVELS {
print!(" ");
}
println!(
"{}: map 0x{:X} to 0x{:X} flags 0x{:X}",
i,
base.data(),
address.data(),
flags
);
// This somewhat handles block entries
if flags & (1 << 1) != 0 {
if let Some(next) = table.next(i) {
for level in level..A::PAGE_LEVELS {
print!(" ");
}
println!("{{");
dump_tables(next);
for level in level..A::PAGE_LEVELS {
print!(" ");
}
println!("}}");
}
}
}
}
}
}
unsafe fn inner<A: Arch>(areas: &'static [MemoryArea], kernel_base: usize, kernel_size_aligned: usize, bump_offset: usize) -> BuddyAllocator<A> {
unsafe fn inner<A: Arch>(
areas: &'static [MemoryArea],
kernel_base: usize, kernel_size_aligned: usize,
stack_base: usize, stack_size_aligned: usize,
env_base: usize, env_size_aligned: usize,
acpi_base: usize, acpi_size_aligned: usize,
initfs_base: usize, initfs_size_aligned: usize,
) -> BuddyAllocator<A> {
// First, calculate how much memory we have
let mut size = 0;
for area in areas.iter() {
if area.size > 0 {
println!("{:X?}", area);
log::debug!("{:X?}", area);
size += area.size;
}
}
println!("Memory: {} MB", (size + (MEGABYTE - 1)) / MEGABYTE);
log::info!("Memory: {} MB", (size + (MEGABYTE - 1)) / MEGABYTE);
// Create a basic allocator for the first pages
let mut bump_allocator = BumpAllocator::<A>::new(areas, bump_offset);
{
let mut mapper = PageMapper::<A, _>::current(
&mut bump_allocator
);
println!("Old Table: {:X}", mapper.table().phys().data());
//dump_tables(mapper.table());
}
let mut bump_allocator = BumpAllocator::<A>::new(areas, 0);
{
let mut mapper = PageMapper::<A, _>::create(
@@ -135,7 +118,7 @@ unsafe fn inner<A: Arch>(areas: &'static [MemoryArea], kernel_base: usize, kerne
}
}
//TODO: this is a hack to add the aarch64 kernel mapping
// Map kernel at KERNEL_OFFSET and identity map too
for i in 0..kernel_size_aligned / A::PAGE_SIZE {
let phys = PhysicalAddress::new(kernel_base + i * A::PAGE_SIZE);
let virt = VirtualAddress::new(crate::KERNEL_OFFSET + i * A::PAGE_SIZE);
@@ -146,8 +129,38 @@ unsafe fn inner<A: Arch>(areas: &'static [MemoryArea], kernel_base: usize, kerne
flags
).expect("failed to map frame");
flush.ignore(); // Not the active table
let virt = A::phys_to_virt(phys);
let flush = mapper.map_phys(
virt,
phys,
flags
).expect("failed to map frame");
flush.ignore(); // Not the active table
}
let mut identity_map = |base, size_aligned| {
// Identity map the given physical region
for i in 0..size_aligned / A::PAGE_SIZE {
let phys = PhysicalAddress::new(base + i * A::PAGE_SIZE);
let virt = A::phys_to_virt(phys);
let flags = page_flags::<A>(virt);
let flush = mapper.map_phys(
virt,
phys,
flags
).expect("failed to map frame");
flush.ignore(); // Not the active table
}
};
identity_map(stack_base, stack_size_aligned);
identity_map(env_base, env_size_aligned);
identity_map(acpi_base, acpi_size_aligned);
identity_map(initfs_base, initfs_size_aligned);
//TODO: this is another hack to map our UART
match crate::device::serial::COM1.lock().as_ref().map(|x| x.base()) {
Some(serial_base) => {
@@ -161,13 +174,14 @@ unsafe fn inner<A: Arch>(areas: &'static [MemoryArea], kernel_base: usize, kerne
None => (),
}
//TODO: remove backwards compatible recursive mapping
mapper.table().set_entry(511, rmm::PageEntry::new(
mapper.table().phys().data() | A::ENTRY_FLAG_READWRITE | A::ENTRY_FLAG_DEFAULT_TABLE
));
println!("New Table: {:X}", mapper.table().phys().data());
//dump_tables(mapper.table());
log::debug!("Table: {:X}", mapper.table().phys().data());
for i in 0..512 {
if let Some(entry) = mapper.table().entry(i) {
if entry.present() {
log::debug!("{}: {:X}", i, entry.data());
}
}
}
// Use the new table
mapper.make_current();
@@ -175,26 +189,20 @@ unsafe fn inner<A: Arch>(areas: &'static [MemoryArea], kernel_base: usize, kerne
// Create the physical memory map
let offset = bump_allocator.offset();
println!("Permanently used: {} KB", (offset + (KILOBYTE - 1)) / KILOBYTE);
log::info!("Permanently used: {} KB", (offset + (KILOBYTE - 1)) / KILOBYTE);
BuddyAllocator::<A>::new(bump_allocator).expect("failed to create BuddyAllocator")
}
pub struct LockedAllocator {
inner: Mutex<Option<BuddyAllocator<RmmA>>>,
}
// There can only be one allocator (at the moment), so making this a ZST is great!
#[derive(Clone, Copy)]
pub struct LockedAllocator;
impl LockedAllocator {
const fn new() -> Self {
Self {
inner: Mutex::new(None)
}
}
}
static INNER_ALLOCATOR: Mutex<Option<BuddyAllocator<RmmA>>> = Mutex::new(None);
impl FrameAllocator for LockedAllocator {
unsafe fn allocate(&mut self, count: FrameCount) -> Option<PhysicalAddress> {
if let Some(ref mut allocator) = *self.inner.lock() {
if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock() {
allocator.allocate(count)
} else {
None
@@ -202,59 +210,153 @@ impl FrameAllocator for LockedAllocator {
}
unsafe fn free(&mut self, address: PhysicalAddress, count: FrameCount) {
if let Some(ref mut allocator) = *self.inner.lock() {
if let Some(ref mut allocator) = *INNER_ALLOCATOR.lock() {
allocator.free(address, count)
}
}
unsafe fn usage(&self) -> FrameUsage {
if let Some(ref allocator) = *self.inner.lock() {
if let Some(ref allocator) = *INNER_ALLOCATOR.lock() {
allocator.usage()
} else {
FrameUsage::new(FrameCount::new(0), FrameCount::new(0))
}
}
}
impl core::fmt::Debug for LockedAllocator {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match INNER_ALLOCATOR.try_lock().as_deref() {
Some(Some(alloc)) => write!(f, "[locked allocator: {:?}]", unsafe { alloc.usage() }),
Some(None) => write!(f, "[uninitialized lock allocator]"),
None => write!(f, "[failed to lock]"),
}
}
}
static mut AREAS: [MemoryArea; 512] = [MemoryArea {
base: PhysicalAddress::new(0),
size: 0,
}; 512];
pub static mut FRAME_ALLOCATOR: LockedAllocator = LockedAllocator::new();
pub static FRAME_ALLOCATOR: LockedAllocator = LockedAllocator;
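With the buddy allocator moved into the private `INNER_ALLOCATOR` static, `LockedAllocator` becomes a zero-sized `Copy` handle, eliminating the old `static mut` and the unsafety of handing out `&mut` references to it. A sketch of use, assuming `rmm::init` has already populated the inner allocator:

    let mut allocator = FRAME_ALLOCATOR; // ZST, trivially copied
    if let Some(phys) = unsafe { allocator.allocate(FrameCount::new(1)) } {
        unsafe { allocator.free(phys, FrameCount::new(1)) };
    }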
pub unsafe fn mapper_new(table_addr: PhysicalAddress) -> PageMapper<'static, RmmA, LockedAllocator> {
PageMapper::new(table_addr, &mut FRAME_ALLOCATOR)
const NO_PROCESSOR: usize = !0;
static LOCK_OWNER: AtomicUsize = AtomicUsize::new(NO_PROCESSOR);
static LOCK_COUNT: AtomicUsize = AtomicUsize::new(0);
// TODO: Support, perhaps via const generics, embedding address checking in PageMapper, thereby
// statically enforcing that the kernel mapper can only map things in the kernel half, and vice
// versa.
/// A guard to the global lock protecting the upper 128 TiB of kernel address space.
///
/// NOTE: Use this with great care! Since heap allocations may also require this lock when the heap
/// needs to be expanded, it must not be held while memory allocations are done!
// TODO: Make the lock finer-grained so that e.g. the heap part can be independent from e.g.
// PHYS_PML4?
pub struct KernelMapper {
mapper: crate::paging::PageMapper,
ro: bool,
}
impl KernelMapper {
fn lock_inner(current_processor: usize) -> bool {
loop {
match LOCK_OWNER.compare_exchange_weak(NO_PROCESSOR, current_processor, Ordering::Acquire, Ordering::Relaxed) {
Ok(_) => break,
// already owned by this hardware thread
Err(id) if id == current_processor => break,
// either CAS failed, or some other hardware thread holds the lock
Err(_) => core::hint::spin_loop(),
}
}
let prev_count = LOCK_COUNT.fetch_add(1, Ordering::Relaxed);
atomic::compiler_fence(Ordering::Acquire);
prev_count > 0
}
pub unsafe fn lock_for_manual_mapper(current_processor: usize, mapper: crate::paging::PageMapper) -> Self {
let ro = Self::lock_inner(current_processor);
Self {
mapper,
ro,
}
}
pub fn lock_manually(current_processor: usize) -> Self {
unsafe { Self::lock_for_manual_mapper(current_processor, PageMapper::new(RmmA::table(), FRAME_ALLOCATOR)) }
}
pub fn lock() -> Self {
Self::lock_manually(crate::cpu_id())
}
pub fn get_mut(&mut self) -> Option<&mut crate::paging::PageMapper> {
if self.ro {
None
} else {
Some(&mut self.mapper)
}
}
}
impl core::ops::Deref for KernelMapper {
type Target = crate::paging::PageMapper;
fn deref(&self) -> &Self::Target {
&self.mapper
}
}
impl Drop for KernelMapper {
fn drop(&mut self) {
if LOCK_COUNT.fetch_sub(1, Ordering::Relaxed) == 1 {
LOCK_OWNER.store(NO_PROCESSOR, Ordering::Release);
}
atomic::compiler_fence(Ordering::Release);
}
}
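The lock is recursive per CPU: the first `lock()` on a CPU gets read-write access, while a nested `lock()` on the same CPU succeeds but is demoted to read-only, so `get_mut()` is where re-entrance surfaces. A minimal sketch of those semantics, assuming both guards are taken on one CPU with no contention:

    let mut outer = KernelMapper::lock();
    assert!(outer.get_mut().is_some()); // sole holder: read-write

    let mut nested = KernelMapper::lock();
    assert!(nested.get_mut().is_none()); // re-entrant: read-only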
//TODO: global paging lock?
pub unsafe fn mapper_create() -> Option<PageMapper<'static, RmmA, LockedAllocator>> {
PageMapper::create(&mut FRAME_ALLOCATOR)
}
pub unsafe fn mapper_current() -> PageMapper<'static, RmmA, LockedAllocator> {
PageMapper::current(&mut FRAME_ALLOCATOR)
}
pub unsafe fn init(kernel_base: usize, kernel_size: usize) {
pub unsafe fn init(
kernel_base: usize, kernel_size: usize,
stack_base: usize, stack_size: usize,
env_base: usize, env_size: usize,
acpi_base: usize, acpi_size: usize,
areas_base: usize, areas_size: usize,
initfs_base: usize, initfs_size: usize,
) {
type A = RmmA;
let real_base = 0;
let real_size = 0x100000;
let real_end = real_base + real_size;
let kernel_size_aligned = ((kernel_size + (A::PAGE_SIZE - 1))/A::PAGE_SIZE) * A::PAGE_SIZE;
let kernel_end = kernel_base + kernel_size_aligned;
println!("kernel_end: {:X}", kernel_end);
let stack_size_aligned = ((stack_size + (A::PAGE_SIZE - 1))/A::PAGE_SIZE) * A::PAGE_SIZE;
let stack_end = stack_base + stack_size_aligned;
let env_size_aligned = ((env_size + (A::PAGE_SIZE - 1))/A::PAGE_SIZE) * A::PAGE_SIZE;
let env_end = env_base + env_size_aligned;
let acpi_size_aligned = ((acpi_size + (A::PAGE_SIZE - 1))/A::PAGE_SIZE) * A::PAGE_SIZE;
let acpi_end = acpi_base + acpi_size_aligned;
let initfs_size_aligned = ((initfs_size + (A::PAGE_SIZE - 1))/A::PAGE_SIZE) * A::PAGE_SIZE;
let initfs_end = initfs_base + initfs_size_aligned;
let bootloader_areas = slice::from_raw_parts(
areas_base as *const BootloaderMemoryEntry,
areas_size / mem::size_of::<BootloaderMemoryEntry>()
);
// Copy memory map from bootloader location, and page align it
let mut area_i = 0;
let mut bump_offset = 0;
for i in 0..512 {
let old = &crate::init::device_tree::MEMORY_MAP[i];
if old._type != 1 {
for bootloader_area in bootloader_areas.iter() {
if bootloader_area.kind != BootloaderMemoryKind::Free {
// Not a free area
continue;
}
let mut base = old.base_addr as usize;
let mut size = old.length as usize;
let mut base = bootloader_area.base as usize;
let mut size = bootloader_area.size as usize;
log::debug!("{:X}:{:X}", base, size);
// Page align base
let base_offset = (A::PAGE_SIZE - (base & A::PAGE_OFFSET_MASK)) & A::PAGE_OFFSET_MASK;
@@ -267,26 +369,69 @@ pub unsafe fn init(kernel_base: usize, kernel_size: usize) {
// Page align size
size &= !A::PAGE_OFFSET_MASK;
log::debug!(" => {:X}:{:X}", base, size);
let mut new_base = base;
// Ensure real-mode areas are not used
if base < real_end && base + size > real_base {
log::warn!("{:X}:{:X} overlaps with real mode {:X}:{:X}", base, size, real_base, real_size);
new_base = cmp::max(new_base, real_end);
}
// Ensure kernel areas are not used
if base < kernel_end && base + size > kernel_base {
log::warn!("{:X}:{:X} overlaps with kernel {:X}:{:X}", base, size, kernel_base, kernel_size);
new_base = cmp::max(new_base, kernel_end);
}
// Ensure stack areas are not used
if base < stack_end && base + size > stack_base {
log::warn!("{:X}:{:X} overlaps with stack {:X}:{:X}", base, size, stack_base, stack_size);
new_base = cmp::max(new_base, stack_end);
}
// Ensure env areas are not used
if base < env_end && base + size > env_base {
log::warn!("{:X}:{:X} overlaps with env {:X}:{:X}", base, size, env_base, env_size);
new_base = cmp::max(new_base, env_end);
}
// Ensure acpi areas are not used
if base < acpi_end && base + size > acpi_base {
log::warn!("{:X}:{:X} overlaps with acpi {:X}:{:X}", base, size, acpi_base, acpi_size);
new_base = cmp::max(new_base, acpi_end);
}
if base < initfs_end && base + size > initfs_base {
log::warn!("{:X}:{:X} overlaps with initfs {:X}:{:X}", base, size, initfs_base, initfs_size);
new_base = cmp::max(new_base, initfs_end);
}
if new_base != base {
let end = base + size;
let new_size = end.checked_sub(new_base).unwrap_or(0);
log::info!("{:X}:{:X} moved to {:X}:{:X}", base, size, new_base, new_size);
base = new_base;
size = new_size;
}
if size == 0 {
// Area is zero sized
continue;
}
if base + size < kernel_end {
// Area is below static kernel data
bump_offset += size;
} else if base < kernel_end {
// Area contains static kernel data
bump_offset += kernel_end - base;
}
AREAS[area_i].base = PhysicalAddress::new(base);
AREAS[area_i].size = size;
area_i += 1;
}
println!("bump_offset: {:X}", bump_offset);
let allocator = inner::<A>(&AREAS, kernel_base, kernel_size_aligned, bump_offset);
*FRAME_ALLOCATOR.inner.lock() = Some(allocator);
let allocator = inner::<A>(
&AREAS,
kernel_base, kernel_size_aligned,
stack_base, stack_size_aligned,
env_base, env_size_aligned,
acpi_base, acpi_size_aligned,
initfs_base, initfs_size_aligned,
);
*INNER_ALLOCATOR.lock() = Some(allocator);
}
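The clamping above only ever moves an area's base forward, past each reserved region it overlaps. A worked example with illustrative numbers:

    // bootloader area: 0x0..0x20_0000 (2 MiB, kind Free)
    // real mode  0x0..0x10_0000       -> new_base = 0x10_0000
    // kernel     0x10_0000..0x18_0000 -> new_base = max(0x10_0000, 0x18_0000)
    // result: area trimmed to 0x18_0000..0x20_0000, size 0x8_0000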


@@ -7,14 +7,16 @@ use core::slice;
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use crate::memory::{Frame};
use crate::paging::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
use crate::paging::{Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
use crate::allocator;
use crate::device;
#[cfg(feature = "graphical_debug")]
use crate::devices::graphical_debug;
use crate::init::device_tree;
use crate::interrupt;
use crate::log::{self, info};
use crate::paging;
use crate::paging::{self, KernelMapper};
/// Test of zero values in BSS.
static BSS_TEST_ZERO: usize = 0;
@@ -35,20 +37,29 @@ static BSP_READY: AtomicBool = AtomicBool::new(false);
#[repr(packed)]
pub struct KernelArgs {
kernel_base: u64,
kernel_size: u64,
stack_base: u64,
stack_size: u64,
env_base: u64,
env_size: u64,
dtb_base: u64,
dtb_size: u64,
kernel_base: usize,
kernel_size: usize,
stack_base: usize,
stack_size: usize,
env_base: usize,
env_size: usize,
dtb_base: usize,
dtb_size: usize,
areas_base: usize,
areas_size: usize,
/// The physical base 64-bit pointer to the contiguous bootstrap/initfs.
bootstrap_base: usize,
/// Size of contiguous bootstrap/initfs physical region, not necessarily page aligned.
bootstrap_size: usize,
/// Entry point the kernel will jump to.
bootstrap_entry: usize,
}
/// The entry to Rust, all things must be initialized
#[no_mangle]
pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
let env = {
let bootstrap = {
let args = &*args_ptr;
let kernel_base = args.kernel_base as usize;
@@ -72,6 +83,13 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
// Try to find serial port prior to logging
device::serial::init_early(crate::KERNEL_DEVMAP_OFFSET + dtb_base, dtb_size);
// Convert env to slice
let env = slice::from_raw_parts((args.env_base + crate::PHYS_OFFSET) as *const u8, args.env_size);
// Set up graphical debug
#[cfg(feature = "graphical_debug")]
graphical_debug::init(env);
// Initialize logger
log::init_logger(|r| {
use core::fmt::Write;
@@ -85,10 +103,13 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
});
info!("Redox OS starting...");
info!("Kernel: {:X}:{:X}", kernel_base, kernel_base + kernel_size);
info!("Stack: {:X}:{:X}", stack_base, stack_base + stack_size);
info!("Env: {:X}:{:X}", env_base, env_base + env_size);
info!("DTB: {:X}:{:X}", dtb_base, dtb_base + dtb_size);
info!("Kernel: {:X}:{:X}", args.kernel_base, args.kernel_base + args.kernel_size);
info!("Stack: {:X}:{:X}", args.stack_base, args.stack_base + args.stack_size);
info!("Env: {:X}:{:X}", args.env_base, args.env_base + args.env_size);
info!("RSDPs: {:X}:{:X}", args.dtb_base, args.dtb_base + args.dtb_size);
info!("Areas: {:X}:{:X}", args.areas_base, args.areas_base + args.areas_size);
info!("Bootstrap: {:X}:{:X}", args.bootstrap_base, args.bootstrap_base + args.bootstrap_size);
info!("Bootstrap entry point: {:X}", args.bootstrap_entry);
println!("FILL MEMORY MAP START");
device_tree::fill_memory_map(crate::KERNEL_DEVMAP_OFFSET + dtb_base, dtb_size);
@@ -100,12 +121,19 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
// Initialize RMM
println!("RMM INIT START");
crate::arch::rmm::init(kernel_base, kernel_size + stack_size);
crate::arch::rmm::init(
args.kernel_base, args.kernel_size,
args.stack_base, args.stack_size,
args.env_base, args.env_size,
args.dtb_base, args.dtb_size,
args.areas_base, args.areas_size,
args.bootstrap_base, args.bootstrap_size,
);
println!("RMM INIT COMPLETE");
// Initialize paging
println!("PAGING INIT START");
let (mut active_table, _tcb_offset) = paging::init(0);
let tcb_offset = paging::init(0);
println!("PAGING INIT COMPLETE");
// Test tdata and tbss
@@ -125,7 +153,7 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
// Setup kernel heap
println!("ALLOCATOR INIT START");
allocator::init(&mut active_table);
allocator::init();
println!("ALLOCATOR INIT COMPLETE");
// Activate memory logging
@@ -135,7 +163,7 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
// Initialize devices
println!("DEVICE INIT START");
device::init(&mut active_table);
device::init();
println!("DEVICE INIT COMPLETE");
// Initialize all of the non-core devices not otherwise needed to complete initialization
@@ -145,11 +173,16 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
BSP_READY.store(true, Ordering::SeqCst);
slice::from_raw_parts(env_base as *const u8, env_size)
crate::Bootstrap {
base: crate::memory::Frame::containing_address(crate::paging::PhysicalAddress::new(args.bootstrap_base)),
page_count: args.bootstrap_size / crate::memory::PAGE_SIZE,
entry: args.bootstrap_entry,
env,
}
};
println!("KMAIN");
crate::kmain(CPU_COUNT.load(Ordering::SeqCst), env);
crate::kmain(CPU_COUNT.load(Ordering::SeqCst), bootstrap);
}
#[repr(packed)]
@@ -166,7 +199,15 @@ pub unsafe extern fn kstart_ap(args_ptr: *const KernelArgsAp) -> ! {
}
#[naked]
pub unsafe fn usermode(ip: usize, sp: usize, arg: usize, _singlestep: u32) -> ! {
pub unsafe fn usermode(ip: usize, sp: usize, arg: usize, _singlestep: usize) -> ! {
core::arch::asm!(
"
1:
b 1b
",
options(noreturn)
);
/*TODO: update to asm
let cpu_id: usize = 0;
let spsr: u32 = 0;
@@ -178,4 +219,5 @@ pub unsafe fn usermode(ip: usize, sp: usize, arg: usize, _singlestep: u32) -> !
llvm_asm!("eret" : : : : "volatile");
unreachable!();
*/
}


@@ -1,10 +1,12 @@
use core::arch::asm;
#[no_mangle]
pub unsafe extern fn kreset() -> ! {
println!("kreset");
let val: u32 = 0x8400_0009;
llvm_asm!("mov x0, $0" : : "r"(val) : : "volatile");
llvm_asm!("hvc #0" : : : : "volatile");
asm!("mov x0, {}", in(reg) val);
asm!("hvc #0");
unreachable!();
}
@@ -14,8 +16,8 @@ pub unsafe extern fn kstop() -> ! {
println!("kstop");
let val: u32 = 0x8400_0008;
llvm_asm!("mov x0, $0" : : "r"(val) : : "volatile");
llvm_asm!("hvc #0" : : : : "volatile");
asm!("mov x0, {}", in(reg) val);
asm!("hvc #0");
unreachable!();
}
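`0x8400_0008` and `0x8400_0009` are the PSCI `SYSTEM_OFF` and `SYSTEM_RESET` function IDs, issued to the firmware/hypervisor via `hvc`. Splitting the `mov` and the `hvc` into two `asm!` blocks relies on the compiler leaving `x0` untouched in between; a single block that pins the register explicitly would be more robust (a sketch, not what this commit does):

    unsafe {
        // PSCI SYSTEM_OFF; the call does not return.
        core::arch::asm!("hvc #0", in("x0") 0x8400_0008u64, options(noreturn));
    }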


@@ -1,5 +1,6 @@
use core::mem;
use core::sync::atomic::{AtomicBool, Ordering};
use spin::Once;
use crate::device::cpu::registers::{control_regs, tlb};
use crate::syscall::FloatRegisters;
@@ -10,6 +11,10 @@ use crate::syscall::FloatRegisters;
/// This must be done, as no locks can be held on the stack during switch
pub static CONTEXT_SWITCH_LOCK: AtomicBool = AtomicBool::new(false);
//TODO: find out ideal size
pub const KFX_SIZE: usize = 512;
pub const KFX_ALIGN: usize = 16;
#[derive(Clone, Debug)]
pub struct Context {
elr_el1: usize,
@@ -222,10 +227,17 @@ impl Context {
}
}
#[cold]
#[inline(never)]
#[naked]
pub unsafe extern "C" fn switch_to(prev: &mut Context, next: &mut Context) {
pub static EMPTY_CR3: Once<rmm::PhysicalAddress> = Once::new();
// SAFETY: EMPTY_CR3 must be initialized.
pub unsafe fn empty_cr3() -> rmm::PhysicalAddress {
debug_assert!(EMPTY_CR3.poll().is_some());
*EMPTY_CR3.get_unchecked()
}
pub unsafe fn switch_to(prev: &mut super::Context, next: &mut super::Context) {
todo!("Context::switch_to");
/*TODO: update to use asm!
let mut float_regs = &mut *(prev.fx_address as *mut FloatRegisters);
asm!(
"stp q0, q1, [{0}, #16 * 0]",
@@ -378,6 +390,7 @@ pub unsafe extern "C" fn switch_to(prev: &mut Context, next: &mut Context) {
// Jump to switch hook
asm!("b {switch_hook}", switch_hook = sym crate::context::switch_finish_hook);
*/
}
#[allow(dead_code)]
@@ -419,6 +432,14 @@ pub struct SignalHandlerStack {
#[naked]
unsafe extern fn signal_handler_wrapper() {
core::arch::asm!(
"
1:
b 1b
",
options(noreturn)
);
/*TODO: convert to asm!
#[inline(never)]
unsafe fn inner(stack: &SignalHandlerStack) {
(stack.handler)(stack.sig);
@@ -498,4 +519,5 @@ unsafe extern fn signal_handler_wrapper() {
: : : : "volatile");
llvm_asm!("mov x30, $0" : : "r"(final_lr) : "memory" : "volatile");
*/
}
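`EMPTY_CR3` keeps the x86 register name (the aarch64 analogue is TTBR0/TTBR1), presumably so the arch-generic context code can share one interface. It must be seeded exactly once before `empty_cr3()` is called; a sketch of the `spin::Once` pattern, with a hypothetical frame address:

    EMPTY_CR3.call_once(|| rmm::PhysicalAddress::new(0x4_0000));
    let empty_table = unsafe { empty_cr3() };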


@@ -1,6 +1,8 @@
use core::convert::TryInto;
use crate::syscall::io::{Io, Mmio, Pio, ReadOnly};
use crate::syscall::io::{Io, Mmio, ReadOnly};
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::syscall::io::Pio;
bitflags! {
/// Interrupt enable flags
@@ -42,6 +44,7 @@ pub struct SerialPort<T: Io> {
modem_sts: ReadOnly<T>,
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl SerialPort<Pio<u8>> {
pub const fn new(base: u16) -> SerialPort<Pio<u8>> {
SerialPort {


@@ -46,7 +46,6 @@
#![feature(arbitrary_self_types)]
#![feature(array_chunks)]
#![feature(asm_const, asm_sym)] // TODO: Relax requirements of most asm invocations
#![cfg_attr(target_arch = "aarch64", feature(llvm_asm))] // TODO: Rewrite using asm!
#![feature(bool_to_option)]
#![feature(concat_idents)]
#![feature(const_btree_new)]


@@ -602,7 +602,6 @@ pub unsafe fn usermode_bootstrap(bootstrap: &Bootstrap) -> ! {
addr_space.grants.insert(grant);
}
#[cfg(target_arch = "x86_64")]
// Start in a minimal environment without any stack.
usermode(bootstrap.entry, 0, 0, 0);
}


@@ -18,11 +18,10 @@
"executables": false,
"relocation-model": "pic",
"disable-redzone": true,
"eliminate-frame-pointer": false,
"frame-pointer": "always",
"exe-suffix": "",
"has-rpath": false,
"no-compiler-rt": true,
"no-default-libraries": true,
"position-independent-executables": false,
"has-elf-tls": true
"tls-model": "global-dynamic"
}