diff --git a/.gitmodules b/.gitmodules
index 94e58ad..e9b7384 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,3 +4,7 @@
 [submodule "slab_allocator"]
 	path = slab_allocator
 	url = https://gitlab.redox-os.org/redox-os/slab_allocator
+[submodule "rmm"]
+	path = rmm
+	url = https://gitlab.redox-os.org/redox-os/rmm.git
+	branch = master
diff --git a/Cargo.lock b/Cargo.lock
index 34fd4eb..1abd1a3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -40,6 +40,7 @@ dependencies = [
  "paste 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
  "raw-cpuid 8.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "redox_syscall 0.2.1",
+ "rmm 0.1.0",
  "rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
  "slab_allocator 0.3.1",
  "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -132,6 +133,10 @@ dependencies = [
  "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "rmm"
+version = "0.1.0"
+
 [[package]]
 name = "rustc-demangle"
 version = "0.1.16"
diff --git a/Cargo.toml b/Cargo.toml
index 9e650f9..6ec8049 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -18,6 +18,7 @@ redox_syscall = { path = "syscall" }
 slab_allocator = { path = "slab_allocator", optional = true }
 spin = "0.5.2"
 paste = "0.1.18"
+rmm = { path = "rmm", default-features = false }

 [dependencies.goblin]
 version = "0.2.1"
diff --git a/linkers/x86_64.ld b/linkers/x86_64.ld
index e88c9d0..9d1db4e 100644
--- a/linkers/x86_64.ld
+++ b/linkers/x86_64.ld
@@ -1,7 +1,7 @@
 ENTRY(kstart)
 OUTPUT_FORMAT(elf64-x86-64)

-KERNEL_OFFSET = 0xffffff0000100000;
+KERNEL_OFFSET = 0xFFFF800000100000;

 SECTIONS {
     . = KERNEL_OFFSET;
diff --git a/rmm b/rmm
new file mode 160000
index 0000000..cdbeecf
--- /dev/null
+++ b/rmm
@@ -0,0 +1 @@
+Subproject commit cdbeecfffedf802a6fd61d93b767ff273c055d80
diff --git a/src/arch/x86_64/graphical_debug/debug.rs b/src/arch/x86_64/graphical_debug/debug.rs
index c0cbc3c..98326a6 100644
--- a/src/arch/x86_64/graphical_debug/debug.rs
+++ b/src/arch/x86_64/graphical_debug/debug.rs
@@ -1,5 +1,3 @@
-use core::fmt;
-
 use super::Display;

 pub struct DebugDisplay {
diff --git a/src/arch/x86_64/graphical_debug/mod.rs b/src/arch/x86_64/graphical_debug/mod.rs
index 6ea97ed..4c46d2b 100644
--- a/src/arch/x86_64/graphical_debug/mod.rs
+++ b/src/arch/x86_64/graphical_debug/mod.rs
@@ -27,30 +27,15 @@ pub fn init(active_table: &mut ActivePageTable) {
     let physbaseptr;

     {
-        let mode_info_addr = 0x5200;
-
-        {
-            let page = Page::containing_address(VirtualAddress::new(mode_info_addr));
-            let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get()));
-            let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE);
-            result.flush(active_table);
-        }
-
-        {
-            let mode_info = unsafe { &*(mode_info_addr as *const VBEModeInfo) };
-
-            width = mode_info.xresolution as usize;
-            height = mode_info.yresolution as usize;
-            physbaseptr = mode_info.physbaseptr as usize;
-        }
-
-        {
-            let page = Page::containing_address(VirtualAddress::new(mode_info_addr));
-            let (result, _frame) = active_table.unmap_return(page, false);
-            result.flush(active_table);
-        }
+        let mode_info_addr = 0x5200 + crate::KERNEL_OFFSET;
+        let mode_info = unsafe { &*(mode_info_addr as *const VBEModeInfo) };
+        width = mode_info.xresolution as usize;
+        height = mode_info.yresolution as usize;
+        physbaseptr = mode_info.physbaseptr as usize;
     }

+    println!("Framebuffer {}x{} at {:X}", width, height, physbaseptr);
+
     {
         let size = width * height;
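The graphical_debug change above can drop the temporary map/read/unmap dance because low physical memory is now reachable through the kernel's fixed offset mapping. A minimal sketch of that conversion, assuming the patch's new KERNEL_OFFSET (rmm expresses the same idea as Arch::phys_to_virt); the helper name is illustrative, not from the patch:

    const KERNEL_OFFSET: usize = 0xFFFF_8000_0000_0000;

    /// Convert a physical address to a virtual one inside the offset mapping.
    unsafe fn phys_to_virt<T>(phys: usize) -> *const T {
        (KERNEL_OFFSET + phys) as *const T
    }

    // e.g. the VBE mode info block the bootloader left at physical 0x5200:
    // let mode_info = unsafe { &*phys_to_virt::<VBEModeInfo>(0x5200) };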
diff --git a/src/arch/x86_64/graphical_debug/primitive.rs b/src/arch/x86_64/graphical_debug/primitive.rs
index ccf6e8a..cc69a05 100644
--- a/src/arch/x86_64/graphical_debug/primitive.rs
+++ b/src/arch/x86_64/graphical_debug/primitive.rs
@@ -2,15 +2,13 @@
 #[inline(always)]
 #[cold]
 pub unsafe fn fast_copy(dst: *mut u8, src: *const u8, len: usize) {
-    asm!("cld
-        rep movsb",
-        in("rdi") (dst as usize),
-        in("rsi") (src as usize),
+    asm!("cld; rep movsb",
+        in("rdi") dst as usize,
+        in("rsi") src as usize,
         in("rcx") len,
-        out("cc") _,
-        out("rdi") _,
-        out("rsi") _,
-        out("rcx") _,
+        lateout("rdi") _,
+        lateout("rsi") _,
+        lateout("rcx") _,
     );
 }

@@ -18,14 +16,12 @@ pub unsafe fn fast_copy(dst: *mut u8, src: *const u8, len: usize) {
 #[inline(always)]
 #[cold]
 pub unsafe fn fast_set32(dst: *mut u32, src: u32, len: usize) {
-    asm!("cld
-        rep stosd",
-        in("rdi") (dst as usize),
+    asm!("cld; rep stosd",
+        in("rdi") dst as usize,
         in("eax") src,
         in("rcx") len,
-        out("cc") _,
-        out("rdi") _,
-        out("rcx") _,
+        lateout("rdi") _,
+        lateout("rcx") _,
     );
 }

@@ -33,13 +29,11 @@ pub unsafe fn fast_set32(dst: *mut u32, src: u32, len: usize) {
 #[inline(always)]
 #[cold]
 pub unsafe fn fast_set64(dst: *mut u64, src: u64, len: usize) {
-    asm!("cld
-        rep stosq"
-        in("rdi") (dst as usize),
+    asm!("cld; rep stosq",
+        in("rdi") dst as usize,
         in("rax") src,
         in("rcx") len,
-        out("cc") _,
-        out("rdi") _,
-        out("rcx") _,
+        lateout("rdi") _,
+        lateout("rcx") _,
     );
 }
diff --git a/src/arch/x86_64/mod.rs b/src/arch/x86_64/mod.rs
index 4dbf6f1..1382c6c 100644
--- a/src/arch/x86_64/mod.rs
+++ b/src/arch/x86_64/mod.rs
@@ -30,6 +30,8 @@ pub mod paging;
 /// Page table isolation
 pub mod pti;

+pub mod rmm;
+
 /// Initialization and start function
 pub mod start;
diff --git a/src/arch/x86_64/paging/mapper.rs b/src/arch/x86_64/paging/mapper.rs
index 24f23e0..5d66b7b 100644
--- a/src/arch/x86_64/paging/mapper.rs
+++ b/src/arch/x86_64/paging/mapper.rs
@@ -191,11 +191,10 @@ impl Mapper {
         }

         if let Some(p3_frame) = p4[page.p4_index()].pointed_frame() {
-            //TODO: Find out why this breaks user heap
             //println!("Free p3 {:?}", p3_frame);
-            //p4.decrement_entry_count();
-            //p4[page.p4_index()].set_unused();
-            //deallocate_frames(p3_frame, 1);
+            p4.decrement_entry_count();
+            p4[page.p4_index()].set_unused();
+            deallocate_frames(p3_frame, 1);
         } else {
             panic!("unmap_inner({:X}): p3_frame not found", page.start_address().get());
         }
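For reference, a hypothetical fast_set8 in the same rewritten style (not part of the patch): pairing in and lateout on the same register marks an input that is clobbered once all inputs have been consumed, and the removed out("cc") operands are unnecessary because asm! already assumes flags are clobbered unless options(preserves_flags) is given.

    pub unsafe fn fast_set8(dst: *mut u8, src: u8, len: usize) {
        core::arch::asm!("cld; rep stosb",
            in("rdi") dst as usize,   // destination pointer, advanced by stosb
            in("al") src,             // fill byte, left unchanged
            in("rcx") len,            // count, decremented to zero
            lateout("rdi") _,         // rdi and rcx are clobbered by rep stosb
            lateout("rcx") _,
        );
    }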
diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index e3cb025..69c779f 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -6,10 +6,10 @@ use core::{mem, ptr};
 use spin::Mutex;
 use x86::{controlregs, msr, tlb};

-use crate::memory::{allocate_frames, Frame};
+use crate::memory::Frame;

 use self::entry::EntryFlags;
-use self::mapper::Mapper;
+use self::mapper::{Mapper, MapperFlushAll};
 use self::temporary_page::TemporaryPage;

 pub mod entry;
@@ -86,6 +86,39 @@ unsafe fn init_pat() {
     );
 }

+/// Map TSS
+unsafe fn map_tss(cpu_id: usize, mapper: &mut Mapper) -> MapperFlushAll {
+    extern "C" {
+        /// The starting byte of the thread data segment
+        static mut __tdata_start: u8;
+        /// The ending byte of the thread data segment
+        static mut __tdata_end: u8;
+        /// The starting byte of the thread BSS segment
+        static mut __tbss_start: u8;
+        /// The ending byte of the thread BSS segment
+        static mut __tbss_end: u8;
+    }
+
+    let size = &__tbss_end as *const _ as usize - &__tdata_start as *const _ as usize;
+    let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
+    let end = start + size;
+
+    let mut flush_all = MapperFlushAll::new();
+    let start_page = Page::containing_address(VirtualAddress::new(start));
+    let end_page = Page::containing_address(VirtualAddress::new(end - 1));
+    for page in Page::range_inclusive(start_page, end_page) {
+        let result = mapper.map(
+            page,
+            EntryFlags::PRESENT
+                | EntryFlags::GLOBAL
+                | EntryFlags::NO_EXECUTE
+                | EntryFlags::WRITABLE,
+        );
+        flush_all.consume(result);
+    }
+
+    flush_all
+}
+
 /// Copy tdata, clear tbss, set TCB self pointer
 unsafe fn init_tcb(cpu_id: usize) -> usize {
     extern "C" {
@@ -121,11 +154,6 @@ unsafe fn init_tcb(cpu_id: usize) -> usize {
 /// Returns page table and thread control block offset
 pub unsafe fn init(
     cpu_id: usize,
-    kernel_start: usize,
-    kernel_end: usize,
-    stack_start: usize,
-    stack_end: usize,
-    other_ro_ranges: &[(usize, usize)], // base + length
 ) -> (ActivePageTable, usize) {
     extern "C" {
         /// The starting byte of the text (code) data segment.
@@ -158,156 +186,16 @@ pub unsafe fn init(

     let mut active_table = ActivePageTable::new_unlocked();

-    let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(
-        crate::USER_TMP_MISC_OFFSET,
-    )));
+    let flush_all = map_tss(cpu_id, &mut active_table);
+    flush_all.flush(&mut active_table);

-    let mut new_table = {
-        let frame = allocate_frames(1).expect("no more frames in paging::init new_table");
-        InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
-    };
-
-    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
-        // Remap stack writable, no execute
-        {
-            let start_frame =
-                Frame::containing_address(PhysicalAddress::new(stack_start - crate::KERNEL_OFFSET));
-            let end_frame = Frame::containing_address(PhysicalAddress::new(
-                stack_end - crate::KERNEL_OFFSET - 1,
-            ));
-            for frame in Frame::range_inclusive(start_frame, end_frame) {
-                let page = Page::containing_address(VirtualAddress::new(
-                    frame.start_address().get() + crate::KERNEL_OFFSET,
-                ));
-                let result = mapper.map_to(
-                    page,
-                    frame,
-                    EntryFlags::PRESENT
-                        | EntryFlags::GLOBAL
-                        | EntryFlags::NO_EXECUTE
-                        | EntryFlags::WRITABLE,
-                );
-                // The flush can be ignored as this is not the active table. See later active_table.switch
-                /* unsafe */
-                {
-                    result.ignore();
-                }
-            }
-        }
-
-        // Map all frames in kernel
-        {
-            let start_frame = Frame::containing_address(PhysicalAddress::new(kernel_start));
-            let end_frame = Frame::containing_address(PhysicalAddress::new(kernel_end - 1));
-            for frame in Frame::range_inclusive(start_frame, end_frame) {
-                let phys_addr = frame.start_address().get();
-                let virt_addr = phys_addr + crate::KERNEL_OFFSET;
-
-                macro_rules! in_section {
-                    ($n: ident) => {
-                        virt_addr >= &concat_idents!(__, $n, _start) as *const u8 as usize
-                            && virt_addr < &concat_idents!(__, $n, _end) as *const u8 as usize
-                    };
-                }
-
-                let flags = if in_section!(text) {
-                    // Remap text read-only
-                    EntryFlags::PRESENT | EntryFlags::GLOBAL
-                } else if in_section!(rodata) {
-                    // Remap rodata read-only, no execute
-                    EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::NO_EXECUTE
-                } else if in_section!(data) {
-                    // Remap data writable, no execute
-                    EntryFlags::PRESENT
-                        | EntryFlags::GLOBAL
-                        | EntryFlags::NO_EXECUTE
-                        | EntryFlags::WRITABLE
-                } else if in_section!(tdata) {
-                    // Remap tdata master read-only, no execute
-                    EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::NO_EXECUTE
-                } else if in_section!(bss) {
-                    // Remap bss writable, no execute
-                    EntryFlags::PRESENT
-                        | EntryFlags::GLOBAL
-                        | EntryFlags::NO_EXECUTE
-                        | EntryFlags::WRITABLE
-                } else {
-                    // Remap anything else read-only, no execute
-                    EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::NO_EXECUTE
-                };
-
-                let page = Page::containing_address(VirtualAddress::new(virt_addr));
-                let result = mapper.map_to(page, frame, flags);
-                // The flush can be ignored as this is not the active table. See later active_table.switch
-                /* unsafe */
-                {
-                    result.ignore();
-                }
-            }
-        }
-
-        // Map tdata and tbss
-        {
-            let size = &__tbss_end as *const _ as usize - &__tdata_start as *const _ as usize;
-
-            let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
-            let end = start + size;
-
-            let start_page = Page::containing_address(VirtualAddress::new(start));
-            let end_page = Page::containing_address(VirtualAddress::new(end - 1));
-            for page in Page::range_inclusive(start_page, end_page) {
-                let result = mapper.map(
-                    page,
-                    EntryFlags::PRESENT
-                        | EntryFlags::GLOBAL
-                        | EntryFlags::NO_EXECUTE
-                        | EntryFlags::WRITABLE,
-                );
-                // The flush can be ignored as this is not the active table. See later active_table.switch
-                result.ignore();
-            }
-        }
-
-        // Map all other necessary address ranges coming from the bootloader.
-        // The address ranges may overlap, but this is not a problem since they have the same
-        // flags.
-        {
-            for (range_start, range_size) in other_ro_ranges {
-                let start_phys_addr = Frame::containing_address(PhysicalAddress::new((range_start / 4096) * 4096 - crate::KERNEL_OFFSET));
-                let end_phys_addr = Frame::containing_address(PhysicalAddress::new(((range_start + range_size + 4095) / 4096) * 4096 - crate::KERNEL_OFFSET));
-
-                for frame in Frame::range_inclusive(start_phys_addr, end_phys_addr) {
-                    let page = Page::containing_address(VirtualAddress::new(crate::KERNEL_OFFSET + frame.start_address().get()));
-                    let result = mapper.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::NO_EXECUTE);
-                    result.ignore();
-                }
-            }
-        }
-    });
-
-    // This switches the active table, which is setup by the bootloader, to a correct table
-    // setup by the lambda above. This will also flush the TLB
-    active_table.switch(new_table);
-
-    (active_table, init_tcb(cpu_id))
+    return (active_table, init_tcb(cpu_id));
 }

 pub unsafe fn init_ap(
     cpu_id: usize,
     bsp_table: usize,
-    stack_start: usize,
-    stack_end: usize,
 ) -> usize {
-    extern "C" {
-        /// The starting byte of the thread data segment
-        static mut __tdata_start: u8;
-        /// The ending byte of the thread data segment
-        static mut __tdata_end: u8;
-        /// The starting byte of the thread BSS segment
-        static mut __tbss_start: u8;
-        /// The ending byte of the thread BSS segment
-        static mut __tbss_end: u8;
-    }
-
     init_pat();

     let mut active_table = ActivePageTable::new_unlocked();
@@ -319,52 +207,9 @@ pub unsafe fn init_ap(
     )));

     active_table.with(&mut new_table, &mut temporary_page, |mapper| {
-        // Map tdata and tbss
-        {
-            let size = &__tbss_end as *const _ as usize - &__tdata_start as *const _ as usize;
-
-            let start = crate::KERNEL_PERCPU_OFFSET + crate::KERNEL_PERCPU_SIZE * cpu_id;
-            let end = start + size;
-
-            let start_page = Page::containing_address(VirtualAddress::new(start));
-            let end_page = Page::containing_address(VirtualAddress::new(end - 1));
-            for page in Page::range_inclusive(start_page, end_page) {
-                let result = mapper.map(
-                    page,
-                    EntryFlags::PRESENT
-                        | EntryFlags::GLOBAL
-                        | EntryFlags::NO_EXECUTE
-                        | EntryFlags::WRITABLE,
-                );
-                // The flush can be ignored as this is not the active table. See later active_table.switch
-                result.ignore();
-            }
-        }
-
-        let mut remap = |start: usize, end: usize, flags: EntryFlags| {
-            if end > start {
-                let start_frame = Frame::containing_address(PhysicalAddress::new(start));
-                let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
-                for frame in Frame::range_inclusive(start_frame, end_frame) {
-                    let page = Page::containing_address(VirtualAddress::new(
-                        frame.start_address().get() + crate::KERNEL_OFFSET,
-                    ));
-                    let result = mapper.map_to(page, frame, flags);
-                    // The flush can be ignored as this is not the active table. See later active_table.switch
-                    result.ignore();
-                }
-            }
-        };
-
-        // Remap stack writable, no execute
-        remap(
-            stack_start - crate::KERNEL_OFFSET,
-            stack_end - crate::KERNEL_OFFSET,
-            EntryFlags::PRESENT
-                | EntryFlags::GLOBAL
-                | EntryFlags::NO_EXECUTE
-                | EntryFlags::WRITABLE,
-        );
+        let flush_all = map_tss(cpu_id, mapper);
+        // The flush can be ignored as this is not the active table. See later active_table.switch
+        flush_all.ignore();
     });

     // This switches the active table, which is setup by the bootloader, to a correct table
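The per-CPU range that map_tss computes is plain arithmetic: CPU n owns KERNEL_PERCPU_SIZE bytes starting at KERNEL_PERCPU_OFFSET + n * KERNEL_PERCPU_SIZE, and only the first `size` bytes (the TLS image) are mapped. A standalone sketch with illustrative constants (the real values live in consts.rs):

    const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000; // illustrative only
    const KERNEL_PERCPU_SIZE: usize = 64 * 1024;     // illustrative only

    fn percpu_range(cpu_id: usize, tls_size: usize) -> (usize, usize) {
        let start = KERNEL_PERCPU_OFFSET + KERNEL_PERCPU_SIZE * cpu_id;
        (start, start + tls_size)
    }

    // CPU 2 with a 4 KiB TLS image:
    // percpu_range(2, 4096) == (0xC002_0000, 0xC002_1000)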
diff --git a/src/arch/x86_64/rmm.rs b/src/arch/x86_64/rmm.rs
new file mode 100644
index 0000000..8717d08
--- /dev/null
+++ b/src/arch/x86_64/rmm.rs
@@ -0,0 +1,221 @@
+use rmm::{
+    KILOBYTE,
+    MEGABYTE,
+    Arch,
+    BuddyAllocator,
+    BumpAllocator,
+    FrameAllocator,
+    FrameCount,
+    FrameUsage,
+    MemoryArea,
+    PageMapper,
+    PhysicalAddress,
+    VirtualAddress,
+    X8664Arch,
+};
+
+use spin::Mutex;
+
+extern "C" {
+    /// The starting byte of the text (code) data segment.
+    static mut __text_start: u8;
+    /// The ending byte of the text (code) data segment.
+    static mut __text_end: u8;
+    /// The starting byte of the _.rodata_ (read-only data) segment.
+    static mut __rodata_start: u8;
+    /// The ending byte of the _.rodata_ (read-only data) segment.
+    static mut __rodata_end: u8;
+}
+
+unsafe fn page_flags<A: Arch>(virt: VirtualAddress) -> usize {
+    let virt_addr = virt.data();
+
+    // Test for being inside a region
+    macro_rules! in_section {
+        ($n: ident) => {
+            virt_addr >= &concat_idents!(__, $n, _start) as *const u8 as usize
+                && virt_addr < &concat_idents!(__, $n, _end) as *const u8 as usize
+        };
+    }
+
+    if in_section!(text) {
+        // Remap text read-only, execute
+        0
+    } else if in_section!(rodata) {
+        // Remap rodata read-only, no execute
+        A::ENTRY_FLAG_NO_EXEC
+    } else {
+        // Remap everything else writable, no execute
+        A::ENTRY_FLAG_WRITABLE | A::ENTRY_FLAG_NO_EXEC
+    }
+}
+
+unsafe fn inner<A: Arch>(areas: &'static [MemoryArea], bump_offset: usize) -> BuddyAllocator<A> {
+    // First, calculate how much memory we have
+    let mut size = 0;
+    for area in areas.iter() {
+        if area.size > 0 {
+            println!("{:X?}", area);
+            size += area.size;
+        }
+    }
+
+    println!("Memory: {} MB", (size + (MEGABYTE - 1)) / MEGABYTE);
+
+    // Create a basic allocator for the first pages
+    let mut bump_allocator = BumpAllocator::<A>::new(areas, bump_offset);
+
+    //TODO: memory protection
+    {
+        let mut mapper = PageMapper::<A, _>::create(
+            &mut bump_allocator
+        ).expect("failed to create Mapper");
+
+        // Map all physical areas at PHYS_OFFSET
+        for area in areas.iter() {
+            for i in 0..area.size / A::PAGE_SIZE {
+                let phys = area.base.add(i * A::PAGE_SIZE);
+                let virt = A::phys_to_virt(phys);
+                let flags = page_flags::<A>(virt);
+                let flush = mapper.map_phys(
+                    virt,
+                    phys,
+                    flags
+                ).expect("failed to map frame");
+                flush.ignore(); // Not the active table
+            }
+        }
+
+        //TODO: remove backwards compatible recursive mapping
+        mapper.table().set_entry(511, rmm::PageEntry::new(
+            mapper.table().phys().data() | A::ENTRY_FLAG_WRITABLE | A::ENTRY_FLAG_PRESENT | A::ENTRY_FLAG_NO_EXEC
+        ));
+
+        println!("Table: {:X}", mapper.table().phys().data());
+        for i in 0..512 {
+            if let Some(entry) = mapper.table().entry(i) {
+                if entry.present() {
+                    println!("{}: {:X}", i, entry.data());
+                }
+            }
+        }
+
+        // Use the new table
+        mapper.make_current();
+    }
+
+    // Create the physical memory map
+    let offset = bump_allocator.offset();
+    println!("Permanently used: {} KB", (offset + (KILOBYTE - 1)) / KILOBYTE);
+
+    BuddyAllocator::<A>::new(bump_allocator).expect("failed to create BuddyAllocator")
+}
+
+pub struct LockedAllocator {
+    inner: Mutex<Option<BuddyAllocator<X8664Arch>>>,
+}
+
+impl LockedAllocator {
+    const fn new() -> Self {
+        Self {
+            inner: Mutex::new(None)
+        }
+    }
+}
+
+impl FrameAllocator for LockedAllocator {
+    unsafe fn allocate(&mut self, count: FrameCount) -> Option<PhysicalAddress> {
+        if let Some(ref mut allocator) = *self.inner.lock() {
+            allocator.allocate(count)
+        } else {
+            None
+        }
+    }
+
+    unsafe fn free(&mut self, address: PhysicalAddress, count: FrameCount) {
+        if let Some(ref mut allocator) = *self.inner.lock() {
+            allocator.free(address, count)
+        }
+    }
+
+    unsafe fn usage(&self) -> FrameUsage {
+        if let Some(ref allocator) = *self.inner.lock() {
+            allocator.usage()
+        } else {
+            FrameUsage::new(FrameCount::new(0), FrameCount::new(0))
+        }
+    }
+}
+
+static mut AREAS: [MemoryArea; 512] = [MemoryArea {
+    base: PhysicalAddress::new(0),
+    size: 0,
+}; 512];
+
+pub static mut FRAME_ALLOCATOR: LockedAllocator = LockedAllocator::new();
+
+pub unsafe fn mapper_new(table_addr: PhysicalAddress) -> PageMapper<'static, X8664Arch, LockedAllocator> {
+    PageMapper::new(table_addr, &mut FRAME_ALLOCATOR)
+}
+
+//TODO: global paging lock?
+pub unsafe fn mapper_create() -> Option<PageMapper<'static, X8664Arch, LockedAllocator>> {
+    PageMapper::create(&mut FRAME_ALLOCATOR)
+}
+
+pub unsafe fn mapper_current() -> PageMapper<'static, X8664Arch, LockedAllocator> {
+    PageMapper::current(&mut FRAME_ALLOCATOR)
+}
+
+pub unsafe fn init(kernel_end: usize) {
+    type A = X8664Arch;
+
+    println!("kernel_end: {:X}", kernel_end);
+
+    // Copy memory map from bootloader location, and page align it
+    let mut area_i = 0;
+    let mut bump_offset = 0;
+    for i in 0..512 {
+        let old = *(0x500 as *const crate::memory::MemoryArea).add(i);
+        if old._type != 1 {
+            // Not a free area
+            continue;
+        }
+
+        let mut base = old.base_addr as usize;
+        let mut size = old.length as usize;
+
+        // Page align base
+        let base_offset = (A::PAGE_SIZE - (base & A::PAGE_OFFSET_MASK)) & A::PAGE_OFFSET_MASK;
+        if base_offset > size {
+            // Area is too small to page align base
+            continue;
+        }
+        base += base_offset;
+        size -= base_offset;
+
+        // Page align size
+        size &= !A::PAGE_OFFSET_MASK;
+        if size == 0 {
+            // Area is zero sized
+            continue;
+        }
+
+        if base + size < kernel_end {
+            // Area is below static kernel data
+            bump_offset += size;
+        } else if base < kernel_end {
+            // Area contains static kernel data
+            bump_offset += kernel_end - base;
+        }
+
+        AREAS[area_i].base = PhysicalAddress::new(base);
+        AREAS[area_i].size = size;
+        area_i += 1;
+    }
+
+    println!("bump_offset: {:X}", bump_offset);
+
+    let allocator = inner::<A>(&AREAS, bump_offset);
+    *FRAME_ALLOCATOR.inner.lock() = Some(allocator);
+}
diff --git a/src/arch/x86_64/start.rs b/src/arch/x86_64/start.rs
index 505d089..664dfcf 100644
--- a/src/arch/x86_64/start.rs
+++ b/src/arch/x86_64/start.rs
@@ -18,7 +18,6 @@ use crate::gdt;
 use crate::idt;
 use crate::interrupt;
 use crate::log::{self, info};
-use crate::memory;
 use crate::paging;

 /// Test of zero values in BSS.
@@ -101,23 +100,17 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
     info!("Env: {:X}:{:X}", env_base, env_base + env_size);
     info!("RSDPs: {:X}:{:X}", acpi_rsdps_base, acpi_rsdps_base + acpi_rsdps_size);

-    let ext_mem_ranges = if args.acpi_rsdps_base != 0 && args.acpi_rsdps_size > 0 {
-        Some([(acpi_rsdps_base as usize, acpi_rsdps_size as usize)])
-    } else {
-        None
-    };
-
     // Set up GDT before paging
     gdt::init();

     // Set up IDT before paging
     idt::init();

-    // Initialize memory management
-    memory::init(0, kernel_base + ((kernel_size + 4095)/4096) * 4096);
+    // Initialize RMM
+    crate::arch::rmm::init(kernel_base + ((kernel_size + 4095)/4096) * 4096);

     // Initialize paging
-    let (mut active_table, tcb_offset) = paging::init(0, kernel_base, kernel_base + kernel_size, stack_base, stack_base + stack_size, ext_mem_ranges.as_ref().map(|arr| &arr[..]).unwrap_or(&[]));
+    let (mut active_table, tcb_offset) = paging::init(0);

     // Set up GDT after paging with TLS
     gdt::init_paging(tcb_offset, stack_base + stack_size);
@@ -171,9 +164,6 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
     // Initialize all of the non-core devices not otherwise needed to complete initialization
     device::init_noncore();

-    // Initialize memory functions after core has loaded
-    memory::init_noncore();
-
     // Stop graphical debug
     #[cfg(feature="graphical_debug")]
     graphical_debug::fini(&mut active_table);
@@ -200,7 +190,7 @@ pub unsafe extern fn kstart_ap(args_ptr: *const KernelArgsAp) -> ! {
     let args = &*args_ptr;
     let cpu_id = args.cpu_id as usize;
     let bsp_table = args.page_table as usize;
-    let stack_start = args.stack_start as usize;
+    let _stack_start = args.stack_start as usize;
     let stack_end = args.stack_end as usize;

     assert_eq!(BSS_TEST_ZERO, 0);
@@ -213,7 +203,7 @@ pub unsafe extern fn kstart_ap(args_ptr: *const KernelArgsAp) -> ! {
     idt::init();

     // Initialize paging
-    let tcb_offset = paging::init_ap(cpu_id, bsp_table, stack_start, stack_end);
+    let tcb_offset = paging::init_ap(cpu_id, bsp_table);

     // Set up GDT with TLS
     gdt::init_paging(tcb_offset, stack_end);
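Both the kernel_end rounding in kstart above and the area normalization in rmm::init use the usual power-of-two alignment arithmetic. A standalone sketch of the area logic, assuming 4 KiB pages:

    const PAGE_SIZE: usize = 4096;
    const PAGE_OFFSET_MASK: usize = PAGE_SIZE - 1;

    /// Round `base` up and `size` down so the area covers whole pages.
    fn page_align(mut base: usize, mut size: usize) -> Option<(usize, usize)> {
        let base_offset = (PAGE_SIZE - (base & PAGE_OFFSET_MASK)) & PAGE_OFFSET_MASK;
        if base_offset > size {
            return None; // area too small to page align base
        }
        base += base_offset;
        size -= base_offset;
        size &= !PAGE_OFFSET_MASK;
        if size == 0 { None } else { Some((base, size)) }
    }

    // page_align(0x1234, 0x4000) == Some((0x2000, 0x3000))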
diff --git a/src/consts.rs b/src/consts.rs
index 8f59b47..5409caf 100644
--- a/src/consts.rs
+++ b/src/consts.rs
@@ -12,11 +12,11 @@
     pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK)/PML4_SIZE;

     /// Offset of kernel
-    pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
+    pub const KERNEL_OFFSET: usize = 0xFFFF_8000_0000_0000; //TODO: better calculation
     pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK)/PML4_SIZE;

     /// Offset to kernel heap
-    pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
+    pub const KERNEL_HEAP_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
     pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;

     /// Size of kernel heap
     pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
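As a sanity check on the new constant (standalone, not kernel code): 0xFFFF_8000_0000_0000 is the lowest canonical higher-half address, i.e. PML4 slot 256, whereas the old offset sat in slot 510, directly under the recursive mapping in slot 511.

    fn pml4_index(addr: usize) -> usize {
        (addr >> 39) & 0x1FF // bits 39..48 select the PML4 entry
    }

    // pml4_index(0xFFFF_8000_0000_0000) == 256  (new KERNEL_OFFSET)
    // pml4_index(0xFFFF_FF00_0010_0000) == 510  (old linker KERNEL_OFFSET)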
diff --git a/src/memory/bump.rs b/src/memory/bump.rs
deleted file mode 100644
index e824fa4..0000000
--- a/src/memory/bump.rs
+++ /dev/null
@@ -1,151 +0,0 @@
-//! # Bump frame allocator
-//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
-
-use crate::paging::PhysicalAddress;
-use super::{Frame, FrameAllocator, MemoryArea, MemoryAreaIter};
-
-use syscall::{PartialAllocStrategy, PhysallocFlags};
-
-pub struct BumpAllocator {
-    next_free_frame: Frame,
-    current_area: Option<&'static MemoryArea>,
-    areas: MemoryAreaIter,
-    kernel_start: Frame,
-    kernel_end: Frame
-}
-
-impl BumpAllocator {
-    pub fn new(kernel_start: usize, kernel_end: usize, memory_areas: MemoryAreaIter) -> Self {
-        let mut allocator = Self {
-            next_free_frame: Frame::containing_address(PhysicalAddress::new(0)),
-            current_area: None,
-            areas: memory_areas,
-            kernel_start: Frame::containing_address(PhysicalAddress::new(kernel_start)),
-            kernel_end: Frame::containing_address(PhysicalAddress::new(kernel_end))
-        };
-        allocator.choose_next_area();
-        allocator
-    }
-
-    fn choose_next_area(&mut self) {
-        self.current_area = self.areas.clone().filter(|area| {
-            let address = area.base_addr + area.length - 1;
-            Frame::containing_address(PhysicalAddress::new(address as usize)) >= self.next_free_frame
-        }).min_by_key(|area| area.base_addr);
-
-        if let Some(area) = self.current_area {
-            let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
-            if self.next_free_frame < start_frame {
-                self.next_free_frame = start_frame;
-            }
-        }
-    }
-}
-
-impl FrameAllocator for BumpAllocator {
-    #[allow(unused)]
-    fn set_noncore(&mut self, noncore: bool) {}
-
-    fn free_frames(&self) -> usize {
-        let mut count = 0;
-
-        for area in self.areas.clone() {
-            let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
-            let end_frame = Frame::containing_address(PhysicalAddress::new((area.base_addr + area.length - 1) as usize));
-            for frame in Frame::range_inclusive(start_frame, end_frame) {
-                if frame >= self.kernel_start && frame <= self.kernel_end {
-                    // Inside of kernel range
-                } else if frame >= self.next_free_frame {
-                    // Frame is in free range
-                    count += 1;
-                } else {
-                    // Inside of used range
-                }
-            }
-        }
-
-        count
-    }
-
-    fn used_frames(&self) -> usize {
-        let mut count = 0;
-
-        for area in self.areas.clone() {
-            let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
-            let end_frame = Frame::containing_address(PhysicalAddress::new((area.base_addr + area.length - 1) as usize));
-            for frame in Frame::range_inclusive(start_frame, end_frame) {
-                if frame >= self.kernel_start && frame <= self.kernel_end {
-                    // Inside of kernel range
-                    count += 1
-                } else if frame >= self.next_free_frame {
-                    // Frame is in free range
-                } else {
-                    count += 1;
-                }
-            }
-        }
-
-        count
-    }
-
-    fn allocate_frames3(&mut self, count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)> {
-        // TODO: Comply with flags and allocation strategies better.
-        if count == 0 {
-            return None;
-        } else if let Some(area) = self.current_area {
-            let space32 = flags.contains(PhysallocFlags::SPACE_32);
-            let partial_alloc = flags.contains(PhysallocFlags::PARTIAL_ALLOC);
-            let mut actual_size = count;
-
-            // "Clone" the frame to return it if it's free. Frame doesn't
-            // implement Clone, but we can construct an identical frame.
-            let start_frame = Frame { number: self.next_free_frame.number };
-            let mut end_frame = Frame { number: self.next_free_frame.number + (count - 1) };
-            let min_end_frame = if partial_alloc { Frame { number: self.next_free_frame.number + (min - 1) } } else { Frame { number: self.next_free_frame.number + (count - 1) } };
-
-            // the last frame of the current area
-            let current_area_last_frame = {
-                let address = area.base_addr + area.length - 1;
-                Frame::containing_address(PhysicalAddress::new(address as usize))
-            };
-
-            if end_frame > current_area_last_frame && min_end_frame > current_area_last_frame {
-                // all frames of current area are used, switch to next area
-                self.choose_next_area();
-                return self.allocate_frames3(count, flags, strategy, min)
-            } else if partial_alloc {
-                end_frame = Frame { number: self.next_free_frame.number + (min - 1) };
-                actual_size = min;
-            }
-
-            if space32 && end_frame.start_address().get() + super::PAGE_SIZE >= 0x1_0000_0000 {
-                // assuming that the bump allocator always advances, and that the memory map is sorted,
-                // when allocating in 32-bit space we can only return None when the free range was
-                // outside 0x0000_0000-0xFFFF_FFFF.
-                //
-                // we don't want to skip an entire memory region just because one 32-bit allocation failed.
-                return None;
-            }
-
-            if (start_frame >= self.kernel_start && start_frame <= self.kernel_end)
-                || (end_frame >= self.kernel_start && end_frame <= self.kernel_end) {
-                // `frame` is used by the kernel
-                self.next_free_frame = Frame {
-                    number: self.kernel_end.number + 1
-                };
-                // `frame` was not valid, try it again with the updated `next_free_frame`
-                return self.allocate_frames3(count, flags, strategy, min)
-            }
-
-            // frame is unused, increment `next_free_frame` and return it
-            self.next_free_frame.number += actual_size;
-            return Some((start_frame, actual_size));
-        } else {
-            None // no free memory areas left, and thus no frames left
-        }
-    }
-
-    fn deallocate_frames(&mut self, _frame: Frame, _count: usize) {
-        //panic!("BumpAllocator::deallocate_frame: not supported: {:?}", frame);
-    }
-}
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index 3dd8af2..a8fce8c 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -1,34 +1,15 @@
 //! # Memory management
 //! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)

-use crate::log::info;
+use crate::arch::rmm::FRAME_ALLOCATOR;
 pub use crate::paging::{PAGE_SIZE, PhysicalAddress};

-use self::bump::BumpAllocator;
-use self::recycle::RecycleAllocator;
-
-use spin::Mutex;
+use rmm::{
+    FrameAllocator,
+    FrameCount,
+};

 use syscall::{PartialAllocStrategy, PhysallocFlags};

-pub mod bump;
-pub mod recycle;
-
-/// The current memory map. It's size is maxed out to 512 entries, due to it being
-/// from 0x500 to 0x5000 (800 is the absolute total)
-static mut MEMORY_MAP: [MemoryArea; 512] = [MemoryArea { base_addr: 0, length: 0, _type: 0, acpi: 0 }; 512];
-
-/// Memory does not exist
-pub const MEMORY_AREA_NULL: u32 = 0;
-
-/// Memory is free to use
-pub const MEMORY_AREA_FREE: u32 = 1;
-
-/// Memory is reserved
-pub const MEMORY_AREA_RESERVED: u32 = 2;
-
-/// Memory is used by ACPI, and can be reclaimed
-pub const MEMORY_AREA_ACPI: u32 = 3;
-
 /// A memory map area
 #[derive(Copy, Clone, Debug, Default)]
 #[repr(packed)]
@@ -39,101 +20,50 @@ pub struct MemoryArea {
     pub acpi: u32
 }

-#[derive(Clone)]
-pub struct MemoryAreaIter {
-    _type: u32,
-    i: usize
-}
-
-impl MemoryAreaIter {
-    fn new(_type: u32) -> Self {
-        MemoryAreaIter {
-            _type,
-            i: 0
-        }
-    }
-}
-
-impl Iterator for MemoryAreaIter {
-    type Item = &'static MemoryArea;
-    fn next(&mut self) -> Option<Self::Item> {
-        while self.i < unsafe { MEMORY_MAP.len() } {
-            let entry = unsafe { &MEMORY_MAP[self.i] };
-            self.i += 1;
-            if entry._type == self._type {
-                return Some(entry);
-            }
-        }
-        None
-    }
-}
-
-static ALLOCATOR: Mutex<Option<RecycleAllocator<BumpAllocator>>> = Mutex::new(None);
-
-/// Init memory module
-/// Must be called once, and only once,
-pub unsafe fn init(kernel_start: usize, kernel_end: usize) {
-    // Copy memory map from bootloader location
-    for (i, entry) in MEMORY_MAP.iter_mut().enumerate() {
-        *entry = *(0x500 as *const MemoryArea).add(i);
-        if entry._type != MEMORY_AREA_NULL {
-            info!("{:X?}", entry);
-        }
-    }
-
-    *ALLOCATOR.lock() = Some(RecycleAllocator::new(BumpAllocator::new(kernel_start, kernel_end, MemoryAreaIter::new(MEMORY_AREA_FREE))));
-}
-
-/// Init memory module after core
-/// Must be called once, and only once,
-pub unsafe fn init_noncore() {
-    if let Some(ref mut allocator) = *ALLOCATOR.lock() {
-        allocator.set_noncore(true)
-    } else {
-        panic!("frame allocator not initialized");
-    }
-}
-
 /// Get the number of frames available
 pub fn free_frames() -> usize {
-    if let Some(ref allocator) = *ALLOCATOR.lock() {
-        allocator.free_frames()
-    } else {
-        panic!("frame allocator not initialized");
+    unsafe {
+        FRAME_ALLOCATOR.usage().free().data()
     }
 }

 /// Get the number of frames used
 pub fn used_frames() -> usize {
-    if let Some(ref allocator) = *ALLOCATOR.lock() {
-        allocator.used_frames()
-    } else {
-        panic!("frame allocator not initialized");
+    unsafe {
+        FRAME_ALLOCATOR.usage().used().data()
     }
 }

 /// Allocate a range of frames
 pub fn allocate_frames(count: usize) -> Option<Frame> {
-    if let Some(ref mut allocator) = *ALLOCATOR.lock() {
-        allocator.allocate_frames(count)
-    } else {
-        panic!("frame allocator not initialized");
+    unsafe {
+        FRAME_ALLOCATOR.allocate(FrameCount::new(count)).map(|phys| {
+            Frame::containing_address(PhysicalAddress::new(phys.data()))
+        })
     }
 }

 pub fn allocate_frames_complex(count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)> {
-    if let Some(ref mut allocator) = *ALLOCATOR.lock() {
-        allocator.allocate_frames3(count, flags, strategy, min)
-    } else {
-        panic!("frame allocator not initialized");
+    if count == min && flags == PhysallocFlags::SPACE_64 && strategy.is_none() {
+        return allocate_frames(count).map(|frame| (frame, count));
     }
+
+    println!(
+        "!!!! allocate_frames_complex not implemented for count {}, flags {:?}, strategy {:?}, min {}",
+        count,
+        flags,
+        strategy,
+        min
+    );
+    return None;
 }

 /// Deallocate a range of frames frame
 pub fn deallocate_frames(frame: Frame, count: usize) {
-    if let Some(ref mut allocator) = *ALLOCATOR.lock() {
-        allocator.deallocate_frames(frame, count)
-    } else {
-        panic!("frame allocator not initialized");
+    unsafe {
+        FRAME_ALLOCATOR.free(
+            rmm::PhysicalAddress::new(frame.start_address().get()),
+            FrameCount::new(count)
+        );
     }
 }
@@ -188,17 +118,3 @@ impl Iterator for FrameIter {
         }
     }
 }
-
-pub trait FrameAllocator {
-    fn set_noncore(&mut self, noncore: bool);
-    fn free_frames(&self) -> usize;
-    fn used_frames(&self) -> usize;
-    fn allocate_frames(&mut self, size: usize) -> Option<Frame> {
-        self.allocate_frames2(size, PhysallocFlags::SPACE_64)
-    }
-    fn allocate_frames2(&mut self, size: usize, flags: PhysallocFlags) -> Option<Frame> {
-        self.allocate_frames3(size, flags, None, size).map(|(s, _)| s)
-    }
-    fn allocate_frames3(&mut self, size: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)>;
-    fn deallocate_frames(&mut self, frame: Frame, size: usize);
-}
diff --git a/src/memory/recycle.rs b/src/memory/recycle.rs
deleted file mode 100644
index aac6bb9..0000000
--- a/src/memory/recycle.rs
+++ /dev/null
@@ -1,153 +0,0 @@
-//! Recycle allocator
-//! Uses freed frames if possible, then uses inner allocator
-
-use alloc::vec::Vec;
-
-use crate::paging::PhysicalAddress;
-use super::{Frame, FrameAllocator};
-
-use syscall::{PartialAllocStrategy, PhysallocFlags};
-
-struct Range {
-    base: usize,
-    count: usize,
-}
-
-pub struct RecycleAllocator<T: FrameAllocator> {
-    inner: T,
-    noncore: bool,
-    free: Vec<Range>,
-}
-
-impl<T: FrameAllocator> RecycleAllocator<T> {
-    pub fn new(inner: T) -> Self {
-        Self {
-            inner,
-            noncore: false,
-            free: Vec::new(),
-        }
-    }
-
-    fn free_count(&self) -> usize {
-        self.free.len()
-    }
-
-    fn merge(&mut self, address: usize, count: usize) -> bool {
-        for i in 0 .. self.free.len() {
-            let changed = {
-                let free = &mut self.free[i];
-                if address + count * super::PAGE_SIZE == free.base {
-                    free.base = address;
-                    free.count += count;
-                    true
-                } else if free.base + free.count * super::PAGE_SIZE == address {
-                    free.count += count;
-                    true
-                } else {
-                    false
-                }
-            };
-
-            if changed {
-                //TODO: Use do not use recursion
-                let Range { base: address, count } = self.free[i];
-                if self.merge(address, count) {
-                    self.free.remove(i);
-                }
-                return true;
-            }
-        }
-
-        false
-    }
-
-    fn try_recycle(&mut self, count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(usize, usize)> {
-        let space32 = flags.contains(PhysallocFlags::SPACE_32);
-        let partial_alloc = flags.contains(PhysallocFlags::PARTIAL_ALLOC);
-
-        let mut actual_size = count;
-        let mut current_optimal_index = None;
-        let mut current_optimal = self.free.first()?;
-
-        for (free_range_index, free_range) in self.free.iter().enumerate().skip(1) {
-            // Later entries can be removed faster
-
-            if space32 && free_range.base + count * super::PAGE_SIZE >= 0x1_0000_0000 {
-                // We need a 32-bit physical address and this range is outside that address
-                // space.
-                continue;
-            }
-
-            if free_range.count < count {
-                if partial_alloc && free_range.count >= min && matches!(strategy, Some(PartialAllocStrategy::Greedy)) {
-                    // The free range does not fit the entire requested range, but is still
-                    // at least as large as the minimum range. When using the "greedy"
-                    // strategy, we return immediately.
-                    current_optimal_index = Some(free_range_index);
-                    actual_size = free_range.count;
-                    break;
-                }
-
-                // Range has to fit if we want the entire frame requested.
-                continue;
-            }
-            if free_range.count > current_optimal.count {
-                // Skip this free range if it wasn't smaller than the old one; we do want to use
-                // the smallest range possible to reduce fragmentation as much as possible.
-                continue;
-            }
-
-            // We found a range that fit.
-            current_optimal_index = Some(free_range_index);
-            current_optimal = free_range;
-        }
-        current_optimal_index.map(|idx| (actual_size, idx))
-    }
-}
-
-impl<T: FrameAllocator> FrameAllocator for RecycleAllocator<T> {
-    fn set_noncore(&mut self, noncore: bool) {
-        self.noncore = noncore;
-    }
-
-    fn free_frames(&self) -> usize {
-        self.inner.free_frames() + self.free_count()
-    }
-
-    fn used_frames(&self) -> usize {
-        self.inner.used_frames() - self.free_count()
-    }
-
-    fn allocate_frames3(&mut self, count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)> {
-        // TODO: Cover all different strategies.
-
-        if let Some((actual_size, free_range_idx_to_use)) = self.try_recycle(count, flags, strategy, min) {
-            let (address, remove) = {
-                let free_range = &mut self.free[free_range_idx_to_use];
-                free_range.count -= actual_size;
-                (free_range.base + free_range.count * super::PAGE_SIZE, free_range.count == 0)
-            };
-
-            if remove {
-                self.free.remove(free_range_idx_to_use);
-            }
-
-            //println!("Restoring frame {:?}, {}", frame, count);
-            Some((Frame::containing_address(PhysicalAddress::new(address)), actual_size))
-        } else {
-            //println!("No saved frames {}", count);
-            self.inner.allocate_frames3(count, flags, strategy, min)
-        }
-    }
-
-    fn deallocate_frames(&mut self, frame: Frame, count: usize) {
-        if self.noncore {
-            let address = frame.start_address().get();
-            if ! self.merge(address, count) {
-                self.free.push(Range { base: address, count });
-            }
-        } else {
-            //println!("Could not save frame {:?}, {}", frame, count);
-            self.inner.deallocate_frames(frame, count);
-        }
-    }
-}
diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs
index 88fc33f..ecf4acc 100644
--- a/src/syscall/driver.rs
+++ b/src/syscall/driver.rs
@@ -34,7 +34,6 @@ pub fn inner_physalloc(size: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Result<(usize, usize)> {
diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs
index e6a3474..af2bc0b 100644
--- a/src/syscall/mod.rs
+++ b/src/syscall/mod.rs
@@ -189,8 +189,8 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
         let context = context_lock.read();
         let name_raw = context.name.lock();
         let name = unsafe { core::str::from_utf8_unchecked(&name_raw) };
-        if name == "file:/bin/cargo" || name == "file:/bin/rustc" {
-            if a == SYS_CLOCK_GETTIME {
+        if name.contains("redoxfs") {
+            if a == SYS_CLOCK_GETTIME || a == SYS_YIELD {
                 false
             } else if (a == SYS_WRITE || a == SYS_FSYNC) && (b == 1 || b == 2) {
                 false
diff --git a/src/syscall/process.rs b/src/syscall/process.rs
index de02609..d514380 100644
--- a/src/syscall/process.rs
+++ b/src/syscall/process.rs
@@ -920,7 +920,17 @@ pub fn fexec_kernel(fd: FileHandle, args: Box<[Box<[u8]>]>, vars: Box<[Box<[u8]>
     let elf = match elf::Elf::from(&data) {
         Ok(elf) => elf,
         Err(err) => {
-            println!("fexec: failed to execute {}: {}", fd.into(), err);
+            let contexts = context::contexts();
+            if let Some(context_lock) = contexts.current() {
+                let context = context_lock.read();
+                println!(
+                    "{}: {}: fexec failed to execute {}: {}",
+                    context.id.into(),
+                    unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) },
+                    fd.into(),
+                    err
+                );
+            }
             return Err(Error::new(ENOEXEC));
         }
     };
diff --git a/targets/x86_64-unknown-none.json b/targets/x86_64-unknown-none.json
index b64c08b..92db2c1 100644
--- a/targets/x86_64-unknown-none.json
+++ b/targets/x86_64-unknown-none.json
@@ -3,7 +3,7 @@
     "target-endian": "little",
     "target-pointer-width": "64",
     "target-c-int-width": "32",
-    "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
+    "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128",
     "arch": "x86_64",
     "os": "none",
     "env": "",