Migrate misc x86_64 parts to RMM.

This commit is contained in:
4lDO2
2022-07-17 14:10:51 +02:00
parent 2bb019bc44
commit 302e55098c
6 changed files with 55 additions and 61 deletions

View File

@@ -8,7 +8,7 @@ use crate::acpi::madt::{self, Madt, MadtEntry, MadtIoApic, MadtIntSrcOverride};
use crate::arch::interrupt::irq;
use crate::memory::Frame;
use crate::paging::{ActivePageTable, Page, PageFlags, PhysicalAddress, VirtualAddress};
use crate::paging::{KernelMapper, Page, PageFlags, PhysicalAddress, RmmA, RmmArch};
use crate::paging::entry::EntryFlags;
use super::pic;
@@ -229,16 +229,20 @@ pub fn src_overrides() -> &'static [Override] {
}
#[cfg(feature = "acpi")]
pub unsafe fn handle_ioapic(active_table: &mut ActivePageTable, madt_ioapic: &'static MadtIoApic) {
pub unsafe fn handle_ioapic(mapper: &mut KernelMapper, madt_ioapic: &'static MadtIoApic) {
// map the I/O APIC registers
let frame = Frame::containing_address(PhysicalAddress::new(madt_ioapic.address as usize));
let page = Page::containing_address(VirtualAddress::new(madt_ioapic.address as usize + crate::PHYS_OFFSET));
let page = Page::containing_address(RmmA::phys_to_virt(frame.start_address()));
assert_eq!(active_table.translate_page(page), None);
assert!(mapper.translate(page.start_address()).is_none());
let result = active_table.map_to(page, frame, PageFlags::new().write(true).custom_flag(EntryFlags::NO_CACHE.bits(), true));
result.flush();
mapper
.get_mut()
.expect("expected KernelMapper not to be locked re-entrant while mapping I/O APIC memory")
.map_phys(page.start_address(), frame.start_address(), PageFlags::new().write(true).custom_flag(EntryFlags::NO_CACHE.bits(), true))
.expect("failed to map I/O APIC")
.flush();
let ioapic_registers = page.start_address().data() as *const u32;
let ioapic = IoApic::new(ioapic_registers, madt_ioapic.gsi_base);
@@ -280,7 +284,7 @@ pub unsafe fn handle_src_override(src_override: &'static MadtIntSrcOverride) {
SRC_OVERRIDES.get_or_insert_with(Vec::new).push(over);
}
pub unsafe fn init(active_table: &mut ActivePageTable) {
pub unsafe fn init(active_table: &mut KernelMapper) {
let bsp_apic_id = x86::cpuid::CpuId::new().get_feature_info().unwrap().initial_local_apic_id(); // TODO
// search the madt for all IOAPICs.

View File

@@ -3,15 +3,14 @@ use core::intrinsics::{volatile_load, volatile_store};
use x86::cpuid::CpuId;
use x86::msr::*;
use crate::memory::Frame;
use crate::paging::{ActivePageTable, PhysicalAddress, Page, PageFlags, VirtualAddress};
use crate::paging::{KernelMapper, PhysicalAddress, PageFlags, RmmA, RmmArch};
pub static mut LOCAL_APIC: LocalApic = LocalApic {
address: 0,
x2: false
};
pub unsafe fn init(active_table: &mut ActivePageTable) {
pub unsafe fn init(active_table: &mut KernelMapper) {
LOCAL_APIC.init(active_table);
}
@@ -41,21 +40,25 @@ pub fn bsp_apic_id() -> Option<u32> {
}
impl LocalApic {
unsafe fn init(&mut self, active_table: &mut ActivePageTable) {
self.address = (rdmsr(IA32_APIC_BASE) as usize & 0xFFFF_0000) + crate::PHYS_OFFSET;
unsafe fn init(&mut self, mapper: &mut KernelMapper) {
let mapper = mapper.get_mut().expect("expected KernelMapper not to be locked re-entrant while initializing LAPIC");
let physaddr = PhysicalAddress::new(rdmsr(IA32_APIC_BASE) as usize & 0xFFFF_0000);
let virtaddr = RmmA::phys_to_virt(physaddr);
self.address = virtaddr.data();
self.x2 = CpuId::new().get_feature_info().unwrap().has_x2apic();
if ! self.x2 {
let page = Page::containing_address(VirtualAddress::new(self.address));
let frame = Frame::containing_address(PhysicalAddress::new(self.address - crate::PHYS_OFFSET));
log::info!("Detected xAPIC at {:#x}", frame.start_address().data());
if active_table.translate_page(page).is_some() {
log::info!("Detected xAPIC at {:#x}", physaddr.data());
if let Some((_entry, _, flush)) = mapper.unmap_phys(virtaddr) {
// Unmap xAPIC page if already mapped
let (result, _frame) = active_table.unmap_return(page, true);
result.flush();
flush.flush();
}
let result = active_table.map_to(page, frame, PageFlags::new().write(true));
result.flush();
mapper
.map_phys(virtaddr, physaddr, PageFlags::new().write(true))
.expect("failed to map local APIC memory")
.flush();
} else {
log::info!("Detected x2APIC");
}

View File

@@ -1,5 +1,3 @@
use crate::paging::ActivePageTable;
pub mod cpu;
pub mod ioapic;
pub mod local_apic;
@@ -12,13 +10,15 @@ pub mod hpet;
#[cfg(feature = "system76_ec_debug")]
pub mod system76_ec;
pub unsafe fn init(active_table: &mut ActivePageTable) {
use crate::paging::KernelMapper;
pub unsafe fn init() {
pic::init();
local_apic::init(active_table);
local_apic::init(&mut KernelMapper::lock());
}
pub unsafe fn init_after_acpi(_active_table: &mut ActivePageTable) {
pub unsafe fn init_after_acpi() {
// this will disable the IOAPIC if needed.
//ioapic::init(active_table);
//ioapic::init(mapper);
}
#[cfg(feature = "acpi")]

View File

@@ -10,7 +10,6 @@ use x86::dtables::{self, DescriptorTablePointer};
use crate::interrupt::*;
use crate::ipi::IpiKind;
use crate::paging::PageFlags;
use spin::RwLock;
@@ -172,32 +171,11 @@ pub unsafe fn init_generic(is_bsp: bool, idt: &mut Idt) {
let frames = crate::memory::allocate_frames(page_count)
.expect("failed to allocate pages for backup interrupt stack");
// Map them linearly, i.e. PHYS_OFFSET + physaddr.
let base_address = {
use crate::memory::{Frame, PhysicalAddress};
use crate::paging::{ActivePageTable, Page, VirtualAddress};
use crate::paging::{RmmA, RmmArch};
let base_virtual_address = VirtualAddress::new(frames.start_address().data() + crate::PHYS_OFFSET);
let mut active_table = ActivePageTable::new(base_virtual_address.kind());
// Physical pages are mapped linearly. So is the linearly mapped virtual memory.
let base_address = RmmA::phys_to_virt(frames.start_address());
for i in 0..page_count {
let virtual_address = VirtualAddress::new(base_virtual_address.data() + i * crate::memory::PAGE_SIZE);
let physical_address = PhysicalAddress::new(frames.start_address().data() + i * crate::memory::PAGE_SIZE);
let page = Page::containing_address(virtual_address);
let flags = PageFlags::new().write(true);
let flusher = if let Some(already_mapped) = active_table.translate_page(page) {
assert_eq!(already_mapped.start_address(), physical_address, "address already mapped, but non-linearly");
active_table.remap(page, flags)
} else {
active_table.map_to(page, Frame::containing_address(physical_address), flags)
};
flusher.flush();
}
base_virtual_address
};
// Stack always grows downwards.
let address = base_address.data() + BACKUP_STACK_SIZE;

View File

@@ -1,8 +1,9 @@
use core::{mem, str};
use goblin::elf::sym;
use rustc_demangle::demangle;
use crate::paging::{ActivePageTable, TableKind, VirtualAddress};
use crate::{context, paging::{KernelMapper, VirtualAddress}};
/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
@@ -13,12 +14,14 @@ pub unsafe fn stack_trace() {
println!("TRACE: {:>016X}", rbp);
//Maximum 64 frames
let active_table = ActivePageTable::new(TableKind::User);
let mapper = KernelMapper::lock();
for _frame in 0..64 {
if let Some(rip_rbp) = rbp.checked_add(mem::size_of::<usize>()) {
let rbp_virt = VirtualAddress::new(rbp);
let rip_rbp_virt = VirtualAddress::new(rip_rbp);
if rbp_virt.is_canonical() && rip_rbp_virt.is_canonical() && active_table.translate(rbp_virt).is_some() && active_table.translate(rip_rbp_virt).is_some() {
if rbp_virt.is_canonical() && rip_rbp_virt.is_canonical() && mapper.translate(rbp_virt).is_some() && mapper.translate(rip_rbp_virt).is_some() {
let rip = *(rip_rbp as *const usize);
if rip == 0 {
println!(" {:>016X}: EMPTY RETURN", rbp);

View File

@@ -18,7 +18,7 @@ use crate::gdt;
use crate::idt;
use crate::interrupt;
use crate::log::{self, info};
use crate::paging;
use crate::paging::{self, KernelMapper};
/// Test of zero values in BSS.
static BSS_TEST_ZERO: usize = 0;
@@ -131,7 +131,7 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
);
// Initialize paging
let (mut active_table, tcb_offset) = paging::init(0);
let tcb_offset = paging::init(0);
// Set up GDT after paging with TLS
gdt::init_paging(0, tcb_offset, args.stack_base + args.stack_size);
@@ -158,7 +158,7 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
BSP_READY.store(false, Ordering::SeqCst);
// Setup kernel heap
allocator::init(&mut active_table);
allocator::init();
// Set up double buffer for graphical debug now that heap is available
#[cfg(feature = "graphical_debug")]
@@ -170,17 +170,17 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
log::init();
// Initialize devices
device::init(&mut active_table);
device::init();
// Read ACPI tables, starts APs
#[cfg(feature = "acpi")]
{
acpi::init(&mut active_table, if args.acpi_rsdps_base != 0 && args.acpi_rsdps_size > 0 {
acpi::init(if args.acpi_rsdps_base != 0 && args.acpi_rsdps_size > 0 {
Some(((args.acpi_rsdps_base + crate::PHYS_OFFSET) as u64, args.acpi_rsdps_size as u64))
} else {
None
});
device::init_after_acpi(&mut active_table);
device::init_after_acpi();
}
// Initialize all of the non-core devices not otherwise needed to complete initialization
@@ -230,7 +230,13 @@ pub unsafe extern fn kstart_ap(args_ptr: *const KernelArgsAp) -> ! {
idt::init();
// Initialize paging
let tcb_offset = paging::init_ap(cpu_id, bsp_table);
let tcb_offset = {
use crate::paging::{PageMapper, PhysicalAddress};
use crate::rmm::FRAME_ALLOCATOR;
let mut mapper = KernelMapper::lock_for_manual_mapper(cpu_id, PageMapper::new(PhysicalAddress::new(bsp_table), FRAME_ALLOCATOR));
paging::init_ap(cpu_id, &mut mapper)
};
// Set up GDT with TLS
gdt::init_paging(cpu_id as u32, tcb_offset, stack_end);