Use rmm::PhysicalAddress and rmm::VirtualAddress directly

Jeremy Soller
2021-01-09 21:16:11 -07:00
parent ccddabadf7
commit 334584b3d5
19 changed files with 98 additions and 124 deletions
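The change is mechanical throughout: the kernel's own PhysicalAddress and VirtualAddress newtypes in the paging module, which exposed the raw value through get(), are deleted; the paging module re-exports rmm::{PhysicalAddress, VirtualAddress} instead; and every call site switches from .get() to .data(). A minimal stand-in sketch of the accessor rename (illustrative only; the real types live in the rmm crate and carry more methods than shown here):

    // Stand-in for rmm::PhysicalAddress, reduced to the two methods this diff relies on.
    #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
    pub struct PhysicalAddress(usize);

    impl PhysicalAddress {
        pub fn new(address: usize) -> Self {
            PhysicalAddress(address)
        }

        // The removed kernel-local type named this accessor get();
        // the rmm type names it data(), hence every hunk below.
        pub fn data(&self) -> usize {
            self.0
        }
    }

    fn main() {
        let addr = PhysicalAddress::new(0x1000);
        assert_eq!(addr.data(), 0x1000); // previously: addr.get()
    }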

View File

@@ -79,7 +79,7 @@ impl Madt {
CPU_COUNT.fetch_add(1, Ordering::SeqCst);
// Allocate a stack
-let stack_start = allocate_frames(64).expect("no more frames in acpi stack_start").start_address().get() + crate::KERNEL_OFFSET;
+let stack_start = allocate_frames(64).expect("no more frames in acpi stack_start").start_address().data() + crate::KERNEL_OFFSET;
let stack_end = stack_start + 64 * 4096;
let ap_ready = (TRAMPOLINE + 8) as *mut u64;

View File

@@ -44,7 +44,7 @@ pub fn get_sdt(sdt_address: usize, active_table: &mut ActivePageTable) -> &'stat
{
let page = Page::containing_address(VirtualAddress::new(sdt_address));
if active_table.translate_page(page).is_none() {
-let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get()));
+let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data()));
let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE);
result.flush(active_table);
}
@@ -58,7 +58,7 @@ pub fn get_sdt(sdt_address: usize, active_table: &mut ActivePageTable) -> &'stat
let end_page = Page::containing_address(VirtualAddress::new(sdt_address + sdt.length as usize));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_none() {
-let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get()));
+let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data()));
let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE);
result.flush(active_table);
}

View File

@@ -90,7 +90,7 @@ impl RSDP {
let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
for frame in Frame::range_inclusive(start_frame, end_frame) {
-let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
+let page = Page::containing_address(VirtualAddress::new(frame.start_address().data()));
let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::NO_EXECUTE);
result.flush(active_table);
}

View File

@@ -241,7 +241,7 @@ pub unsafe fn handle_ioapic(active_table: &mut ActivePageTable, madt_ioapic: &'s
let result = active_table.map_to(page, frame, EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::WRITABLE | EntryFlags::NO_CACHE);
result.flush(active_table);
-let ioapic_registers = page.start_address().get() as *const u32;
+let ioapic_registers = page.start_address().data() as *const u32;
let ioapic = IoApic::new(ioapic_registers, madt_ioapic.gsi_base);
assert_eq!(ioapic.regs.lock().id(), madt_ioapic.id, "mismatched ACPI MADT I/O APIC ID, and the ID reported by the I/O APIC");

View File

@@ -45,7 +45,7 @@ pub fn init(active_table: &mut ActivePageTable) {
let start_page = Page::containing_address(VirtualAddress::new(onscreen));
let end_page = Page::containing_address(VirtualAddress::new(onscreen + size * 4));
for page in Page::range_inclusive(start_page, end_page) {
-let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get() - crate::KERNEL_OFFSET));
+let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data() - crate::KERNEL_OFFSET));
let flags = EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::HUGE_PAGE;
let result = active_table.map_to(page, frame, flags);
flush_all.consume(result);

View File

@@ -63,8 +63,8 @@ impl Entry {
}
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
-debug_assert!(frame.start_address().get() & !ADDRESS_MASK == 0);
-self.0 = (frame.start_address().get() as u64) | flags.bits() | (self.0 & COUNTER_MASK);
+debug_assert!(frame.start_address().data() & !ADDRESS_MASK == 0);
+self.0 = (frame.start_address().data() as u64) | flags.bits() | (self.0 & COUNTER_MASK);
}
/// Get bits 52-61 in entry, used as counter for page table

View File

@@ -105,9 +105,9 @@ impl Mapper {
assert!(p1[page.p1_index()].is_unused(),
"{:X}: Set to {:X}: {:?}, requesting {:X}: {:?}",
-page.start_address().get(),
-p1[page.p1_index()].address().get(), p1[page.p1_index()].flags(),
-frame.start_address().get(), flags);
+page.start_address().data(),
+p1[page.p1_index()].address().data(), p1[page.p1_index()].flags(),
+frame.start_address().data(), flags);
p1.increment_entry_count();
p1[page.p1_index()].set(frame, flags | EntryFlags::PRESENT);
MapperFlush::new(page)
@@ -131,7 +131,7 @@ impl Mapper {
/// Identity map a frame
pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) -> MapperFlush {
-let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
+let page = Page::containing_address(VirtualAddress::new(frame.start_address().data()));
self.map_to(page, frame, flags)
}
@@ -145,7 +145,7 @@ impl Mapper {
frame = if let Some(frame) = p1[page.p1_index()].pointed_frame() {
frame
} else {
-panic!("unmap_inner({:X}): frame not found", page.start_address().get())
+panic!("unmap_inner({:X}): frame not found", page.start_address().data())
};
p1.decrement_entry_count();
@@ -155,7 +155,7 @@ impl Mapper {
return frame;
}
} else {
-panic!("unmap_inner({:X}): p1 not found", page.start_address().get());
+panic!("unmap_inner({:X}): p1 not found", page.start_address().data());
}
if let Some(p1_frame) = p2[page.p2_index()].pointed_frame() {
@@ -164,14 +164,14 @@ impl Mapper {
p2[page.p2_index()].set_unused();
deallocate_frames(p1_frame, 1);
} else {
-panic!("unmap_inner({:X}): p1_frame not found", page.start_address().get());
+panic!("unmap_inner({:X}): p1_frame not found", page.start_address().data());
}
if ! p2.is_unused() {
return frame;
}
} else {
-panic!("unmap_inner({:X}): p2 not found", page.start_address().get());
+panic!("unmap_inner({:X}): p2 not found", page.start_address().data());
}
if let Some(p2_frame) = p3[page.p3_index()].pointed_frame() {
@@ -180,14 +180,14 @@ impl Mapper {
p3[page.p3_index()].set_unused();
deallocate_frames(p2_frame, 1);
} else {
-panic!("unmap_inner({:X}): p2_frame not found", page.start_address().get());
+panic!("unmap_inner({:X}): p2_frame not found", page.start_address().data());
}
if ! p3.is_unused() {
return frame;
}
} else {
-panic!("unmap_inner({:X}): p3 not found", page.start_address().get());
+panic!("unmap_inner({:X}): p3 not found", page.start_address().data());
}
if let Some(p3_frame) = p4[page.p4_index()].pointed_frame() {
@@ -196,7 +196,7 @@ impl Mapper {
p4[page.p4_index()].set_unused();
deallocate_frames(p3_frame, 1);
} else {
-panic!("unmap_inner({:X}): p3_frame not found", page.start_address().get());
+panic!("unmap_inner({:X}): p3_frame not found", page.start_address().data());
}
frame
@@ -231,8 +231,8 @@ impl Mapper {
/// Translate a virtual address to a physical one
pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
-let offset = virtual_address.get() % PAGE_SIZE;
+let offset = virtual_address.data() % PAGE_SIZE;
self.translate_page(Page::containing_address(virtual_address))
-.map(|frame| PhysicalAddress::new(frame.start_address().get() + offset))
+.map(|frame| PhysicalAddress::new(frame.start_address().data() + offset))
}
}

View File

@@ -12,6 +12,8 @@ use self::entry::EntryFlags;
use self::mapper::{Mapper, MapperFlushAll};
use self::temporary_page::TemporaryPage;
+pub use rmm::{PhysicalAddress, VirtualAddress};
pub mod entry;
pub mod mapper;
pub mod table;
@@ -262,14 +264,14 @@ impl ActivePageTable {
)),
};
unsafe {
-controlregs::cr3_write(new_table.p4_frame.start_address().get() as u64);
+controlregs::cr3_write(new_table.p4_frame.start_address().data() as u64);
}
old_table
}
pub fn flush(&mut self, page: Page) {
unsafe {
-tlb::flush(page.start_address().get());
+tlb::flush(page.start_address().data());
}
}
@@ -370,35 +372,7 @@ impl InactivePageTable {
}
pub unsafe fn address(&self) -> usize {
-self.p4_frame.start_address().get()
-}
-}
-/// A physical address.
-#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
-pub struct PhysicalAddress(usize);
-impl PhysicalAddress {
-pub fn new(address: usize) -> Self {
-PhysicalAddress(address)
-}
-pub fn get(&self) -> usize {
-self.0
-}
-}
-/// A virtual address.
-#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
-pub struct VirtualAddress(usize);
-impl VirtualAddress {
-pub fn new(address: usize) -> Self {
-VirtualAddress(address)
-}
-pub fn get(&self) -> usize {
-self.0
+self.p4_frame.start_address().data()
}
}
@@ -430,10 +404,10 @@ impl Page {
}
pub fn containing_address(address: VirtualAddress) -> Page {
-//TODO assert!(address.get() < 0x0000_8000_0000_0000 || address.get() >= 0xffff_8000_0000_0000,
-// "invalid address: 0x{:x}", address.get());
+//TODO assert!(address.data() < 0x0000_8000_0000_0000 || address.data() >= 0xffff_8000_0000_0000,
+// "invalid address: 0x{:x}", address.data());
Page {
-number: address.get() / PAGE_SIZE,
+number: address.data() / PAGE_SIZE,
}
}
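Page and Frame keep their number-based representation; only the accessor on the address types changes, as in the containing_address hunk above. A small self-contained sketch of that arithmetic, using stand-in types and a hypothetical address:

    const PAGE_SIZE: usize = 4096;

    #[derive(Copy, Clone, Debug, PartialEq)]
    pub struct VirtualAddress(usize);

    impl VirtualAddress {
        pub fn new(address: usize) -> Self { VirtualAddress(address) }
        pub fn data(&self) -> usize { self.0 }
    }

    #[derive(Copy, Clone, Debug, PartialEq)]
    pub struct Page { number: usize }

    impl Page {
        // Mirrors the updated containing_address: the page number is the
        // address value (now read with data()) divided by the page size.
        pub fn containing_address(address: VirtualAddress) -> Page {
            Page { number: address.data() / PAGE_SIZE }
        }
        pub fn start_address(&self) -> VirtualAddress {
            VirtualAddress::new(self.number * PAGE_SIZE)
        }
    }

    fn main() {
        let page = Page::containing_address(VirtualAddress::new(0xdead_beef));
        // The page's start address is the original address rounded down to 4 KiB.
        assert_eq!(page.start_address().data(), 0xdead_b000);
    }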

View File

@@ -32,7 +32,7 @@ impl TemporaryPage {
/// Maps the temporary page to the given page table frame in the active
/// table. Returns a reference to the now mapped table.
pub fn map_table_frame(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> &mut Table<Level1> {
-unsafe { &mut *(self.map(frame, flags, active_table).get() as *mut Table<Level1>) }
+unsafe { &mut *(self.map(frame, flags, active_table).data() as *mut Table<Level1>) }
}
/// Unmaps the temporary page in the active table.

View File

@@ -73,7 +73,7 @@ impl UserGrants {
// Get last used region
let last = self.inner.iter().next_back().map(Region::from).unwrap_or(Region::new(VirtualAddress::new(0), 0));
// At the earliest, start at grant offset
-let address = cmp::max(last.end_address().get(), crate::USER_GRANT_OFFSET);
+let address = cmp::max(last.end_address().data(), crate::USER_GRANT_OFFSET);
// Create new region
Region::new(VirtualAddress::new(address), size)
}
@@ -88,8 +88,8 @@ impl UserGrants {
let mut requested = Region::new(address, size);
if
-requested.end_address().get() >= crate::PML4_SIZE * 256 // There are 256 PML4 entries reserved for userspace
-&& address.get() % PAGE_SIZE != 0
+requested.end_address().data() >= crate::PML4_SIZE * 256 // There are 256 PML4 entries reserved for userspace
+&& address.data() % PAGE_SIZE != 0
{
// ... but it was invalid
return Err(Error::new(EINVAL));
@@ -99,7 +99,7 @@ impl UserGrants {
// ... but it already exists
if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) {
-println!("grant: conflicts with: {:#x} - {:#x}", grant.start_address().get(), grant.end_address().get());
+println!("grant: conflicts with: {:#x} - {:#x}", grant.start_address().data(), grant.end_address().data());
return Err(Error::new(EEXIST));
} else if flags.contains(MapFlags::MAP_FIXED) {
// TODO: Overwrite existing grant
@@ -146,7 +146,7 @@ impl Region {
pub fn between(start: VirtualAddress, end: VirtualAddress) -> Self {
Self::new(
start,
-end.get().saturating_sub(start.get()),
+end.data().saturating_sub(start.data()),
)
}
@@ -169,12 +169,12 @@ impl Region {
/// Get the last address in the region (inclusive end)
pub fn final_address(&self) -> VirtualAddress {
-VirtualAddress::new(self.start.get() + self.size - 1)
+VirtualAddress::new(self.start.data() + self.size - 1)
}
/// Get the start address of the next region (exclusive end)
pub fn end_address(&self) -> VirtualAddress {
-VirtualAddress::new(self.start.get() + self.size)
+VirtualAddress::new(self.start.data() + self.size)
}
/// Return the exact size of the region
@@ -208,7 +208,7 @@ impl Region {
/// Returns true if the address is within the regions's requested range
pub fn collides(&self, other: Self) -> bool {
-self.start_address() <= other.start_address() && other.end_address().get() - self.start_address().get() < self.size()
+self.start_address() <= other.start_address() && other.end_address().data() - self.start_address().data() < self.size()
}
/// Returns true if the address is within the regions's actual range (so,
/// rounded up to the page size)
@@ -252,8 +252,8 @@ impl Region {
/// Re-base address that lives inside this region, onto a new base region
pub fn rebase(self, new_base: Self, address: VirtualAddress) -> VirtualAddress {
-let offset = address.get() - self.start_address().get();
-let new_start = new_base.start_address().get() + offset;
+let offset = address.data() - self.start_address().data();
+let new_start = new_base.start_address().data() + offset;
VirtualAddress::new(new_start)
}
}
@@ -278,7 +278,7 @@ impl Ord for Region {
impl Debug for Region {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-write!(f, "{:#x}..{:#x} ({:#x} long)", self.start_address().get(), self.end_address().get(), self.size())
+write!(f, "{:#x}..{:#x} ({:#x} long)", self.start_address().data(), self.end_address().data(), self.size())
}
}
@@ -317,9 +317,9 @@ impl Grant {
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(to);
-let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
+let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
-let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get() - to.get() + from.get()));
+let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().data() - to.data() + from.data()));
let result = active_table.map_to(page, frame, flags);
flush_all.consume(result);
}
@@ -344,7 +344,7 @@ impl Grant {
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(to);
-let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
+let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let result = active_table.map(page, flags);
flush_all.consume(result);
@@ -371,7 +371,7 @@ impl Grant {
let mut frames = VecDeque::with_capacity(size/PAGE_SIZE);
let start_page = Page::containing_address(from);
-let end_page = Page::containing_address(VirtualAddress::new(from.get() + size - 1));
+let end_page = Page::containing_address(VirtualAddress::new(from.data() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let frame = active_table.translate_page(page).expect("grant references unmapped memory");
frames.push_back(frame);
@@ -379,7 +379,7 @@ impl Grant {
active_table.with(new_table, temporary_page, |mapper| {
let start_page = Page::containing_address(to);
-let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
+let end_page = Page::containing_address(VirtualAddress::new(to.data() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let frame = frames.pop_front().expect("grant did not find enough frames");
let result = mapper.map_to(page, frame, flags);
@@ -411,13 +411,13 @@ impl Grant {
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(self.region.start);
-let end_page = Page::containing_address(VirtualAddress::new(self.region.start.get() + self.region.size - 1));
+let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let frame = active_table.translate_page(page).expect("grant references unmapped memory");
-let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.region.start.get() + new_start.get()));
+let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
if self.owned {
let result = active_table.map(new_page, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
flush_all.consume(result);
@@ -431,7 +431,7 @@ impl Grant {
if self.owned {
unsafe {
-intrinsics::copy(self.region.start.get() as *const u8, new_start.get() as *mut u8, self.region.size);
+intrinsics::copy(self.region.start.data() as *const u8, new_start.data() as *mut u8, self.region.size);
}
let mut flush_all = MapperFlushAll::new();
@@ -440,7 +440,7 @@ impl Grant {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
-let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.region.start.get() + new_start.get()));
+let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
let result = active_table.remap(new_page, flags);
flush_all.consume(result);
}
@@ -468,7 +468,7 @@ impl Grant {
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(self.region.start);
-let end_page = Page::containing_address(VirtualAddress::new(self.region.start.get() + self.region.size - 1));
+let end_page = Page::containing_address(VirtualAddress::new(self.region.start.data() + self.region.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
@@ -476,7 +476,7 @@ impl Grant {
flush_all.consume(result);
active_table.with(new_table, temporary_page, |mapper| {
-let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.region.start.get() + new_start.get()));
+let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.region.start.data() + new_start.data()));
let result = mapper.map_to(new_page, frame, flags);
// Ignore result due to mapping on inactive table
unsafe { result.ignore(); }
@@ -569,7 +569,7 @@ impl Grant {
/// Also panics if the given region isn't completely contained within the
/// grant. Use `grant.intersect` to find a sub-region that works.
pub fn extract(mut self, region: Region) -> Option<(Option<Grant>, Grant, Option<Grant>)> {
-assert_eq!(region.start_address().get() % PAGE_SIZE, 0, "split_out must be called on page-size aligned start address");
+assert_eq!(region.start_address().data() % PAGE_SIZE, 0, "split_out must be called on page-size aligned start address");
assert_eq!(region.size() % PAGE_SIZE, 0, "split_out must be called on page-size aligned end address");
let before_grant = self.before(region).map(|region| Grant {
@@ -698,7 +698,7 @@ impl Memory {
pub fn pages(&self) -> PageIter {
let start_page = Page::containing_address(self.start);
-let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
+let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
Page::range_inclusive(start_page, end_page)
}
@@ -717,7 +717,7 @@ impl Memory {
if clear {
assert!(self.flags.contains(EntryFlags::WRITABLE));
unsafe {
-intrinsics::write_bytes(self.start_address().get() as *mut u8, 0, self.size);
+intrinsics::write_bytes(self.start_address().data() as *mut u8, 0, self.size);
}
}
}
@@ -747,7 +747,7 @@ impl Memory {
flush_all.consume(result);
active_table.with(new_table, temporary_page, |mapper| {
-let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get()));
+let new_page = Page::containing_address(VirtualAddress::new(page.start_address().data() - self.start.data() + new_start.data()));
let result = mapper.map_to(new_page, frame, self.flags);
// This is not the active table, so the flush can be ignored
unsafe { result.ignore(); }
@@ -781,8 +781,8 @@ impl Memory {
if new_size > self.size {
let mut flush_all = MapperFlushAll::new();
-let start_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size));
-let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + new_size - 1));
+let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size));
+let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size - 1));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_none() {
let result = active_table.map(page, self.flags);
@@ -794,14 +794,14 @@ impl Memory {
if clear {
unsafe {
-intrinsics::write_bytes((self.start.get() + self.size) as *mut u8, 0, new_size - self.size);
+intrinsics::write_bytes((self.start.data() + self.size) as *mut u8, 0, new_size - self.size);
}
}
} else if new_size < self.size {
let mut flush_all = MapperFlushAll::new();
-let start_page = Page::containing_address(VirtualAddress::new(self.start.get() + new_size));
-let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
+let start_page = Page::containing_address(VirtualAddress::new(self.start.data() + new_size));
+let end_page = Page::containing_address(VirtualAddress::new(self.start.data() + self.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_some() {
let result = active_table.unmap(page);
@@ -834,8 +834,8 @@ impl Tls {
/// Load TLS data from master
pub unsafe fn load(&mut self) {
intrinsics::copy(
-self.master.get() as *const u8,
-(self.mem.start_address().get() + self.offset) as *mut u8,
+self.master.data() as *const u8,
+(self.mem.start_address().data() + self.offset) as *mut u8,
self.file_size
);
}
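The grant bookkeeping above leans on Region address arithmetic: end_address() adds the size to the start, and rebase() keeps an address's offset within the old region while swapping in a new base. A stand-in sketch of that arithmetic with the renamed accessor (simplified types, hypothetical values):

    #[derive(Copy, Clone, Debug, PartialEq)]
    pub struct VirtualAddress(usize);

    impl VirtualAddress {
        pub fn new(address: usize) -> Self { VirtualAddress(address) }
        pub fn data(&self) -> usize { self.0 }
    }

    #[derive(Copy, Clone, Debug)]
    pub struct Region { start: VirtualAddress, size: usize }

    impl Region {
        pub fn new(start: VirtualAddress, size: usize) -> Self { Region { start, size } }
        pub fn start_address(&self) -> VirtualAddress { self.start }
        // Start address of the next region (exclusive end), as in the hunks above.
        pub fn end_address(&self) -> VirtualAddress {
            VirtualAddress::new(self.start.data() + self.size)
        }
        // Re-base an address inside this region onto a new base region:
        // keep the offset, swap the base.
        pub fn rebase(self, new_base: Self, address: VirtualAddress) -> VirtualAddress {
            let offset = address.data() - self.start_address().data();
            VirtualAddress::new(new_base.start_address().data() + offset)
        }
    }

    fn main() {
        let old = Region::new(VirtualAddress::new(0x7000_0000), 0x4000);
        let new = Region::new(VirtualAddress::new(0x9000_0000), 0x4000);
        assert_eq!(old.end_address().data(), 0x7000_4000);
        // An address 0x1234 bytes into the old region lands 0x1234 bytes into the new one.
        assert_eq!(old.rebase(new, VirtualAddress::new(0x7000_1234)).data(), 0x9000_1234);
    }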

View File

@@ -61,7 +61,7 @@ pub fn allocate_frames_complex(count: usize, flags: PhysallocFlags, strategy: Op
pub fn deallocate_frames(frame: Frame, count: usize) {
unsafe {
FRAME_ALLOCATOR.free(
-rmm::PhysicalAddress::new(frame.start_address().get()),
+rmm::PhysicalAddress::new(frame.start_address().data()),
FrameCount::new(count)
);
}
@@ -90,7 +90,7 @@ impl Frame {
/// Create a frame containing `address`
pub fn containing_address(address: PhysicalAddress) -> Frame {
Frame {
-number: address.get() / PAGE_SIZE
+number: address.data() / PAGE_SIZE
}
}

View File

@@ -470,7 +470,7 @@ where F: FnOnce(*mut u8) -> Result<()>
active_page_table.with(&mut target_page_table, &mut TemporaryPage::new(start), |mapper| {
let mut inner = || -> Result<()> {
let start = Page::containing_address(offset);
-let end = Page::containing_address(VirtualAddress::new(offset.get() + len - 1));
+let end = Page::containing_address(VirtualAddress::new(offset.data() + len - 1));
for page in Page::range_inclusive(start, end) {
frames.push((
mapper.translate_page(page).ok_or(Error::new(EFAULT))?,
@@ -496,7 +496,7 @@ where F: FnOnce(*mut u8) -> Result<()>
flusher.flush(&mut active_page_table);
-let res = f((start.start_address().get() + offset.get() % PAGE_SIZE) as *mut u8);
+let res = f((start.start_address().data() + offset.data() % PAGE_SIZE) as *mut u8);
// Unmap all the pages (but allow no deallocation!)
let mut page = start;

View File

@@ -52,7 +52,7 @@ impl Scheme for MemoryScheme {
for page in region.pages() {
if active_table.translate_page(page).is_some() {
-println!("page at {:#x} was already mapped", page.start_address().get());
+println!("page at {:#x} was already mapped", page.start_address().data());
return Err(Error::new(EEXIST))
}
}
@@ -60,7 +60,7 @@ impl Scheme for MemoryScheme {
grants.insert(Grant::map(region.start_address(), region.size(), entry_flags(map.flags)));
-Ok(region.start_address().get())
+Ok(region.start_address().data())
}
}
fn fmap_old(&self, id: usize, map: &OldMap) -> Result<usize> {

View File

@@ -324,7 +324,7 @@ impl Scheme for ProcScheme {
let handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let mut memory = handle.data.mem_data().ok_or(Error::new(EBADF))?;
-let value = calc_seek_offset_usize(memory.offset.get(), pos, whence, isize::max_value() as usize)?;
+let value = calc_seek_offset_usize(memory.offset.data(), pos, whence, isize::max_value() as usize)?;
memory.offset = VirtualAddress::new(value as usize);
Ok(value)
}
@@ -363,7 +363,7 @@ impl Scheme for ProcScheme {
Ok(())
})?;
-data.offset = VirtualAddress::new(data.offset.get() + buf.len());
+data.offset = VirtualAddress::new(data.offset.data() + buf.len());
Ok(buf.len())
},
Operation::Regs(kind) => {
@@ -475,7 +475,7 @@ impl Scheme for ProcScheme {
Ok(())
})?;
-data.offset = VirtualAddress::new(data.offset.get() + buf.len());
+data.offset = VirtualAddress::new(data.offset.data() + buf.len());
Ok(buf.len())
},
Operation::Regs(kind) => match kind {

View File

@@ -103,13 +103,13 @@ impl UserInner {
/// Map a readable structure to the scheme's userspace and return the
/// pointer
pub fn capture(&self, buf: &[u8]) -> Result<usize> {
-UserInner::capture_inner(&self.context, 0, buf.as_ptr() as usize, buf.len(), PROT_READ, None).map(|addr| addr.get())
+UserInner::capture_inner(&self.context, 0, buf.as_ptr() as usize, buf.len(), PROT_READ, None).map(|addr| addr.data())
}
/// Map a writeable structure to the scheme's userspace and return the
/// pointer
pub fn capture_mut(&self, buf: &mut [u8]) -> Result<usize> {
-UserInner::capture_inner(&self.context, 0, buf.as_mut_ptr() as usize, buf.len(), PROT_WRITE, None).map(|addr| addr.get())
+UserInner::capture_inner(&self.context, 0, buf.as_mut_ptr() as usize, buf.len(), PROT_WRITE, None).map(|addr| addr.data())
}
fn capture_inner(context_weak: &Weak<RwLock<Context>>, to_address: usize, address: usize, size: usize, flags: MapFlags, desc_opt: Option<FileDescriptor>)
@@ -144,7 +144,7 @@ impl UserInner {
&mut temporary_page
));
-Ok(VirtualAddress::new(to_region.start_address().get() + offset))
+Ok(VirtualAddress::new(to_region.start_address().data() + offset))
}
pub fn release(&self, address: usize) -> Result<()> {
@@ -222,7 +222,7 @@ impl UserInner {
if let Ok(grant_address) = res {
self.funmap.lock().insert(Region::new(grant_address, map.size), VirtualAddress::new(address));
}
-packet.a = Error::mux(res.map(|addr| addr.get()));
+packet.a = Error::mux(res.map(|addr| addr.data()));
} else {
let _ = desc.close();
}
@@ -454,7 +454,7 @@ impl Scheme for UserScheme {
}
funmap.remove(&grant);
let user = Region::new(user_base, grant.size());
-Some(grant.rebase(user, grant_address).get())
+Some(grant.rebase(user, grant_address).data())
} else {
None
}
@@ -492,7 +492,7 @@ impl Scheme for UserScheme {
funmap.insert(after, start);
}
-Some(grant.rebase(user, grant_address).get())
+Some(grant.rebase(user, grant_address).data())
} else {
None
}

View File

@@ -34,7 +34,7 @@ pub fn inner_physalloc(size: usize, flags: PhysallocFlags, strategy: Option<Part
if flags.contains(PhysallocFlags::SPACE_32 | PhysallocFlags::SPACE_64) {
return Err(Error::new(EINVAL));
}
-allocate_frames_complex((size + 4095) / 4096, flags, strategy, (min + 4095) / 4096).ok_or(Error::new(ENOMEM)).map(|(frame, count)| (frame.start_address().get(), count * 4096))
+allocate_frames_complex((size + 4095) / 4096, flags, strategy, (min + 4095) / 4096).ok_or(Error::new(ENOMEM)).map(|(frame, count)| (frame.start_address().data(), count * 4096))
}
pub fn physalloc(size: usize) -> Result<usize> {
enforce_root()?;
@@ -95,7 +95,7 @@ pub fn inner_physmap(physical_address: usize, size: usize, flags: PhysmapFlags)
// TODO: Make this faster than Sonic himself by using le superpowers of BTreeSet
for grant in grants.iter() {
-let start = grant.start_address().get();
+let start = grant.start_address().data();
if to_address + full_size < start {
break;
}
@@ -148,7 +148,7 @@ pub fn virttophys(virtual_address: usize) -> Result<usize> {
let active_table = unsafe { ActivePageTable::new() };
match active_table.translate(VirtualAddress::new(virtual_address)) {
-Some(physical_address) => Ok(physical_address.get()),
+Some(physical_address) => Ok(physical_address.data()),
None => Err(Error::new(EFAULT))
}
}

View File

@@ -541,7 +541,7 @@ pub fn funmap(virtual_address: usize, length: usize) -> Result<usize> {
let scheme = schemes.get(scheme_id).ok_or(Error::new(EBADF))?;
scheme.clone()
};
-let res = scheme.funmap(intersection.start_address().get(), intersection.size());
+let res = scheme.funmap(intersection.start_address().data(), intersection.size());
let _ = desc.close();

View File

@@ -127,15 +127,15 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
for memory_shared in context.image.iter() {
memory_shared.with(|memory| {
let mut new_memory = context::memory::Memory::new(
-VirtualAddress::new(memory.start_address().get() + crate::USER_TMP_OFFSET),
+VirtualAddress::new(memory.start_address().data() + crate::USER_TMP_OFFSET),
memory.size(),
EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
false
);
unsafe {
-intrinsics::copy(memory.start_address().get() as *const u8,
-new_memory.start_address().get() as *mut u8,
+intrinsics::copy(memory.start_address().data() as *const u8,
+new_memory.start_address().data() as *mut u8,
memory.size());
}
@@ -158,8 +158,8 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
);
unsafe {
-intrinsics::copy(stack.start_address().get() as *const u8,
-new_stack.start_address().get() as *mut u8,
+intrinsics::copy(stack.start_address().data() as *const u8,
+new_stack.start_address().data() as *mut u8,
stack.size());
}
@@ -178,8 +178,8 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
);
unsafe {
-intrinsics::copy(sigstack.start_address().get() as *const u8,
-new_sigstack.start_address().get() as *mut u8,
+intrinsics::copy(sigstack.start_address().data() as *const u8,
+new_sigstack.start_address().data() as *mut u8,
sigstack.size());
}
@@ -207,8 +207,8 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
}
} else {
unsafe {
-intrinsics::copy(tls.mem.start_address().get() as *const u8,
-new_tls.mem.start_address().get() as *mut u8,
+intrinsics::copy(tls.mem.start_address().data() as *const u8,
+new_tls.mem.start_address().data() as *mut u8,
tls.mem.size());
}
}
@@ -222,7 +222,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
} else {
let mut grants_set = UserGrants::default();
for grant in context.grants.lock().iter() {
-let start = VirtualAddress::new(grant.start_address().get() + crate::USER_TMP_GRANT_OFFSET - crate::USER_GRANT_OFFSET);
+let start = VirtualAddress::new(grant.start_address().data() + crate::USER_TMP_GRANT_OFFSET - crate::USER_GRANT_OFFSET);
grants_set.insert(grant.secret_clone(start));
}
grants = Arc::new(Mutex::new(grants_set));
@@ -430,7 +430,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
// Move copy of image
for memory_shared in image.iter_mut() {
memory_shared.with(|memory| {
-let start = VirtualAddress::new(memory.start_address().get() - crate::USER_TMP_OFFSET + crate::USER_OFFSET);
+let start = VirtualAddress::new(memory.start_address().data() - crate::USER_TMP_OFFSET + crate::USER_OFFSET);
memory.move_to(start, &mut new_table, &mut temporary_page);
});
}
@@ -442,7 +442,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
let old_grants = mem::replace(&mut *grants, UserGrants::default());
for mut grant in old_grants.inner.into_iter() {
-let start = VirtualAddress::new(grant.start_address().get() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET);
+let start = VirtualAddress::new(grant.start_address().data() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET);
grant.move_to(start, &mut new_table, &mut temporary_page);
grants.insert(grant);
}
@@ -497,7 +497,7 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
//println!("{}: Copy TLS: address 0x{:x}, size 0x{:x}", context.id.into(), tls_addr, tls.mem.size());
tls.mem.move_to(VirtualAddress::new(tls_addr), &mut new_table, &mut temporary_page);
unsafe {
-*(tcb_addr as *mut usize) = tls.mem.start_address().get() + tls.mem.size();
+*(tcb_addr as *mut usize) = tls.mem.start_address().data() + tls.mem.size();
}
context.tls = Some(tls);
} else {
@@ -692,7 +692,7 @@ fn fexec_noreturn(
};
unsafe {
-*(tcb_addr as *mut usize) = tls.mem.start_address().get() + tls.mem.size();
+*(tcb_addr as *mut usize) = tls.mem.start_address().data() + tls.mem.size();
}
tls_opt = Some(tls);

View File

@@ -15,11 +15,11 @@ fn validate(address: usize, size: usize, flags: EntryFlags) -> Result<()> {
for page in Page::range_inclusive(start_page, end_page) {
if let Some(page_flags) = active_table.translate_page_flags(page) {
if ! page_flags.contains(flags) {
-//println!("{:X}: Not {:?}", page.start_address().get(), flags);
+//println!("{:X}: Not {:?}", page.start_address().data(), flags);
return Err(Error::new(EFAULT));
}
} else {
-//println!("{:X}: Not found", page.start_address().get());
+//println!("{:X}: Not found", page.start_address().data());
return Err(Error::new(EFAULT));
}
}