diff --git a/src/call.rs b/src/call.rs
index 76e804f..79fa089 100644
--- a/src/call.rs
+++ b/src/call.rs
@@ -251,7 +251,7 @@ pub fn open<T: AsRef<str>>(path: T, flags: usize) -> Result<usize> {
     unsafe { syscall3(SYS_OPEN, path.as_ref().as_ptr() as usize, path.as_ref().len(), flags) }
 }
 
-/// Allocate pages, linearly in physical memory
+/// Allocate frames, linearly in physical memory.
 ///
 /// # Errors
 ///
@@ -261,6 +261,37 @@ pub unsafe fn physalloc(size: usize) -> Result<usize> {
     syscall1(SYS_PHYSALLOC, size)
 }
 
+/// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain
+/// [`PARTIAL_ALLOC`], this is equivalent to calling `physalloc3` with `min = 1`.
+///
+/// Refer to the simpler [`physalloc`] and the more complex [`physalloc3`], which this convenience
+/// function is based on.
+///
+/// # Errors
+///
+/// * `EPERM` - `uid != 0`
+/// * `ENOMEM` - the system has run out of available memory
pub unsafe fn physalloc2(size: usize, flags: usize) -> Result<usize> {
+    let mut ret = 1usize;
+    physalloc3(size, flags, &mut ret)
+}
+
+/// Allocate frames, linearly in physical memory, with an extra set of flags. If the flags contain
+/// [`PARTIAL_ALLOC`], the `min` parameter specifies the number of frames that have to be allocated
+/// for this operation to succeed. The return value is the offset of the first frame, and `min` is
+/// overwritten with the number of frames actually allocated.
+///
+/// Refer to the simpler [`physalloc`] and the convenience wrapper [`physalloc2`].
+///
+/// # Errors
+///
+/// * `EPERM` - `uid != 0`
+/// * `ENOMEM` - the system has run out of available memory
+/// * `EINVAL` - `min = 0`
+pub unsafe fn physalloc3(size: usize, flags: usize, min: &mut usize) -> Result<usize> {
+    syscall3(SYS_PHYSALLOC3, size, flags, min as *mut usize as usize)
+}
+
 /// Free physically allocated pages
 ///
 /// # Errors
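Review note, not part of the patch: a quick sketch of the new calls from a driver's perspective. It assumes the crate is imported as `syscall` with the `call` items re-exported at the root, a root caller (`EPERM` otherwise), and that `min` uses the same units as `size`, mirroring how `PhysBox::new_partial_allocation` below forwards it.

```rust
use syscall::{physalloc2, physalloc3, physfree, PartialAllocStrategy, PhysallocFlags};

unsafe fn driver_alloc_sketch() -> syscall::Result<()> {
    // A device that can only address 32-bit physical memory:
    let base = physalloc2(64 * 1024, PhysallocFlags::SPACE_32.bits())?;
    physfree(base, 64 * 1024)?;

    // Partial allocation: request 1 MiB but accept as little as 512 KiB.
    // The strategy bits are OR'd into the flags, exactly as
    // PhysBox::new_partial_allocation does; `min` is overwritten with the
    // amount actually allocated.
    let mut min = 512 * 1024;
    let flags = PhysallocFlags::PARTIAL_ALLOC.bits() | PartialAllocStrategy::Optimal as usize;
    let base = physalloc3(1024 * 1024, flags, &mut min)?;
    physfree(base, min)?;

    Ok(())
}
```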
diff --git a/src/flag.rs b/src/flag.rs
index d413e0e..56868b4 100644
--- a/src/flag.rs
+++ b/src/flag.rs
@@ -114,6 +114,70 @@ bitflags! {
         const PHYSMAP_NO_CACHE = 0x0000_0004;
     }
 }
+
+bitflags! {
+    /// Extra flags for [`physalloc2`] or [`physalloc3`].
+    ///
+    /// [`physalloc2`]: ../call/fn.physalloc2.html
+    /// [`physalloc3`]: ../call/fn.physalloc3.html
+    pub struct PhysallocFlags: usize {
+        /// Only allocate memory within the 32-bit physical memory space. This is necessary for
+        /// some devices that may not support 64-bit memory.
+        const SPACE_32 = 0x0000_0001;
+
+        /// The frame that will be allocated is going to reside anywhere in 64-bit space. This
+        /// flag is redundant for the most part, except when overriding some other default.
+        const SPACE_64 = 0x0000_0002;
+
+        /// Do a "partial allocation", which means that not all of the frames specified by the
+        /// frame count `size` actually have to be allocated. If the allocator is unable to find
+        /// a physical memory range large enough, it can instead return whatever range it decides
+        /// is optimal. Thus, instead of failing outright when a driver requests an expensive
+        /// 128MiB physical memory range after physical memory has become fragmented, the driver
+        /// can be given a smaller but more optimal range. If the device supports scatter-gather
+        /// lists, the driver then only has to allocate more ranges, and the device will do
+        /// vectored I/O.
+        ///
+        /// PARTIAL_ALLOC supports different allocation strategies; refer to
+        /// [`Optimal`], [`GreatestRange`], and [`Greedy`].
+        ///
+        /// [`Optimal`]: ./enum.PartialAllocStrategy.html
+        /// [`GreatestRange`]: ./enum.PartialAllocStrategy.html
+        /// [`Greedy`]: ./enum.PartialAllocStrategy.html
+        const PARTIAL_ALLOC = 0x0000_0004;
+    }
+}
+
+/// The bitmask of the partial allocation strategy. Currently three different strategies are
+/// supported. If [`PARTIAL_ALLOC`] is not set, this bitmask is not reserved.
+pub const PARTIAL_ALLOC_STRATEGY_MASK: usize = 0x0003_0000;
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+#[repr(usize)]
+pub enum PartialAllocStrategy {
+    /// The allocator decides the size of the memory range itself, based on e.g. free memory
+    /// ranges and other processes which require large physical memory chunks.
+    Optimal = 0x0001_0000,
+
+    /// The allocator returns the absolute greatest range it can find.
+    GreatestRange = 0x0002_0000,
+
+    /// The allocator returns the first range that fits the minimum count, without searching
+    /// further.
+    Greedy = 0x0003_0000,
+}
+impl Default for PartialAllocStrategy {
+    fn default() -> Self {
+        Self::Optimal
+    }
+}
+
+impl PartialAllocStrategy {
+    pub fn from_raw(raw: usize) -> Option<Self> {
+        match raw {
+            0x0001_0000 => Some(Self::Optimal),
+            0x0002_0000 => Some(Self::GreatestRange),
+            0x0003_0000 => Some(Self::Greedy),
+            _ => None,
+        }
+    }
+}
 
 // The top 48 bits of PTRACE_* are reserved, for now
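Review note, not part of the patch: the strategy bits occupy their own range, so they compose with the flag bits by plain OR and decode back through the mask. A minimal sketch using only items added here, assuming (as `dma.rs` does via `use crate::{...}`) that the `flag` items are re-exported at the crate root.

```rust
use syscall::{PartialAllocStrategy, PhysallocFlags, PARTIAL_ALLOC_STRATEGY_MASK};

/// Combine allocation flags with a strategy; the strategy lives in its own
/// bit range (masked by 0x0003_0000), so a plain OR suffices.
fn encode(flags: PhysallocFlags, strategy: PartialAllocStrategy) -> usize {
    flags.bits() | strategy as usize
}

/// Recover the strategy from a raw flags word.
fn decode_strategy(raw: usize) -> Option<PartialAllocStrategy> {
    PartialAllocStrategy::from_raw(raw & PARTIAL_ALLOC_STRATEGY_MASK)
}

fn main() {
    let raw = encode(PhysallocFlags::PARTIAL_ALLOC, PartialAllocStrategy::GreatestRange);
    assert_eq!(decode_strategy(raw), Some(PartialAllocStrategy::GreatestRange));
}
```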
diff --git a/src/io/dma.rs b/src/io/dma.rs
index d3ffbb9..b356c8a 100644
--- a/src/io/dma.rs
+++ b/src/io/dma.rs
@@ -1,19 +1,76 @@
-use core::{mem, ptr};
+use core::mem::{self, MaybeUninit};
 use core::ops::{Deref, DerefMut};
+use core::{ptr, slice};
 
 use crate::Result;
+use crate::{PartialAllocStrategy, PhysallocFlags};
 
-struct PhysBox {
+/// An RAII guard of a physical memory allocation. Currently all physically allocated memory is
+/// page-aligned and takes up at least 4k of space (on x86_64).
+#[derive(Debug)]
+pub struct PhysBox {
     address: usize,
     size: usize
 }
 
 impl PhysBox {
-    fn new(size: usize) -> Result<PhysBox> {
+    /// Construct a PhysBox from an address and a size.
+    ///
+    /// # Safety
+    /// This function is unsafe because `Self` must refer to a valid allocation when it is
+    /// dropped (the destructor frees the range).
+    pub unsafe fn from_raw_parts(address: usize, size: usize) -> Self {
+        Self {
+            address,
+            size,
+        }
+    }
+
+    /// Retrieve the byte address in physical memory of this allocation.
+    pub fn address(&self) -> usize {
+        self.address
+    }
+
+    /// Retrieve the size in bytes of the allocation.
+    pub fn size(&self) -> usize {
+        self.size
+    }
+
+    /// Allocate physical memory that must reside in 32-bit space.
+    pub fn new_in_32bit_space(size: usize) -> Result<Self> {
+        Self::new_with_flags(size, PhysallocFlags::SPACE_32)
+    }
+
+    pub fn new_with_flags(size: usize, flags: PhysallocFlags) -> Result<Self> {
+        assert!(!flags.contains(PhysallocFlags::PARTIAL_ALLOC));
+
+        let address = unsafe { crate::physalloc2(size, flags.bits())? };
+        Ok(Self {
+            address,
+            size,
+        })
+    }
+
+    /// "Partially" allocate physical memory, in the sense that the allocation may be smaller
+    /// than requested, but never smaller than a minimum limit. This is particularly useful when
+    /// the physical memory space is fragmented and a device supports scatter-gather I/O. In that
+    /// case, the driver can optimistically request e.g. one allocation of 1 MiB, with a minimum
+    /// of 512 KiB. If that first allocation only returns half the size, the driver can do
+    /// another allocation and then let the device use both buffers.
+    pub fn new_partial_allocation(size: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, mut min: usize) -> Result<Self> {
+        debug_assert!(!(flags.contains(PhysallocFlags::PARTIAL_ALLOC) && strategy.is_none()));
+
+        let address = unsafe { crate::physalloc3(size, flags.bits() | strategy.map(|s| s as usize).unwrap_or(0), &mut min)? };
+        Ok(Self {
+            address,
+            size: min,
+        })
+    }
+
+    pub fn new(size: usize) -> Result<Self> {
         let address = unsafe { crate::physalloc(size)? };
-        Ok(PhysBox {
-            address: address,
-            size: size
+        Ok(Self {
+            address,
+            size,
         })
     }
 }
@@ -26,53 +83,100 @@ impl Drop for PhysBox {
 
 pub struct Dma<T: ?Sized> {
     phys: PhysBox,
-    virt: *mut T
+    virt: *mut T,
 }
 
 impl<T> Dma<T> {
-    pub fn new(value: T) -> Result<Dma<T>> {
-        let phys = PhysBox::new(mem::size_of::<T>())?;
-        let virt = unsafe { crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? } as *mut T;
-        unsafe { ptr::write(virt, value); }
-        Ok(Dma {
-            phys: phys,
-            virt: virt
-        })
-    }
-
-    pub fn zeroed() -> Result<Dma<T>> {
-        let phys = PhysBox::new(mem::size_of::<T>())?;
-        let virt = unsafe { crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? } as *mut T;
-        unsafe { ptr::write_bytes(virt as *mut u8, 0, phys.size); }
-        Ok(Dma {
-            phys: phys,
-            virt: virt
-        })
-    }
-}
-impl<T: ?Sized> Dma<T> {
-    pub fn physical(&self) -> usize {
-        self.phys.address
-    }
-}
-
-impl<T> Dma<[T]> {
-    /// Crates a new DMA buffer with a size only known at runtime.
-    /// ## Safety
-    /// * `T` must be properly aligned.
-    /// * `T` must be valid as zeroed (i.e. no NonNull pointers).
-    pub unsafe fn zeroed_unsized(count: usize) -> Result<Self> {
-        let phys = PhysBox::new(mem::size_of::<T>() * count)?;
-        let virt_ptr = crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? as *mut T;
-        ptr::write_bytes(virt_ptr, 0, count);
-
-        let virt = core::slice::from_raw_parts_mut(virt_ptr, count);
+    pub fn from_physbox_uninit(phys: PhysBox) -> Result<Dma<MaybeUninit<T>>> {
+        let virt = unsafe { crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? } as *mut MaybeUninit<T>;
 
         Ok(Dma {
             phys,
             virt,
         })
     }
+    pub fn from_physbox_zeroed(phys: PhysBox) -> Result<Dma<MaybeUninit<T>>> {
+        let this = Self::from_physbox_uninit(phys)?;
+        unsafe { ptr::write_bytes(this.virt as *mut MaybeUninit<u8>, 0, this.phys.size) }
+        Ok(this)
+    }
+
+    pub fn from_physbox(phys: PhysBox, value: T) -> Result<Self> {
+        let this = Self::from_physbox_uninit(phys)?;
+
+        Ok(unsafe {
+            ptr::write(this.virt, MaybeUninit::new(value));
+            this.assume_init()
+        })
+    }
+
+    pub fn new(value: T) -> Result<Self> {
+        let phys = PhysBox::new(mem::size_of::<T>())?;
+        Self::from_physbox(phys, value)
+    }
+    pub fn zeroed() -> Result<Dma<MaybeUninit<T>>> {
+        let phys = PhysBox::new(mem::size_of::<T>())?;
+        Self::from_physbox_zeroed(phys)
+    }
+}
+
+impl<T> Dma<MaybeUninit<T>> {
+    pub unsafe fn assume_init(self) -> Dma<T> {
+        let &Dma { phys: PhysBox { address, size }, virt } = &self;
+        mem::forget(self);
+
+        Dma {
+            phys: PhysBox { address, size },
+            virt: virt as *mut T,
+        }
+    }
+}
+impl<T: ?Sized> Dma<T> {
+    pub fn physical(&self) -> usize {
+        self.phys.address()
+    }
+    pub fn size(&self) -> usize {
+        self.phys.size()
+    }
+    pub fn phys(&self) -> &PhysBox {
+        &self.phys
+    }
+}
+
+impl<T> Dma<[T]> {
+    pub fn from_physbox_uninit_unsized(phys: PhysBox, len: usize) -> Result<Dma<[MaybeUninit<T>]>> {
+        let max_len = phys.size() / mem::size_of::<T>();
+        assert!(len <= max_len);
+
+        Ok(Dma {
+            virt: unsafe { slice::from_raw_parts_mut(crate::physmap(phys.address, phys.size, crate::PHYSMAP_WRITE)? as *mut MaybeUninit<T>, len) } as *mut [MaybeUninit<T>],
+            phys,
+        })
+    }
+    pub fn from_physbox_zeroed_unsized(phys: PhysBox, len: usize) -> Result<Dma<[MaybeUninit<T>]>> {
+        let this = Self::from_physbox_uninit_unsized(phys, len)?;
+        unsafe { ptr::write_bytes(this.virt as *mut MaybeUninit<u8>, 0, this.phys.size()) }
+        Ok(this)
+    }
+
+    /// Creates a new DMA buffer with a size only known at runtime.
+    /// ## Safety
+    /// * `T` must be properly aligned.
+    /// * `T` must be valid as zeroed (i.e. no NonNull pointers).
+    pub unsafe fn zeroed_unsized(count: usize) -> Result<Self> {
+        let phys = PhysBox::new(mem::size_of::<T>() * count)?;
+        Ok(Self::from_physbox_zeroed_unsized(phys, count)?.assume_init())
+    }
+}
+impl<T> Dma<[MaybeUninit<T>]> {
+    pub unsafe fn assume_init(self) -> Dma<[T]> {
+        let &Dma { phys: PhysBox { address, size }, virt } = &self;
+        mem::forget(self);
+
+        Dma {
+            phys: PhysBox { address, size },
+            virt: virt as *mut [T],
+        }
+    }
 }
 
 impl<T: ?Sized> Deref for Dma<T> {
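Review note, not part of the patch: a sketch of how a driver might use the reworked `PhysBox`/`Dma` surface. `CommandList` is a hypothetical device structure, and the `syscall::io` path assumes this crate's usual re-export of the `dma` module.

```rust
use syscall::io::{Dma, PhysBox};
use syscall::{PartialAllocStrategy, PhysallocFlags};

#[repr(C)]
struct CommandList {
    slots: [u64; 32],
}

fn setup() -> syscall::Result<()> {
    // Dma::zeroed now returns Dma<MaybeUninit<T>>; all-zero bits are a valid
    // CommandList, so assume_init is sound here.
    let cmd: Dma<CommandList> = unsafe { Dma::zeroed()?.assume_init() };
    let _reg_value = cmd.physical(); // physical address programmed into the device

    // A scatter-gather buffer: take whatever range the allocator finds
    // optimal, with a 64 KiB floor, then expose it as a zeroed slice of words.
    let phys = PhysBox::new_partial_allocation(
        1024 * 1024,
        PhysallocFlags::SPACE_64 | PhysallocFlags::PARTIAL_ALLOC,
        Some(PartialAllocStrategy::Optimal),
        64 * 1024,
    )?;
    let len = phys.size() / core::mem::size_of::<u32>();
    let _buf: Dma<[u32]> = unsafe { Dma::from_physbox_zeroed_unsized(phys, len)?.assume_init() };
    Ok(())
}
```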
diff --git a/src/number.rs b/src/number.rs
index ec9acfa..4e85c92 100644
--- a/src/number.rs
+++ b/src/number.rs
@@ -59,6 +59,7 @@ pub const SYS_MPROTECT: usize = 125;
 pub const SYS_MKNS: usize = 984;
 pub const SYS_NANOSLEEP: usize = 162;
 pub const SYS_PHYSALLOC: usize = 945;
+pub const SYS_PHYSALLOC3: usize = 9453;
 pub const SYS_PHYSFREE: usize = 946;
 pub const SYS_PHYSMAP: usize = 947;
 pub const SYS_PHYSUNMAP: usize = 948;