From 003bd6a0d2516c5195cab23c81f05fbc7fa7858b Mon Sep 17 00:00:00 2001
From: 4lDO2 <4ldo2@protonmail.com>
Date: Tue, 16 Jun 2020 21:20:00 +0000
Subject: [PATCH] More complex physalloc
---
src/lib.rs | 1 +
src/memory/bump.rs | 49 ++++++++++++++------
src/memory/mod.rs | 16 ++++++-
src/memory/recycle.rs | 103 +++++++++++++++++++++++++++---------------
src/syscall/debug.rs | 4 ++
src/syscall/driver.rs | 26 +++++++++--
src/syscall/mod.rs | 1 +
syscall | 2 +-
8 files changed, 145 insertions(+), 57 deletions(-)
diff --git a/src/lib.rs b/src/lib.rs
index 4f18ee3..8627940 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -48,6 +48,7 @@
#![feature(integer_atomics)]
#![feature(lang_items)]
#![feature(naked_functions)]
+#![feature(matches_macro)]
#![feature(ptr_internals)]
#![feature(thread_local)]
#![no_std]
diff --git a/src/memory/bump.rs b/src/memory/bump.rs
index 5ffb4ad..e824fa4 100644
--- a/src/memory/bump.rs
+++ b/src/memory/bump.rs
@@ -2,9 +2,9 @@
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
use crate::paging::PhysicalAddress;
-
use super::{Frame, FrameAllocator, MemoryArea, MemoryAreaIter};
+use syscall::{PartialAllocStrategy, PhysallocFlags};
pub struct BumpAllocator {
next_free_frame: Frame,
@@ -88,14 +88,20 @@ impl FrameAllocator for BumpAllocator {
count
}
- fn allocate_frames(&mut self, count: usize) -> Option<Frame> {
+ fn allocate_frames3(&mut self, count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)> {
+ // TODO: Comply with flags and allocation strategies better.
if count == 0 {
- None
+ return None;
} else if let Some(area) = self.current_area {
+ let space32 = flags.contains(PhysallocFlags::SPACE_32);
+ let partial_alloc = flags.contains(PhysallocFlags::PARTIAL_ALLOC);
+ let mut actual_size = count;
+
// "Clone" the frame to return it if it's free. Frame doesn't
// implement Clone, but we can construct an identical frame.
- let start_frame = Frame{ number: self.next_free_frame.number };
- let end_frame = Frame { number: self.next_free_frame.number + (count - 1) };
+ let start_frame = Frame { number: self.next_free_frame.number };
+ let mut end_frame = Frame { number: self.next_free_frame.number + (count - 1) };
+ let min_end_frame = if partial_alloc { Frame { number: self.next_free_frame.number + (min - 1) } } else { Frame { number: self.next_free_frame.number + (count - 1) } };
// the last frame of the current area
let current_area_last_frame = {
@@ -103,24 +109,39 @@ impl FrameAllocator for BumpAllocator {
Frame::containing_address(PhysicalAddress::new(address as usize))
};
- if end_frame > current_area_last_frame {
+ if end_frame > current_area_last_frame && min_end_frame > current_area_last_frame {
// all frames of current area are used, switch to next area
self.choose_next_area();
- } else if (start_frame >= self.kernel_start && start_frame <= self.kernel_end)
+ return self.allocate_frames3(count, flags, strategy, min)
+ } else if partial_alloc {
+ end_frame = Frame { number: self.next_free_frame.number + (min - 1) };
+ actual_size = min;
+ }
+
+ if space32 && end_frame.start_address().get() + super::PAGE_SIZE >= 0x1_0000_0000 {
+ // assuming that the bump allocator always advances, and that the memory map is sorted,
+ // when allocating in 32-bit space we can only return None when the free range was
+ // outside 0x0000_0000-0xFFFF_FFFF.
+ //
+ // we don't want to skip an entire memory region just because one 32-bit allocation failed.
+ return None;
+ }
+
+ if (start_frame >= self.kernel_start && start_frame <= self.kernel_end)
|| (end_frame >= self.kernel_start && end_frame <= self.kernel_end) {
// `frame` is used by the kernel
self.next_free_frame = Frame {
number: self.kernel_end.number + 1
};
- } else {
- // frame is unused, increment `next_free_frame` and return it
- self.next_free_frame.number += count;
- return Some(start_frame);
+ // `frame` was not valid, try it again with the updated `next_free_frame`
+ return self.allocate_frames3(count, flags, strategy, min)
}
- // `frame` was not valid, try it again with the updated `next_free_frame`
- self.allocate_frames(count)
+
+ // frame is unused, increment `next_free_frame` and return it
+ self.next_free_frame.number += actual_size;
+ return Some((start_frame, actual_size));
} else {
- None // no free frames left
+ None // no free memory areas left, and thus no frames left
}
}
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index 146ff42..bb45e93 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -7,6 +7,7 @@ use self::bump::BumpAllocator;
use self::recycle::RecycleAllocator;
use spin::Mutex;
+use syscall::{PartialAllocStrategy, PhysallocFlags};
pub mod bump;
pub mod recycle;
@@ -118,6 +119,13 @@ pub fn allocate_frames(count: usize) -> Option<Frame> {
panic!("frame allocator not initialized");
}
}
+pub fn allocate_frames_complex(count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)> {
+ if let Some(ref mut allocator) = *ALLOCATOR.lock() {
+ allocator.allocate_frames3(count, flags, strategy, min)
+ } else {
+ panic!("frame allocator not initialized");
+ }
+}
/// Deallocate a range of frames frame
pub fn deallocate_frames(frame: Frame, count: usize) {
@@ -184,6 +192,12 @@ pub trait FrameAllocator {
fn set_noncore(&mut self, noncore: bool);
fn free_frames(&self) -> usize;
fn used_frames(&self) -> usize;
- fn allocate_frames(&mut self, size: usize) -> Option<Frame>;
+ fn allocate_frames(&mut self, size: usize) -> Option<Frame> {
+ self.allocate_frames2(size, PhysallocFlags::SPACE_64)
+ }
+ fn allocate_frames2(&mut self, size: usize, flags: PhysallocFlags) -> Option<Frame> {
+ self.allocate_frames3(size, flags, None, size).map(|(s, _)| s)
+ }
+ fn allocate_frames3(&mut self, size: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)>;
fn deallocate_frames(&mut self, frame: Frame, size: usize);
}
diff --git a/src/memory/recycle.rs b/src/memory/recycle.rs
index 55a4c81..aac6bb9 100644
--- a/src/memory/recycle.rs
+++ b/src/memory/recycle.rs
@@ -4,13 +4,19 @@
use alloc::vec::Vec;
use crate::paging::PhysicalAddress;
-
use super::{Frame, FrameAllocator};
+use syscall::{PartialAllocStrategy, PhysallocFlags};
+
+struct Range {
+ base: usize,
+ count: usize,
+}
+
pub struct RecycleAllocator<T: FrameAllocator> {
inner: T,
noncore: bool,
- free: Vec<(usize, usize)>,
+ free: Vec<Range>,
}
impl<T: FrameAllocator> RecycleAllocator<T> {
@@ -23,23 +29,19 @@ impl RecycleAllocator {
}
fn free_count(&self) -> usize {
- let mut count = 0;
- for free in self.free.iter() {
- count += free.1;
- }
- count
+ self.free.len()
}
fn merge(&mut self, address: usize, count: usize) -> bool {
for i in 0 .. self.free.len() {
let changed = {
let free = &mut self.free[i];
- if address + count * 4096 == free.0 {
- free.0 = address;
- free.1 += count;
+ if address + count * super::PAGE_SIZE == free.base {
+ free.base = address;
+ free.count += count;
true
- } else if free.0 + free.1 * 4096 == address {
- free.1 += count;
+ } else if free.base + free.count * super::PAGE_SIZE == address {
+ free.count += count;
true
} else {
false
@@ -48,7 +50,7 @@ impl RecycleAllocator {
if changed {
//TODO: Use do not use recursion
- let (address, count) = self.free[i];
+ let Range { base: address, count } = self.free[i];
if self.merge(address, count) {
self.free.remove(i);
}
@@ -58,6 +60,48 @@ impl RecycleAllocator {
false
}
+ fn try_recycle(&mut self, count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(usize, usize)> {
+ let space32 = flags.contains(PhysallocFlags::SPACE_32);
+ let partial_alloc = flags.contains(PhysallocFlags::PARTIAL_ALLOC);
+
+ let mut actual_size = count;
+ let mut current_optimal_index = None;
+ let mut current_optimal = self.free.first()?;
+
+ for (free_range_index, free_range) in self.free.iter().enumerate().skip(1) {
+ // Later entries can be removed faster
+
+ if space32 && free_range.base + count * super::PAGE_SIZE >= 0x1_0000_0000 {
+ // We need a 32-bit physical address and this range is outside that address
+ // space.
+ continue;
+ }
+
+ if free_range.count < count {
+ if partial_alloc && free_range.count >= min && matches!(strategy, Some(PartialAllocStrategy::Greedy)) {
+ // The free range does not fit the entire requested range, but is still
+ // at least as large as the minimum range. When using the "greedy"
+ // strategy, we return immediately.
+ current_optimal_index = Some(free_range_index);
+ actual_size = free_range.count;
+ break;
+ }
+
+ // Range has to fit if we want the entire frame requested.
+ continue;
+ }
+ if free_range.count > current_optimal.count {
+ // Skip this free range if it wasn't smaller than the old one; we do want to use
+ // the smallest range possible to reduce fragmentation as much as possible.
+ continue;
+ }
+
+ // We found a range that fit.
+ current_optimal_index = Some(free_range_index);
+ current_optimal = free_range;
+ }
+ current_optimal_index.map(|idx| (actual_size, idx))
+ }
}
impl<T: FrameAllocator> FrameAllocator for RecycleAllocator<T> {
@@ -73,38 +117,25 @@ impl FrameAllocator for RecycleAllocator {
self.inner.used_frames() - self.free_count()
}
- fn allocate_frames(&mut self, count: usize) -> Option<Frame> {
- let mut small_i = None;
- {
- let mut small = (0, 0);
- for i in 0..self.free.len() {
- let free = self.free[i];
- // Later entries can be removed faster
- if free.1 >= count {
- if free.1 <= small.1 || small_i.is_none() {
- small_i = Some(i);
- small = free;
- }
- }
- }
- }
+ fn allocate_frames3(&mut self, count: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Option<(Frame, usize)> {
+ // TODO: Cover all different strategies.
- if let Some(i) = small_i {
+ if let Some((actual_size, free_range_idx_to_use)) = self.try_recycle(count, flags, strategy, min) {
let (address, remove) = {
- let free = &mut self.free[i];
- free.1 -= count;
- (free.0 + free.1 * 4096, free.1 == 0)
+ let free_range = &mut self.free[free_range_idx_to_use];
+ free_range.count -= actual_size;
+ (free_range.base + free_range.count * super::PAGE_SIZE, free_range.count == 0)
};
if remove {
- self.free.remove(i);
+ self.free.remove(free_range_idx_to_use);
}
//println!("Restoring frame {:?}, {}", frame, count);
- Some(Frame::containing_address(PhysicalAddress::new(address)))
+ Some((Frame::containing_address(PhysicalAddress::new(address)), actual_size))
} else {
//println!("No saved frames {}", count);
- self.inner.allocate_frames(count)
+ self.inner.allocate_frames3(count, flags, strategy, min)
}
}
@@ -112,7 +143,7 @@ impl FrameAllocator for RecycleAllocator {
if self.noncore {
let address = frame.start_address().get();
if ! self.merge(address, count) {
- self.free.push((address, count));
+ self.free.push(Range { base: address, count });
}
} else {
//println!("Could not save frame {:?}, {}", frame, count);
diff --git a/src/syscall/debug.rs b/src/syscall/debug.rs
index e62d034..8b42526 100644
--- a/src/syscall/debug.rs
+++ b/src/syscall/debug.rs
@@ -271,6 +271,10 @@ pub fn format_call(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -
"physalloc({})",
b
),
+ SYS_PHYSALLOC3 => format!(
+ "physalloc3({}, {}, {})",
+ b, c, d,
+ ),
SYS_PHYSFREE => format!(
"physfree({:#X}, {})",
b,
diff --git a/src/syscall/driver.rs b/src/syscall/driver.rs
index e1655fd..7cf168d 100644
--- a/src/syscall/driver.rs
+++ b/src/syscall/driver.rs
@@ -1,11 +1,11 @@
use crate::macros::InterruptStack;
-use crate::memory::{allocate_frames, deallocate_frames, Frame};
+use crate::memory::{allocate_frames_complex, deallocate_frames, Frame};
use crate::paging::{ActivePageTable, PhysicalAddress, VirtualAddress};
use crate::paging::entry::EntryFlags;
use crate::context;
use crate::context::memory::Grant;
use crate::syscall::error::{Error, EFAULT, EINVAL, ENOMEM, EPERM, ESRCH, Result};
-use crate::syscall::flag::{PhysmapFlags, PHYSMAP_WRITE, PHYSMAP_WRITE_COMBINE, PHYSMAP_NO_CACHE};
+use crate::syscall::flag::{PhysallocFlags, PartialAllocStrategy, PhysmapFlags, PHYSMAP_WRITE, PHYSMAP_WRITE_COMBINE, PHYSMAP_NO_CACHE};
fn enforce_root() -> Result<()> {
let contexts = context::contexts();
@@ -30,12 +30,28 @@ pub fn iopl(level: usize, stack: &mut InterruptStack) -> Result {
Ok(0)
}
-pub fn inner_physalloc(size: usize) -> Result<usize> {
- allocate_frames((size + 4095)/4096).ok_or(Error::new(ENOMEM)).map(|frame| frame.start_address().get())
+pub fn inner_physalloc(size: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, min: usize) -> Result<(usize, usize)> {
+ if flags.contains(PhysallocFlags::SPACE_32 | PhysallocFlags::SPACE_64) {
+ return Err(Error::new(EINVAL));
+ }
+ let space32 = flags.contains(PhysallocFlags::SPACE_32);
+ allocate_frames_complex((size + 4095) / 4096, flags, strategy, (min + 4095) / 4096).ok_or(Error::new(ENOMEM)).map(|(frame, count)| (frame.start_address().get(), count * 4096))
}
pub fn physalloc(size: usize) -> Result<usize> {
enforce_root()?;
- inner_physalloc(size)
+ inner_physalloc(size, PhysallocFlags::SPACE_64, None, size).map(|(base, _)| base)
+}
+pub fn physalloc3(size: usize, flags_raw: usize, min: &mut usize) -> Result<usize> {
+ enforce_root()?;
+ let flags = PhysallocFlags::from_bits(flags_raw & !syscall::PARTIAL_ALLOC_STRATEGY_MASK).ok_or(Error::new(EINVAL))?;
+ let strategy = if flags.contains(PhysallocFlags::PARTIAL_ALLOC) {
+ Some(PartialAllocStrategy::from_raw(flags_raw & syscall::PARTIAL_ALLOC_STRATEGY_MASK).ok_or(Error::new(EINVAL))?)
+ } else {
+ None
+ };
+ let (base, count) = inner_physalloc(size, flags, strategy, *min)?;
+ *min = count;
+ Ok(base)
}
pub fn inner_physfree(physical_address: usize, size: usize) -> Result {
diff --git a/src/syscall/mod.rs b/src/syscall/mod.rs
index de4014a..bb01f60 100644
--- a/src/syscall/mod.rs
+++ b/src/syscall/mod.rs
@@ -152,6 +152,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
SYS_SIGRETURN => sigreturn(),
SYS_PIPE2 => pipe2(validate_slice_mut(b as *mut usize, 2)?, c),
SYS_PHYSALLOC => physalloc(b),
+ SYS_PHYSALLOC3 => physalloc3(b, c, &mut validate_slice_mut(d as *mut usize, 1)?[0]),
SYS_PHYSFREE => physfree(b, c),
SYS_PHYSMAP => physmap(b, c, PhysmapFlags::from_bits_truncate(d)),
SYS_PHYSUNMAP => physunmap(b),
diff --git a/syscall b/syscall
index 1c637e7..9ecdc11 160000
--- a/syscall
+++ b/syscall
@@ -1 +1 @@
-Subproject commit 1c637e72b2f3be8e8f942372e8414101e463df98
+Subproject commit 9ecdc11d73677477b37567a47af1633478093cbb