Merge branch 'memory' into 'master'

Implement funmap2

See merge request redox-os/kernel!139
This commit is contained in:
Jeremy Soller
2020-07-30 13:36:59 +00:00
10 changed files with 702 additions and 269 deletions

View File

@@ -11,7 +11,7 @@ use crate::arch::{interrupt::InterruptStack, paging::PAGE_SIZE};
use crate::common::unique::Unique;
use crate::context::arch;
use crate::context::file::{FileDescriptor, FileDescription};
use crate::context::memory::{Grant, Memory, SharedMemory, Tls};
use crate::context::memory::{UserGrants, Memory, SharedMemory, Tls};
use crate::ipi::{ipi, IpiKind, IpiTarget};
use crate::scheme::{SchemeNamespace, FileHandle};
use crate::sync::WaitMap;
@@ -230,7 +230,7 @@ pub struct Context {
/// User Thread local storage
pub tls: Option<Tls>,
/// User grants
pub grants: Arc<Mutex<Vec<Grant>>>,
pub grants: Arc<Mutex<UserGrants>>,
/// The name of the context
pub name: Arc<Mutex<Box<[u8]>>>,
/// The current working directory
@@ -289,7 +289,7 @@ impl Context {
stack: None,
sigstack: None,
tls: None,
grants: Arc::new(Mutex::new(Vec::new())),
grants: Arc::new(Mutex::new(UserGrants::default())),
name: Arc::new(Mutex::new(Vec::new().into_boxed_slice())),
cwd: Arc::new(Mutex::new(Vec::new())),
files: Arc::new(Mutex::new(Vec::new())),

View File

@@ -1,7 +1,15 @@
use alloc::collections::{BTreeSet, VecDeque};
use alloc::sync::{Arc, Weak};
use alloc::collections::VecDeque;
use core::borrow::Borrow;
use core::cmp::{self, Eq, Ordering, PartialEq, PartialOrd};
use core::fmt::{self, Debug};
use core::intrinsics;
use core::ops::{Deref, DerefMut};
use spin::Mutex;
use syscall::{
flag::MapFlags,
error::*,
};
use crate::arch::paging::PAGE_SIZE;
use crate::context::file::FileDescriptor;
@@ -12,10 +20,279 @@ use crate::paging::entry::EntryFlags;
use crate::paging::mapper::MapperFlushAll;
use crate::paging::temporary_page::TemporaryPage;
#[derive(Debug)]
pub struct Grant {
/// Round down to the nearest multiple of page size
pub fn round_down_pages(number: usize) -> usize {
number - number % PAGE_SIZE
}
/// Round up to the nearest multiple of page size
///
/// NOTE(review): the addition overflows (panicking in debug builds) when
/// `number > usize::MAX - PAGE_SIZE + 1`; callers pass sizes far below that,
/// but worth confirming at the call sites.
pub fn round_up_pages(number: usize) -> usize {
    round_down_pages(number + PAGE_SIZE - 1)
}
/// Translate syscall `MapFlags` protection bits into page-table `EntryFlags`.
///
/// Every mapping is PRESENT and USER_ACCESSIBLE; PROT_WRITE adds WRITABLE,
/// and the *absence* of PROT_EXEC adds NO_EXECUTE.
pub fn entry_flags(flags: MapFlags) -> EntryFlags {
    let mut result = EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE;

    if flags.contains(MapFlags::PROT_WRITE) {
        result |= EntryFlags::WRITABLE;
    }

    if flags.contains(MapFlags::PROT_READ) {
        //TODO: PROT_READ (no dedicated page-table bit is set for it here)
    }

    if !flags.contains(MapFlags::PROT_EXEC) {
        result |= EntryFlags::NO_EXECUTE;
    }

    result
}
/// The set of all grants (memory mappings such as mmap regions and
/// scheme-shared buffers) belonging to one context.
#[derive(Debug, Default)]
pub struct UserGrants {
    // Ordered by start address (see the Ord impls on Region/Grant), so
    // address lookups are O(log n) range queries rather than linear scans.
    pub inner: BTreeSet<Grant>,
}
impl UserGrants {
    /// Returns the grant, if any, which occupies the specified address
    pub fn contains(&self, address: VirtualAddress) -> Option<&Grant> {
        let byte = Region::byte(address);
        // The only candidate is the last grant starting at or before
        // `address`; check whether it actually extends over that byte.
        self.inner
            .range(..=byte)
            .next_back()
            .filter(|existing| existing.occupies(byte))
    }
    /// Returns an iterator over all grants that occupy some part of the
    /// requested region
    pub fn conflicts<'a>(&'a self, requested: Region) -> impl Iterator<Item = &'a Grant> + 'a {
        // Begin at the grant covering the requested start (if any), then walk
        // forward while grants still intersect the request.
        let start = self.contains(requested.start_address());
        let start_region = start.map(Region::from).unwrap_or(requested);
        self
            .inner
            .range(start_region..)
            .take_while(move |region| !region.intersect(requested).is_empty())
    }
    /// Return a free region with the specified size
    pub fn find_free(&self, size: usize) -> Region {
        // Get last used region
        let last = self.inner.iter().next_back().map(Region::from).unwrap_or(Region::new(VirtualAddress::new(0), 0));
        // At the earliest, start at grant offset
        let address = cmp::max(last.end_address().get(), crate::USER_GRANT_OFFSET);
        // Create new region
        Region::new(VirtualAddress::new(address), size)
    }
    /// Return a free region, respecting the user's hinted address and flags. Address may be null.
    ///
    /// # Errors
    ///
    /// * `EINVAL` — the hint is unaligned, or the region would reach past the
    ///   256 PML4 entries reserved for userspace.
    /// * `EEXIST` — `MAP_FIXED_NOREPLACE` was given and the region is taken.
    /// * `EOPNOTSUPP` — `MAP_FIXED` was given and the region is taken
    ///   (overwriting an existing grant is not implemented yet).
    pub fn find_free_at(&mut self, address: VirtualAddress, size: usize, flags: MapFlags) -> Result<Region> {
        if address == VirtualAddress::new(0) {
            // Free hands!
            return Ok(self.find_free(size));
        }

        // The user wished to have this region...
        let mut requested = Region::new(address, size);

        // FIX: these checks were joined with `&&`, which accepted unaligned
        // hints inside the userspace range and aligned hints outside it.
        // Either condition alone makes the request invalid.
        if
            requested.end_address().get() >= crate::PML4_SIZE * 256 // There are 256 PML4 entries reserved for userspace
            || address.get() % PAGE_SIZE != 0
        {
            // ... but it was invalid
            return Err(Error::new(EINVAL));
        }

        if let Some(grant) = self.contains(requested.start_address()) {
            // ... but it already exists
            if flags.contains(MapFlags::MAP_FIXED_NOREPLACE) {
                println!("grant: conflicts with: {:#x} - {:#x}", grant.start_address().get(), grant.end_address().get());
                return Err(Error::new(EEXIST));
            } else if flags.contains(MapFlags::MAP_FIXED) {
                // TODO: Overwrite existing grant
                return Err(Error::new(EOPNOTSUPP));
            } else {
                // TODO: Find grant close to requested address?
                requested = self.find_free(requested.size());
            }
        }

        Ok(requested)
    }
}
// Let UserGrants be used directly as the underlying set (insert, take, iter,
// range, ...). NOTE(review): Deref to a collection is usually discouraged,
// but call sites across the kernel rely on it here.
impl Deref for UserGrants {
    type Target = BTreeSet<Grant>;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
// Mutable counterpart of the Deref impl: exposes BTreeSet's mutating API
// (insert, take, remove) directly on UserGrants.
impl DerefMut for UserGrants {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
/// A contiguous region of virtual memory: a start address plus a size in
/// bytes. The comparison traits below consider only the start address, so a
/// `Region` can double as an ordered-set lookup key for address ranges.
#[derive(Clone, Copy)]
pub struct Region {
    // Start address of the region (inclusive).
    start: VirtualAddress,
    // Length of the region in bytes; not necessarily page-aligned.
    size: usize,
}
impl Region {
    /// Create a new region with the given size
    pub fn new(start: VirtualAddress, size: usize) -> Self {
        Self { start, size }
    }
    /// Create a new region spanning exactly one byte
    pub fn byte(address: VirtualAddress) -> Self {
        Self::new(address, 1)
    }
    /// Create a new region spanning between the start and end address
    /// (exclusive end)
    pub fn between(start: VirtualAddress, end: VirtualAddress) -> Self {
        // saturating_sub yields an empty region instead of underflowing when
        // end <= start.
        Self::new(
            start,
            end.get().saturating_sub(start.get()),
        )
    }
    /// Return the part of the specified region that intersects with self.
    pub fn intersect(&self, other: Self) -> Self {
        // Empty (size 0) when the regions are disjoint.
        Self::between(
            cmp::max(self.start_address(), other.start_address()),
            cmp::min(self.end_address(), other.end_address()),
        )
    }
    /// Get the start address of the region
    pub fn start_address(&self) -> VirtualAddress {
        self.start
    }
    /// Set the start address of the region
    pub fn set_start_address(&mut self, start: VirtualAddress) {
        self.start = start;
    }
    /// Get the last address in the region (inclusive end)
    ///
    /// NOTE(review): underflows for an empty (size 0) region; callers must
    /// not ask an empty region for its final address.
    pub fn final_address(&self) -> VirtualAddress {
        VirtualAddress::new(self.start.get() + self.size - 1)
    }
    /// Get the start address of the next region (exclusive end)
    pub fn end_address(&self) -> VirtualAddress {
        VirtualAddress::new(self.start.get() + self.size)
    }
    /// Return the exact size of the region
    pub fn size(&self) -> usize {
        self.size
    }
    /// Return true if the size of this region is zero. Grants with such a
    /// region should never exist.
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }
    /// Set the exact size of the region
    pub fn set_size(&mut self, size: usize) {
        self.size = size;
    }
    /// Round region up to nearest page size
    pub fn round(self) -> Self {
        Self {
            size: round_up_pages(self.size),
            ..self
        }
    }
    /// Return the size of the grant in multiples of the page size
    pub fn full_size(&self) -> usize {
        self.round().size()
    }
    /// Returns true if the other region starts within this region's requested
    /// range
    ///
    /// FIX: the previous check compared `other.end_address()` against the
    /// size with `<`, which excluded this region's final byte — e.g.
    /// `Region(0, 2).collides(Region(1, 1))` returned false even though
    /// address 1 is inside the region (see the `region_collides` test at the
    /// bottom of this file, which this version satisfies).
    pub fn collides(&self, other: Self) -> bool {
        // The first clause guarantees the subtraction cannot underflow.
        self.start_address() <= other.start_address() && other.start_address().get() - self.start_address().get() < self.size()
    }
    /// Returns true if the address is within the region's actual range (so,
    /// rounded up to the page size)
    pub fn occupies(&self, other: Self) -> bool {
        self.round().collides(other)
    }
    /// Return all pages containing a chunk of the region
    ///
    /// FIX: this previously passed the *exclusive* `end_address()` to
    /// `range_inclusive`, so a page-aligned region also yielded the page
    /// after its last byte (e.g. causing a spurious EEXIST in the mmap path
    /// when the adjacent page was mapped). Use the inclusive final address,
    /// matching the start/end page computations elsewhere in this file.
    pub fn pages(&self) -> PageIter {
        Page::range_inclusive(
            Page::containing_address(self.start_address()),
            Page::containing_address(self.final_address())
        )
    }
    /// Returns the region from the start of self until the start of the specified region.
    ///
    /// # Panics
    ///
    /// Panics if the given region starts before self
    pub fn before(self, region: Self) -> Option<Self> {
        assert!(self.start_address() <= region.start_address());
        // None when the regions share a start address (empty "before" part).
        Some(Self::between(
            self.start_address(),
            region.start_address(),
        )).filter(|reg| !reg.is_empty())
    }
    /// Returns the region from the end of the given region until the end of self.
    ///
    /// # Panics
    ///
    /// Panics if self ends before the given region
    pub fn after(self, region: Self) -> Option<Self> {
        assert!(region.end_address() <= self.end_address());
        // None when the regions share an end address (empty "after" part).
        Some(Self::between(
            region.end_address(),
            self.end_address(),
        )).filter(|reg| !reg.is_empty())
    }
    /// Re-base address that lives inside this region, onto a new base region
    ///
    /// NOTE(review): assumes `address >= self.start` — there is no bounds
    /// check, and an address below the region underflows.
    pub fn rebase(self, new_base: Self, address: VirtualAddress) -> VirtualAddress {
        let offset = address.get() - self.start_address().get();
        let new_start = new_base.start_address().get() + offset;
        VirtualAddress::new(new_start)
    }
}
// Regions compare by start address ALONE: two regions with the same start
// but different sizes are "equal". This is deliberate — it is what lets a
// Region act as a BTreeSet lookup key for "the grant at/near this address".
impl PartialEq for Region {
    fn eq(&self, other: &Self) -> bool {
        self.start.eq(&other.start)
    }
}
impl Eq for Region {}
impl PartialOrd for Region {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.start.partial_cmp(&other.start)
    }
}
impl Ord for Region {
    fn cmp(&self, other: &Self) -> Ordering {
        self.start.cmp(&other.start)
    }
}
// Human-readable form: `start..end (size long)`, all in hex.
impl Debug for Region {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:#x}..{:#x} ({:#x} long)", self.start_address().get(), self.end_address().get(), self.size())
    }
}
// Cheap conversion: a grant's location is just its (Copy) region. Used to
// turn set lookups over grants back into plain regions.
impl<'a> From<&'a Grant> for Region {
    fn from(source: &'a Grant) -> Self {
        source.region
    }
}
#[derive(Debug)]
pub struct Grant {
region: Region,
flags: EntryFlags,
mapped: bool,
owned: bool,
@@ -24,6 +301,12 @@ pub struct Grant {
}
impl Grant {
/// Get a mutable reference to the region. This is unsafe, because a bad
/// region could lead to the wrong addresses being unmapped.
pub unsafe fn region_mut(&mut self) -> &mut Region {
&mut self.region
}
pub fn physmap(from: PhysicalAddress, to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
let mut active_table = unsafe { ActivePageTable::new() };
@@ -40,8 +323,10 @@ impl Grant {
flush_all.flush(&mut active_table);
Grant {
start: to,
size,
region: Region {
start: to,
size,
},
flags,
mapped: true,
owned: false,
@@ -64,8 +349,10 @@ impl Grant {
flush_all.flush(&mut active_table);
Grant {
start: to,
size,
region: Region {
start: to,
size,
},
flags,
mapped: true,
owned: true,
@@ -100,8 +387,10 @@ impl Grant {
ipi(IpiKind::Tlb, IpiTarget::Other);
Grant {
start: to,
size,
region: Region {
start: to,
size,
},
flags,
mapped: true,
owned: false,
@@ -117,14 +406,14 @@ impl Grant {
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(self.start);
let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
let start_page = Page::containing_address(self.region.start);
let end_page = Page::containing_address(VirtualAddress::new(self.region.start.get() + self.region.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let frame = active_table.translate_page(page).expect("grant references unmapped memory");
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get()));
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.region.start.get() + new_start.get()));
if self.owned {
let result = active_table.map(new_page, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
flush_all.consume(result);
@@ -138,7 +427,7 @@ impl Grant {
if self.owned {
unsafe {
intrinsics::copy(self.start.get() as *const u8, new_start.get() as *mut u8, self.size);
intrinsics::copy(self.region.start.get() as *const u8, new_start.get() as *mut u8, self.region.size);
}
let mut flush_all = MapperFlushAll::new();
@@ -147,7 +436,7 @@ impl Grant {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get()));
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.region.start.get() + new_start.get()));
let result = active_table.remap(new_page, flags);
flush_all.consume(result);
}
@@ -156,8 +445,10 @@ impl Grant {
}
Grant {
start: new_start,
size: self.size,
region: Region {
start: new_start,
size: self.region.size,
},
flags: self.flags,
mapped: true,
owned: self.owned,
@@ -172,8 +463,8 @@ impl Grant {
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(self.start);
let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
let start_page = Page::containing_address(self.region.start);
let end_page = Page::containing_address(VirtualAddress::new(self.region.start.get() + self.region.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
//TODO: One function to do both?
let flags = active_table.translate_page_flags(page).expect("grant references unmapped memory");
@@ -181,7 +472,7 @@ impl Grant {
flush_all.consume(result);
active_table.with(new_table, temporary_page, |mapper| {
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get()));
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.region.start.get() + new_start.get()));
let result = mapper.map_to(new_page, frame, flags);
// Ignore result due to mapping on inactive table
unsafe { result.ignore(); }
@@ -190,21 +481,7 @@ impl Grant {
flush_all.flush(&mut active_table);
self.start = new_start;
}
pub fn start_address(&self) -> VirtualAddress {
self.start
}
pub unsafe fn set_start_address(&mut self, start: VirtualAddress) {
self.start = start;
}
pub fn size(&self) -> usize {
self.size
}
pub unsafe fn set_size(&mut self, size: usize) {
self.size = size;
self.region.start = new_start;
}
pub fn flags(&self) -> EntryFlags {
@@ -226,8 +503,8 @@ impl Grant {
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(self.start);
let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
let start_page = Page::containing_address(self.start_address());
let end_page = Page::containing_address(self.final_address());
for page in Page::range_inclusive(start_page, end_page) {
let (result, _frame) = active_table.unmap_return(page, false);
flush_all.consume(result);
@@ -253,8 +530,8 @@ impl Grant {
let mut active_table = unsafe { ActivePageTable::new() };
active_table.with(new_table, temporary_page, |mapper| {
let start_page = Page::containing_address(self.start);
let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
let start_page = Page::containing_address(self.start_address());
let end_page = Page::containing_address(self.final_address());
for page in Page::range_inclusive(start_page, end_page) {
let (result, _frame) = mapper.unmap_return(page, false);
// This is not the active table, so the flush can be ignored
@@ -271,11 +548,80 @@ impl Grant {
self.mapped = false;
}
/// Extract out a region into a separate grant. The return value is as
/// follows: (before, new split, after). Before and after may be `None`,
/// which occurs when the split off region is at the start or end of the
/// page respectively.
///
/// # Panics
///
/// Panics if the start address or size of the region is not aligned to the
/// page size. To round up the size to the nearest page size, use `.round()`
/// on the region.
///
/// Also panics if the given region isn't completely contained within the
/// grant. Use `grant.intersect` to find a sub-region that works.
pub fn extract(mut self, region: Region) -> Option<(Option<Grant>, Grant, Option<Grant>)> {
    // FIX: the panic messages referred to `split_out`, this method's
    // pre-rename name, and the second one claimed to check the end address
    // when it checks the size.
    assert_eq!(region.start_address().get() % PAGE_SIZE, 0, "extract must be called with a page-size aligned start address");
    assert_eq!(region.size() % PAGE_SIZE, 0, "extract must be called with a page-size aligned size");

    // The pieces of self on either side of `region` become new grants that
    // share the same flags, mapped state and file description.
    let before_grant = self.before(region).map(|region| Grant {
        region,
        flags: self.flags,
        mapped: self.mapped,
        owned: self.owned,
        desc_opt: self.desc_opt.clone(),
    });
    let after_grant = self.after(region).map(|region| Grant {
        region,
        flags: self.flags,
        mapped: self.mapped,
        owned: self.owned,
        desc_opt: self.desc_opt.clone(),
    });

    // SAFETY: shrinking self to exactly `region` (asserted above to lie on
    // page boundaries inside the original mapping) keeps later unmaps in
    // bounds.
    unsafe {
        *self.region_mut() = region;
    }

    Some((before_grant, self, after_grant))
}
}
// A grant can be used read-only anywhere a Region is expected: start/end
// address, size, collision queries, etc.
impl Deref for Grant {
    type Target = Region;
    fn deref(&self) -> &Self::Target {
        &self.region
    }
}
// Grants order and compare exactly like their regions — i.e. by start
// address only (see the Region impls above in this file).
impl PartialOrd for Grant {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.region.partial_cmp(&other.region)
    }
}
impl Ord for Grant {
    fn cmp(&self, other: &Self) -> Ordering {
        self.region.cmp(&other.region)
    }
}
impl PartialEq for Grant {
    fn eq(&self, other: &Self) -> bool {
        self.region.eq(&other.region)
    }
}
impl Eq for Grant {}
// Borrow<Region> is what allows a BTreeSet<Grant> to be queried with bare
// Region keys (range/take in UserGrants::contains and friends).
impl Borrow<Region> for Grant {
    fn borrow(&self) -> &Region {
        &self.region
    }
}
impl Drop for Grant {
    /// Grants must be explicitly unmapped (`unmap`/`unmap_inactive`) before
    /// being dropped; a mapped grant going out of scope would silently leak
    /// its page-table entries.
    fn drop(&mut self) {
        // FIX: this body contained two copies of the assertion (a merge
        // artifact); keep the one carrying the diagnostic message.
        assert!(!self.mapped, "Grant dropped while still mapped");
    }
}
@@ -488,3 +834,15 @@ impl Tls {
);
}
}
// FIX: `#[cfg(tests)]` (plural) never matches the built-in `test` cfg, so
// this module was silently excluded from every build; the asserts also
// passed bare integers where `Region::new` takes a `VirtualAddress`, and
// the module imported nothing from its parent.
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand: build a region from a raw start address and a size.
    fn region(start: usize, size: usize) -> Region {
        Region::new(VirtualAddress::new(start), size)
    }

    #[test]
    fn region_collides() {
        assert!(region(0, 2).collides(region(0, 1)));
        assert!(region(0, 2).collides(region(1, 1)));
        assert!(!region(0, 2).collides(region(2, 1)));
        assert!(!region(0, 2).collides(region(3, 1)));
    }
}

View File

@@ -1,12 +1,10 @@
use core::cmp;
use crate::context;
use crate::context::memory::Grant;
use crate::context::memory::{entry_flags, Grant};
use crate::memory::{free_frames, used_frames, PAGE_SIZE};
use crate::paging::{ActivePageTable, Page, VirtualAddress};
use crate::paging::entry::EntryFlags;
use crate::paging::{ActivePageTable, VirtualAddress};
use crate::syscall::data::{Map, Map2, StatVfs};
use crate::syscall::error::*;
use crate::syscall::flag::{MapFlags, PROT_EXEC, PROT_READ, PROT_WRITE};
use crate::syscall::flag::MapFlags;
use crate::syscall::scheme::Scheme;
pub struct MemoryScheme;
@@ -42,112 +40,27 @@ impl Scheme for MemoryScheme {
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let fixed = map.flags.contains(MapFlags::MAP_FIXED);
let fixed_noreplace = map.flags.contains(MapFlags::MAP_FIXED_NOREPLACE);
let mut grants = context.grants.lock();
let full_size = ((map.size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
let region = grants.find_free_at(VirtualAddress::new(map.address), map.size, map.flags)?.round();
let mut to_address = if map.address == 0 { crate::USER_GRANT_OFFSET } else {
if
map.address + full_size >= crate::PML4_SIZE * 256 // There are 256 PML4 entries reserved for userspace
&& map.address % PAGE_SIZE != 0
{
return Err(Error::new(EINVAL));
}
map.address
};
{
// Make sure it's *absolutely* not mapped already
// TODO: Keep track of all allocated memory so this isn't necessary
let mut entry_flags = EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE;
if !map.flags.contains(PROT_EXEC) {
entry_flags |= EntryFlags::NO_EXECUTE;
}
if map.flags.contains(PROT_READ) {
//TODO: PROT_READ
}
if map.flags.contains(PROT_WRITE) {
entry_flags |= EntryFlags::WRITABLE;
}
let active_table = unsafe { ActivePageTable::new() };
let mut i = 0;
while i < grants.len() {
let grant = &mut grants[i];
let grant_start = grant.start_address().get();
let grant_len = ((grant.size() + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
let grant_end = grant_start + grant_len;
if to_address < grant_start || grant_end <= to_address {
// grant has nothing to do with the memory to map, and thus we can safely just
// go on to the next one.
if !fixed {
// Use the default grant offset, or if we've already passed it, anything after that.
to_address = cmp::max(
cmp::max(crate::USER_GRANT_OFFSET, grant_end),
to_address,
);
for page in region.pages() {
if active_table.translate_page(page).is_some() {
println!("page at {:#x} was already mapped", page.start_address().get());
return Err(Error::new(EEXIST))
}
i += 1;
continue;
}
// check whether this grant overlaps with the memory range to use, by checking that
// the start and end of the grant is not within the memory range to map
if grant_start <= to_address && grant_end > to_address || grant_start <= to_address + full_size && grant_end > to_address + full_size {
// the range overlaps, thus we'll have to continue to the next grant, or to
// insert a new grant at the end (if not MapFlags::MAP_FIXED).
if fixed_noreplace {
println!("grant: conflicts with: {:#x} - {:#x}", grant_start, grant_end);
return Err(Error::new(EEXIST));
} else if fixed {
/*
// shrink the grant, removing it if necessary. since the to_address isn't
// changed at all when mapping to a fixed address, we can just continue to
// the next grant and shrink or remove that one if it was also overlapping.
if to_address + full_size > grant_start {
let new_start = core::cmp::min(grant_end, to_address + full_size);
let new_size = grant.size() - (new_start - grant_start);
unsafe { grant.set_size(new_size) };
grant_len = ((new_size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
let new_start = VirtualAddress::new(new_start);
unsafe { grant.set_start_address(new_start) };
grant_start = new_start;
grant_end = grant_start + grant_len;
}
*/
// TODO
return Err(Error::new(EOPNOTSUPP));
} else {
to_address = grant_end;
i += 1;
}
continue;
}
}
let start_address = VirtualAddress::new(to_address);
let end_address = VirtualAddress::new(to_address + full_size);
grants.insert(Grant::map(region.start_address(), region.size(), entry_flags(map.flags)));
// Make sure it's absolutely not mapped already
let active_table = unsafe { ActivePageTable::new() };
for page in Page::range_inclusive(Page::containing_address(start_address), Page::containing_address(end_address)) {
if active_table.translate_page(page).is_some() {
return Err(Error::new(EEXIST))
}
}
grants.insert(i, Grant::map(start_address, full_size, entry_flags));
Ok(to_address)
Ok(region.start_address().get())
}
}
fn fmap(&self, id: usize, map: &Map) -> Result<usize> {

View File

@@ -8,16 +8,15 @@ use spin::{Mutex, RwLock};
use crate::context::{self, Context};
use crate::context::file::FileDescriptor;
use crate::context::memory::Grant;
use crate::context::memory::{entry_flags, round_down_pages, Grant, Region};
use crate::event;
use crate::paging::{InactivePageTable, Page, VirtualAddress};
use crate::paging::entry::EntryFlags;
use crate::paging::{PAGE_SIZE, InactivePageTable, Page, VirtualAddress};
use crate::paging::temporary_page::TemporaryPage;
use crate::scheme::{AtomicSchemeId, SchemeId};
use crate::sync::{WaitQueue, WaitMap};
use crate::syscall::data::{Map, Packet, Stat, StatVfs, TimeSpec};
use crate::syscall::data::{Map, Map2, Packet, Stat, StatVfs, TimeSpec};
use crate::syscall::error::*;
use crate::syscall::flag::{EventFlags, EVENT_READ, O_NONBLOCK, MapFlags, PROT_EXEC, PROT_READ, PROT_WRITE};
use crate::syscall::flag::{EventFlags, EVENT_READ, O_NONBLOCK, MapFlags, PROT_READ, PROT_WRITE};
use crate::syscall::number::*;
use crate::syscall::scheme::Scheme;
@@ -30,8 +29,8 @@ pub struct UserInner {
next_id: AtomicU64,
context: Weak<RwLock<Context>>,
todo: WaitQueue<Packet>,
fmap: Mutex<BTreeMap<u64, (Weak<RwLock<Context>>, FileDescriptor, Map)>>,
funmap: Mutex<BTreeMap<usize, usize>>,
fmap: Mutex<BTreeMap<u64, (Weak<RwLock<Context>>, FileDescriptor, Map2)>>,
funmap: Mutex<BTreeMap<Region, VirtualAddress>>,
done: WaitMap<u64, usize>,
unmounting: AtomicBool,
}
@@ -101,69 +100,51 @@ impl UserInner {
Error::demux(self.done.receive(&id, "UserInner::call_inner"))
}
/// Map a readable structure to the scheme's userspace and return the
/// pointer
pub fn capture(&self, buf: &[u8]) -> Result<usize> {
UserInner::capture_inner(&self.context, buf.as_ptr() as usize, buf.len(), PROT_READ, None)
UserInner::capture_inner(&self.context, 0, buf.as_ptr() as usize, buf.len(), PROT_READ, None).map(|addr| addr.get())
}
/// Map a writeable structure to the scheme's userspace and return the
/// pointer
pub fn capture_mut(&self, buf: &mut [u8]) -> Result<usize> {
UserInner::capture_inner(&self.context, buf.as_mut_ptr() as usize, buf.len(), PROT_WRITE, None)
UserInner::capture_inner(&self.context, 0, buf.as_mut_ptr() as usize, buf.len(), PROT_WRITE, None).map(|addr| addr.get())
}
fn capture_inner(context_weak: &Weak<RwLock<Context>>, address: usize, size: usize, flags: MapFlags, desc_opt: Option<FileDescriptor>) -> Result<usize> {
//TODO: Abstract with other grant creation
fn capture_inner(context_weak: &Weak<RwLock<Context>>, to_address: usize, address: usize, size: usize, flags: MapFlags, desc_opt: Option<FileDescriptor>)
-> Result<VirtualAddress> {
// TODO: More abstractions over grant creation!
if size == 0 {
Ok(0)
} else {
let context_lock = context_weak.upgrade().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
let mut grants = context.grants.lock();
let from_address = (address/4096) * 4096;
let offset = address - from_address;
let full_size = ((offset + size + 4095)/4096) * 4096;
let mut to_address = crate::USER_GRANT_OFFSET;
let mut entry_flags = EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE;
if !flags.contains(PROT_EXEC) {
entry_flags |= EntryFlags::NO_EXECUTE;
}
if flags.contains(PROT_READ) {
//TODO: PROT_READ
}
if flags.contains(PROT_WRITE) {
entry_flags |= EntryFlags::WRITABLE;
}
let mut i = 0;
while i < grants.len() {
let start = grants[i].start_address().get();
if to_address + full_size < start {
break;
}
let pages = (grants[i].size() + 4095) / 4096;
let end = start + pages * 4096;
to_address = end;
i += 1;
}
//TODO: Use syscall_head and syscall_tail to avoid leaking data
grants.insert(i, Grant::map_inactive(
VirtualAddress::new(from_address),
VirtualAddress::new(to_address),
full_size,
entry_flags,
desc_opt,
&mut new_table,
&mut temporary_page
));
Ok(to_address + offset)
return Ok(VirtualAddress::new(0));
}
let context_lock = context_weak.upgrade().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(crate::USER_TMP_GRANT_OFFSET)));
let mut grants = context.grants.lock();
let from_address = round_down_pages(address);
let offset = address - from_address;
let from_region = Region::new(VirtualAddress::new(from_address), offset + size).round();
let to_region = grants.find_free_at(VirtualAddress::new(to_address), from_region.size(), flags)?;
//TODO: Use syscall_head and syscall_tail to avoid leaking data
grants.insert(Grant::map_inactive(
from_region.start_address(),
to_region.start_address(),
from_region.size(),
entry_flags(flags),
desc_opt,
&mut new_table,
&mut temporary_page
));
Ok(VirtualAddress::new(to_region.start_address().get() + offset))
}
pub fn release(&self, address: usize) -> Result<()> {
@@ -178,14 +159,9 @@ impl UserInner {
let mut grants = context.grants.lock();
for i in 0 .. grants.len() {
let start = grants[i].start_address().get();
let end = start + grants[i].size();
if address >= start && address < end {
grants.remove(i).unmap_inactive(&mut new_table, &mut temporary_page);
return Ok(());
}
if let Some(region) = grants.contains(VirtualAddress::new(address)).map(Region::from) {
grants.take(&region).unwrap().unmap_inactive(&mut new_table, &mut temporary_page);
return Ok(());
}
Err(Error::new(EFAULT))
@@ -239,12 +215,14 @@ impl UserInner {
} else {
if let Some((context_weak, desc, map)) = self.fmap.lock().remove(&packet.id) {
if let Ok(address) = Error::demux(packet.a) {
//TODO: Protect against sharing addresses that are not page aligned
let res = UserInner::capture_inner(&context_weak, address, map.size, map.flags, Some(desc));
if let Ok(new_address) = res {
self.funmap.lock().insert(new_address, address);
if address % PAGE_SIZE > 0 {
println!("scheme returned unaligned address, causing extra frame to be allocated");
}
packet.a = Error::mux(res);
let res = UserInner::capture_inner(&context_weak, map.address, address, map.size, map.flags, Some(desc));
if let Ok(grant_address) = res {
self.funmap.lock().insert(Region::new(grant_address, map.size), VirtualAddress::new(address));
}
packet.a = Error::mux(res.map(|addr| addr.get()));
} else {
let _ = desc.close();
}
@@ -391,7 +369,12 @@ impl Scheme for UserScheme {
let id = inner.next_id.fetch_add(1, Ordering::SeqCst);
inner.fmap.lock().insert(id, (context_lock, desc, *map));
inner.fmap.lock().insert(id, (context_lock, desc, Map2 {
offset: map.offset,
size: map.size,
flags: map.flags,
address: 0,
}));
let result = inner.call_inner(Packet {
id,
@@ -409,14 +392,114 @@ impl Scheme for UserScheme {
result
}
fn funmap(&self, new_address: usize) -> Result<usize> {
fn fmap2(&self, file: usize, map: &Map2) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let (pid, uid, gid, context_lock, desc) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
// TODO: Faster, cleaner mechanism to get descriptor
let scheme = inner.scheme_id.load(Ordering::SeqCst);
let mut desc_res = Err(Error::new(EBADF));
for context_file_opt in context.files.lock().iter() {
if let Some(context_file) = context_file_opt {
let (context_scheme, context_number) = {
let desc = context_file.description.read();
(desc.scheme, desc.number)
};
if context_scheme == scheme && context_number == file {
desc_res = Ok(context_file.clone());
break;
}
}
}
let desc = desc_res?;
(context.id, context.euid, context.egid, Arc::downgrade(&context_lock), desc)
};
let address = inner.capture(map)?;
let id = inner.next_id.fetch_add(1, Ordering::SeqCst);
inner.fmap.lock().insert(id, (context_lock, desc, *map));
let result = inner.call_inner(Packet {
id,
pid: pid.into(),
uid,
gid,
a: SYS_FMAP2,
b: file,
c: address,
d: mem::size_of::<Map2>()
});
let _ = inner.release(address);
result
}
fn funmap(&self, grant_address: usize) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address_opt = {
let mut funmap = inner.funmap.lock();
funmap.remove(&new_address)
let entry = funmap.range(..=Region::byte(VirtualAddress::new(grant_address))).next_back();
let grant_address = VirtualAddress::new(grant_address);
if let Some((&grant, &user_base)) = entry {
if grant_address >= grant.end_address() {
return Err(Error::new(EINVAL));
}
funmap.remove(&grant);
let user = Region::new(user_base, grant.size());
Some(grant.rebase(user, grant_address).get())
} else {
None
}
};
if let Some(address) = address_opt {
inner.call(SYS_FUNMAP, address, 0, 0)
if let Some(user_address) = address_opt {
inner.call(SYS_FUNMAP, user_address, 0, 0)
} else {
Err(Error::new(EINVAL))
}
}
fn funmap2(&self, grant_address: usize, size: usize) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address_opt = {
let mut funmap = inner.funmap.lock();
let entry = funmap.range(..=Region::byte(VirtualAddress::new(grant_address))).next_back();
let grant_address = VirtualAddress::new(grant_address);
if let Some((&grant, &user_base)) = entry {
let grant_requested = Region::new(grant_address, size);
if grant_requested.end_address() > grant.end_address() {
return Err(Error::new(EINVAL));
}
funmap.remove(&grant);
let user = Region::new(user_base, grant.size());
if let Some(before) = grant.before(grant_requested) {
funmap.insert(before, user_base);
}
if let Some(after) = grant.after(grant_requested) {
let start = grant.rebase(user, after.start_address());
funmap.insert(after, start);
}
Some(grant.rebase(user, grant_address).get())
} else {
None
}
};
if let Some(user_address) = address_opt {
inner.call(SYS_FUNMAP2, user_address, size, 0)
} else {
Err(Error::new(EINVAL))
}

View File

@@ -126,6 +126,11 @@ pub fn format_call(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -
"funmap({:#X})",
b
),
SYS_FUNMAP2 => format!(
"funmap2({:#X}, {:#X})",
b,
c,
),
SYS_FPATH => format!(
"fpath({}, {:#X}, {})",
b,

View File

@@ -3,7 +3,7 @@ use crate::memory::{allocate_frames_complex, deallocate_frames, Frame};
use crate::paging::{ActivePageTable, PhysicalAddress, VirtualAddress};
use crate::paging::entry::EntryFlags;
use crate::context;
use crate::context::memory::Grant;
use crate::context::memory::{Grant, Region};
use crate::syscall::error::{Error, EFAULT, EINVAL, ENOMEM, EPERM, ESRCH, Result};
use crate::syscall::flag::{PhysallocFlags, PartialAllocStrategy, PhysmapFlags, PHYSMAP_WRITE, PHYSMAP_WRITE_COMBINE, PHYSMAP_NO_CACHE};
@@ -93,20 +93,20 @@ pub fn inner_physmap(physical_address: usize, size: usize, flags: PhysmapFlags)
entry_flags |= EntryFlags::NO_CACHE;
}
let mut i = 0;
while i < grants.len() {
let start = grants[i].start_address().get();
// TODO: Make this faster than Sonic himself by using le superpowers of BTreeSet
for grant in grants.iter() {
let start = grant.start_address().get();
if to_address + full_size < start {
break;
}
let pages = (grants[i].size() + 4095) / 4096;
let pages = (grant.size() + 4095) / 4096;
let end = start + pages * 4096;
to_address = end;
i += 1;
}
grants.insert(i, Grant::physmap(
grants.insert(Grant::physmap(
PhysicalAddress::new(from_address),
VirtualAddress::new(to_address),
full_size,
@@ -131,14 +131,9 @@ pub fn inner_physunmap(virtual_address: usize) -> Result<usize> {
let mut grants = context.grants.lock();
for i in 0 .. grants.len() {
let start = grants[i].start_address().get();
let end = start + grants[i].size();
if virtual_address >= start && virtual_address < end {
grants.remove(i).unmap();
return Ok(0);
}
if let Some(region) = grants.contains(VirtualAddress::new(virtual_address)).map(Region::from) {
grants.take(&region).unwrap().unmap();
return Ok(0);
}
Err(Error::new(EFAULT))

View File

@@ -1,15 +1,19 @@
//! Filesystem syscalls
use core::sync::atomic::Ordering;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::sync::atomic::Ordering;
use spin::RwLock;
use crate::context::file::{FileDescriptor, FileDescription};
use crate::context::memory::Region;
use crate::context;
use crate::memory::PAGE_SIZE;
use crate::paging::VirtualAddress;
use crate::scheme::{self, FileHandle};
use crate::syscall;
use crate::syscall::data::{Packet, Stat};
use crate::syscall::error::*;
use crate::syscall::flag::*;
use crate::context::file::{FileDescriptor, FileDescription};
use crate::syscall;
pub fn file_op(a: usize, fd: FileHandle, c: usize, d: usize) -> Result<usize> {
let (file, pid, uid, gid) = {
@@ -453,15 +457,10 @@ pub fn funmap(virtual_address: usize) -> Result<usize> {
let mut grants = context.grants.lock();
for i in 0 .. grants.len() {
let start = grants[i].start_address().get();
let end = start + grants[i].size();
if virtual_address >= start && virtual_address < end {
let mut grant = grants.remove(i);
desc_opt = grant.desc_opt.take();
grant.unmap();
break;
}
if let Some(region) = grants.contains(VirtualAddress::new(virtual_address)).map(Region::from) {
let mut grant = grants.take(&region).unwrap();
desc_opt = grant.desc_opt.take();
grant.unmap();
}
}
@@ -486,3 +485,68 @@ pub fn funmap(virtual_address: usize) -> Result<usize> {
}
}
}
/// `funmap2` syscall: unmap `length` bytes of the current context's address
/// space starting at `virtual_address`, possibly splitting grants that only
/// partially overlap the requested range, and notify the owning scheme of
/// each unmapped file-backed grant.
///
/// Returns `Ok(0)` on success (including the no-op case of a zero address
/// or zero length). Errors: `EINVAL` for a non-page-aligned address,
/// `ESRCH` if there is no current context, `EBADF` if a grant's scheme has
/// disappeared, plus whatever the scheme's own `funmap2` returns.
pub fn funmap2(virtual_address: usize, length: usize) -> Result<usize> {
    // NULL or empty request is treated as a successful no-op.
    if virtual_address == 0 || length == 0 {
        return Ok(0);
    } else if virtual_address % PAGE_SIZE != 0 {
        // The start address must be page-aligned; the length apparently
        // need not be (presumably rounded by Region::round below — TODO confirm).
        return Err(Error::new(EINVAL));
    }

    // Descriptors to notify are collected here and processed only after the
    // context/grant locks below have been released, so scheme calls never
    // run while holding them.
    let mut notify_files = Vec::new();

    let virtual_address = VirtualAddress::new(virtual_address);
    let requested = Region::new(virtual_address, length);

    {
        let contexts = context::contexts();
        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
        let context = context_lock.read();

        let mut grants = context.grants.lock();

        // Collect keys first: we cannot remove from `grants` while iterating
        // over the borrow returned by conflicts().
        let conflicting: Vec<Region> = grants.conflicts(requested).map(Region::from).collect();

        for conflict in conflicting {
            let grant = grants.take(&conflict).expect("conflicting region didn't exist");
            let intersection = grant.intersect(requested);
            // Split the grant into the overlapped middle part and the
            // (optional) remainders on either side.
            let (before, mut grant, after) = grant.extract(intersection.round()).expect("conflicting region shared no common parts");

            // Notify scheme that holds grant
            if let Some(file_desc) = grant.desc_opt.take() {
                notify_files.push((file_desc, intersection));
            }

            // Keep untouched regions
            if let Some(before) = before {
                grants.insert(before);
            }
            if let Some(after) = after {
                grants.insert(after);
            }

            // Remove irrelevant region
            grant.unmap();
        }
    }

    // Locks are dropped; tell each owning scheme which sub-range went away.
    for (desc, intersection) in notify_files {
        let scheme_id = {
            let description = desc.description.read();
            description.scheme
        };
        let scheme = {
            let schemes = scheme::schemes();
            let scheme = schemes.get(scheme_id).ok_or(Error::new(EBADF))?;
            scheme.clone()
        };
        // Close the descriptor even if the scheme's funmap2 failed; only
        // then propagate the scheme's error.
        let res = scheme.funmap2(intersection.start_address().get(), intersection.size());
        let _ = desc.close();
        res?;
    }

    Ok(0)
}

View File

@@ -67,6 +67,7 @@ pub fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, bp: u
SYS_FEXEC => fexec(fd, validate_slice(c as *const [usize; 2], d)?, validate_slice(e as *const [usize; 2], f)?),
SYS_FRENAME => frename(fd, validate_slice(c as *const u8, d)?),
SYS_FUNMAP => funmap(b),
SYS_FUNMAP2 => funmap2(b, c),
_ => file_op(a, fd, c, d)
}
}

View File

@@ -1,13 +1,15 @@
use alloc::sync::Arc;
use alloc::boxed::Box;
use alloc::collections::BTreeSet;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::alloc::{GlobalAlloc, Layout};
use core::{intrinsics, mem};
use core::ops::DerefMut;
use core::{intrinsics, mem};
use spin::Mutex;
use crate::context::file::FileDescriptor;
use crate::context::{ContextId, WaitpidKey};
use crate::context::memory::{UserGrants, Region};
use crate::context;
#[cfg(not(feature="doc"))]
use crate::elf::{self, program_header};
@@ -280,12 +282,12 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
if flags.contains(CLONE_VM) {
grants = Arc::clone(&context.grants);
} else {
let mut grants_vec = Vec::new();
let mut grants_set = UserGrants::default();
for grant in context.grants.lock().iter() {
let start = VirtualAddress::new(grant.start_address().get() + crate::USER_TMP_GRANT_OFFSET - crate::USER_GRANT_OFFSET);
grants_vec.push(grant.secret_clone(start));
grants_set.insert(grant.secret_clone(start));
}
grants = Arc::new(Mutex::new(grants_vec));
grants = Arc::new(Mutex::new(grants_set));
}
if flags.contains(CLONE_VM) {
@@ -332,20 +334,25 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
// If not cloning virtual memory, use fmap to re-obtain every grant where possible
if !flags.contains(CLONE_VM) {
let mut i = 0;
while i < grants.lock().len() {
let mut grants = grants.lock();
let mut to_remove = BTreeSet::new();
// TODO: Use drain_filter if possible
for grant in grants.iter() {
let remove = false;
if let Some(grant) = grants.lock().get(i) {
if let Some(ref _desc) = grant.desc_opt {
println!("todo: clone grant {} using fmap: {:?}", i, grant);
}
if let Some(ref _desc) = grant.desc_opt {
println!("todo: clone grant using fmap: {:?}", grant);
}
if remove {
grants.lock().remove(i);
} else {
i += 1;
to_remove.insert(Region::from(grant));
}
}
for region in to_remove {
grants.remove(&region);
}
}
// If vfork, block the current process
@@ -510,9 +517,15 @@ pub fn clone(flags: CloneFlags, stack_base: usize) -> Result<ContextId> {
}
// Move grants
for grant in grants.lock().iter_mut() {
let start = VirtualAddress::new(grant.start_address().get() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET);
grant.move_to(start, &mut new_table, &mut temporary_page);
{
let mut grants = grants.lock();
let old_grants = mem::replace(&mut *grants, UserGrants::default());
for mut grant in old_grants.inner.into_iter() {
let start = VirtualAddress::new(grant.start_address().get() + crate::USER_GRANT_OFFSET - crate::USER_TMP_GRANT_OFFSET);
grant.move_to(start, &mut new_table, &mut temporary_page);
grants.insert(grant);
}
}
context.grants = grants;
}
@@ -626,7 +639,8 @@ fn empty(context: &mut context::Context, reaping: bool) {
let mut grants = context.grants.lock();
if Arc::strong_count(&context.grants) == 1 {
for grant in grants.drain(..) {
let grants = mem::replace(&mut *grants, UserGrants::default());
for grant in grants.inner.into_iter() {
if reaping {
println!("{}: {}: Grant should not exist: {:?}", context.id.into(), unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, grant);

Submodule syscall updated: 6346fd671e...a0ea09ceb3