Move the initfs scheme to userspace.

This commit is contained in:
4lDO2
2022-07-12 14:09:55 +02:00
parent 351d77ad9b
commit 1cdd462244
9 changed files with 76 additions and 402 deletions

9
Cargo.lock generated
View File

@@ -76,7 +76,6 @@ dependencies = [
"log",
"memoffset",
"raw-cpuid",
"redox-initfs",
"redox_syscall",
"rmm",
"rustc-cfg",
@@ -147,14 +146,6 @@ dependencies = [
"bitflags",
]
[[package]]
name = "redox-initfs"
version = "0.1.0"
source = "git+https://gitlab.redox-os.org/redox-os/redox-initfs.git#89b8fb8984cf96c418880b7dcd9ce3d6afc3f71c"
dependencies = [
"plain",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"

View File

@@ -24,7 +24,6 @@ slab_allocator = { path = "slab_allocator", optional = true }
# FIXME: There is some undefined behavior probably in the kernel, which forces us to use spin 0.9.0 and not 0.9.2.
spin = "=0.9.0"
rmm = { path = "rmm", default-features = false }
redox-initfs = { git = "https://gitlab.redox-os.org/redox-os/redox-initfs.git", features = ["kernel"], default-features = false }
[dependencies.goblin]
version = "0.2.1"

View File

@@ -39,12 +39,12 @@ static BSP_READY: AtomicBool = AtomicBool::new(false);
#[repr(packed)]
pub struct KernelArgs {
kernel_base: u64,
kernel_size: u64,
stack_base: u64,
stack_size: u64,
env_base: u64,
env_size: u64,
kernel_base: usize,
kernel_size: usize,
stack_base: usize,
stack_size: usize,
env_base: usize,
env_size: usize,
/// The base 64-bit pointer to an array of saved RSDPs. It's up to the kernel (and possibly
/// userspace), to decide which RSDP to use. The buffer will be a linked list containing a
@@ -53,36 +53,26 @@ pub struct KernelArgs {
/// This field can be NULL, and if so, the system has not booted with UEFI or in some other way
/// retrieved the RSDPs. The kernel or a userspace driver will thus try searching the BIOS
/// memory instead. On UEFI systems, BIOS-like searching is not guaranteed to actually work though.
acpi_rsdps_base: u64,
acpi_rsdps_base: usize,
/// The size of the RSDPs region.
acpi_rsdps_size: u64,
acpi_rsdps_size: usize,
areas_base: u64,
areas_size: u64,
areas_base: usize,
areas_size: usize,
/// The physical base 64-bit pointer to the contiguous initfs.
initfs_base: u64,
initfs_size: u64,
/// The physical base 64-bit pointer to the contiguous bootstrap/initfs.
bootstrap_base: usize,
/// Size of contiguous bootstrap/initfs physical region, not necessarily page aligned.
bootstrap_size: usize,
/// Entry point the kernel will jump to.
bootstrap_entry: usize,
}
/// The entry to Rust, all things must be initialized
#[no_mangle]
pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
let env = {
let args = &*args_ptr;
let kernel_base = args.kernel_base as usize;
let kernel_size = args.kernel_size as usize;
let stack_base = args.stack_base as usize;
let stack_size = args.stack_size as usize;
let env_base = args.env_base as usize;
let env_size = args.env_size as usize;
let acpi_rsdps_base = args.acpi_rsdps_base;
let acpi_rsdps_size = args.acpi_rsdps_size;
let areas_base = args.areas_base as usize;
let areas_size = args.areas_size as usize;
let initfs_base = args.initfs_base as usize;
let initfs_size = args.initfs_size as usize;
let bootstrap = {
let args = args_ptr.read();
// BSS should already be zero
{
@@ -90,12 +80,11 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
assert_eq!(DATA_TEST_NONZERO, 0xFFFF_FFFF_FFFF_FFFF);
}
KERNEL_BASE.store(kernel_base, Ordering::SeqCst);
KERNEL_SIZE.store(kernel_size, Ordering::SeqCst);
KERNEL_BASE.store(args.kernel_base, Ordering::SeqCst);
KERNEL_SIZE.store(args.kernel_size, Ordering::SeqCst);
// Convert env to slice
let env = slice::from_raw_parts((env_base + crate::PHYS_OFFSET) as *const u8, env_size);
let initfs = slice::from_raw_parts((initfs_base + crate::PHYS_OFFSET) as *const u8, initfs_size);
let env = slice::from_raw_parts((args.env_base + crate::PHYS_OFFSET) as *const u8, args.env_size);
// Set up graphical debug
#[cfg(feature = "graphical_debug")]
@@ -117,12 +106,13 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
});
info!("Redox OS starting...");
info!("Kernel: {:X}:{:X}", kernel_base, kernel_base + kernel_size);
info!("Stack: {:X}:{:X}", stack_base, stack_base + stack_size);
info!("Env: {:X}:{:X}", env_base, env_base + env_size);
info!("RSDPs: {:X}:{:X}", acpi_rsdps_base, acpi_rsdps_base + acpi_rsdps_size);
info!("Areas: {:X}:{:X}", areas_base, areas_base + areas_size);
info!("Initfs: {:X}:{:X}", initfs_base, initfs_base + initfs_size);
info!("Kernel: {:X}:{:X}", args.kernel_base, args.kernel_base + args.kernel_size);
info!("Stack: {:X}:{:X}", args.stack_base, args.stack_base + args.stack_size);
info!("Env: {:X}:{:X}", args.env_base, args.env_base + args.env_size);
info!("RSDPs: {:X}:{:X}", args.acpi_rsdps_base, args.acpi_rsdps_base + args.acpi_rsdps_size);
info!("Areas: {:X}:{:X}", args.areas_base, args.areas_base + args.areas_size);
info!("Bootstrap: {:X}:{:X}", args.bootstrap_base, args.bootstrap_base + args.bootstrap_size);
info!("Bootstrap entry point: {:X}", args.bootstrap_entry);
// Set up GDT before paging
gdt::init();
@@ -132,19 +122,19 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
// Initialize RMM
crate::arch::rmm::init(
kernel_base, kernel_size,
stack_base, stack_size,
env_base, env_size,
acpi_rsdps_base as usize, acpi_rsdps_size as usize,
areas_base, areas_size,
initfs_base, initfs_size,
args.kernel_base, args.kernel_size,
args.stack_base, args.stack_size,
args.env_base, args.env_size,
args.acpi_rsdps_base, args.acpi_rsdps_size,
args.areas_base, args.areas_size,
args.bootstrap_base, args.bootstrap_size,
);
// Initialize paging
let (mut active_table, tcb_offset) = paging::init(0);
// Set up GDT after paging with TLS
gdt::init_paging(0, tcb_offset, stack_base + stack_size);
gdt::init_paging(0, tcb_offset, args.stack_base + args.stack_size);
// Set up IDT
idt::init_paging_bsp();
@@ -185,8 +175,8 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
// Read ACPI tables, starts APs
#[cfg(feature = "acpi")]
{
acpi::init(&mut active_table, if acpi_rsdps_base != 0 && acpi_rsdps_size > 0 {
Some((acpi_rsdps_base + crate::PHYS_OFFSET as u64, acpi_rsdps_size))
acpi::init(&mut active_table, if args.acpi_rsdps_base != 0 && args.acpi_rsdps_size > 0 {
Some(((args.acpi_rsdps_base + crate::PHYS_OFFSET) as u64, args.acpi_rsdps_size as u64))
} else {
None
});
@@ -196,18 +186,21 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
// Initialize all of the non-core devices not otherwise needed to complete initialization
device::init_noncore();
crate::scheme::initfs::init(initfs);
// Stop graphical debug
#[cfg(feature = "graphical_debug")]
graphical_debug::fini();
BSP_READY.store(true, Ordering::SeqCst);
env
crate::Bootstrap {
base: crate::memory::Frame::containing_address(crate::paging::PhysicalAddress::new(args.bootstrap_base)),
page_count: args.bootstrap_size / crate::memory::PAGE_SIZE,
entry: args.bootstrap_entry,
env,
}
};
crate::kmain(CPU_COUNT.load(Ordering::SeqCst), env);
crate::kmain(CPU_COUNT.load(Ordering::SeqCst), bootstrap);
}
#[repr(packed)]

View File

@@ -74,7 +74,6 @@ extern crate spin;
#[cfg(feature = "slab")]
extern crate slab_allocator;
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use crate::scheme::{FileHandle, SchemeNamespace};
@@ -169,48 +168,36 @@ pub fn cpu_count() -> usize {
CPU_COUNT.load(Ordering::Relaxed)
}
static mut INIT_ENV: &[u8] = &[];
/// Initialize userspace by running the initfs:bin/init process
/// This function will also set the CWD to initfs:bin and open debug: as stdio
pub extern fn userspace_init() {
let path = "initfs:/bin/bootstrap";
if let Err(err) = syscall::chdir("initfs:") {
info!("Failed to enter initfs ({}).", err);
panic!("Unexpected error while trying to enter initfs:.");
}
let fd = syscall::open(path, syscall::flag::O_RDONLY).expect("failed to open init");
let mut total_bytes_read = 0;
let mut data = Vec::new();
loop {
data.resize(total_bytes_read + 4096, 0);
let bytes_read = syscall::file_op_mut_slice(syscall::number::SYS_READ, fd, &mut data[total_bytes_read..]).expect("failed to read init");
if bytes_read == 0 { break }
total_bytes_read += bytes_read;
}
data.truncate(total_bytes_read);
let _ = syscall::close(fd);
crate::syscall::process::usermode_bootstrap(data.into_boxed_slice());
/// Returns the boot-time environment block passed in by the bootloader
/// (newline-separated `KEY=value` lines — assumed format; TODO confirm against bootloader).
///
/// # Panics
/// Panics if called before `kmain` has stored `BOOTSTRAP`.
pub fn init_env() -> &'static [u8] {
    crate::BOOTSTRAP.get().expect("BOOTSTRAP was not set").env
}
/// Entry point of the first kernel context: hands control to the userspace
/// bootstrap image. Never returns (`usermode_bootstrap` is `-> !`).
pub extern "C" fn userspace_init() {
    // This context is spawned by kmain after BOOTSTRAP is stored, so a
    // missing value here is a kernel bug — hence the expect.
    let bootstrap = crate::BOOTSTRAP.get().expect("BOOTSTRAP was not set");
    // Maps the bootstrap image into userspace and jumps to its entry point;
    // the unsafe contract is defined at usermode_bootstrap's declaration.
    unsafe { crate::syscall::process::usermode_bootstrap(bootstrap) }
}
/// Physical location and entry point of the userspace bootstrap image handed
/// over by the bootloader, plus the boot-time environment.
pub struct Bootstrap {
    /// First frame of the contiguous physical region holding the image.
    pub base: crate::memory::Frame,
    /// Length of that region in whole pages.
    pub page_count: usize,
    /// Address the kernel jumps to once the image is mapped (see `usermode_bootstrap`).
    pub entry: usize,
    /// Boot-time environment passed to userspace via `init_env`/`sys:env`.
    pub env: &'static [u8],
}
/// Set exactly once by `kmain`; read by `userspace_init` and `init_env`.
static BOOTSTRAP: spin::Once<Bootstrap> = spin::Once::new();
/// This is the kernel entry point for the primary CPU. The arch crate is responsible for calling this
pub fn kmain(cpus: usize, env: &'static [u8]) -> ! {
pub fn kmain(cpus: usize, bootstrap: Bootstrap) -> ! {
CPU_ID.store(0, Ordering::SeqCst);
CPU_COUNT.store(cpus, Ordering::SeqCst);
unsafe { INIT_ENV = env };
//Initialize the first context, stored in kernel/src/context/mod.rs
context::init();
let pid = syscall::getpid();
info!("BSP: {:?} {}", pid, cpus);
info!("Env: {:?}", ::core::str::from_utf8(unsafe { INIT_ENV }));
info!("Env: {:?}", ::core::str::from_utf8(bootstrap.env));
BOOTSTRAP.call_once(|| bootstrap);
match context::contexts_mut().spawn(userspace_init) {
Ok(context_lock) => {

View File

@@ -1,276 +0,0 @@
use core::convert::TryFrom;
use core::str;
use core::sync::atomic::{AtomicUsize, Ordering};
use alloc::collections::BTreeMap;
use alloc::string::String;
use alloc::vec::Vec;
use spin::{Once, RwLock};
use redox_initfs::{InitFs, InodeStruct, Inode, InodeDir, InodeKind, types::Timespec};
use crate::syscall::data::Stat;
use crate::syscall::error::*;
use crate::syscall::flag::{MODE_DIR, MODE_FILE};
use crate::syscall::scheme::{calc_seek_offset_usize, Scheme};
/// Per-open-file state for the `initfs:` scheme, keyed by handle id in `HANDLES`.
struct Handle {
    // Inode this handle refers to.
    inode: Inode,
    // Current read offset in bytes; for directories this is an offset into the
    // synthesized newline-separated listing (see `read`).
    seek: usize,
    // TODO: Any better way to implement fpath? Or maybe work around it, e.g. by giving paths such
    // as `initfs:__inodes__/<inode>`?
    filename: String,
}
// Monotonic source of handle ids; see `next_id`.
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
// All currently open handles, keyed by id.
static HANDLES: RwLock<BTreeMap<usize, Handle>> = RwLock::new(BTreeMap::new());
// The parsed initfs image; set exactly once by `init`.
static FS: Once<InitFs<'static>> = Once::new();
fn fs() -> Result<InitFs<'static>> {
FS.get().copied().ok_or(Error::new(ENODEV))
}
fn get_inode(inode: Inode) -> Result<InodeStruct<'static>> {
fs()?.get_inode(inode).ok_or_else(|| Error::new(EIO))
}
/// Parses `bytes` as an initfs image and registers it globally.
///
/// # Panics
/// Panics if the image header is malformed, or if called more than once.
pub fn init(bytes: &'static [u8]) {
    let mut first_call = false;
    FS.call_once(|| {
        first_call = true;
        InitFs::new(bytes)
            .expect("failed to parse initfs header")
    });
    // call_once runs its closure only the first time; on any later call
    // `first_call` stays false and the assertion below fires.
    assert!(first_call, "called initfs::init more than once");
}
/// Allocates the next handle id, panicking if the counter would wrap.
fn next_id() -> usize {
    let previous = NEXT_ID.fetch_add(1, Ordering::Relaxed);
    // fetch_add wraps silently; detect exhaustion by inspecting the old value.
    assert_ne!(previous, usize::MAX, "usize overflow in initfs scheme");
    previous
}
/// Unit type implementing the read-only `initfs:` scheme.
pub struct InitFsScheme;
/// Cursor-style iterator over the entries of one initfs directory inode.
struct Iter {
    // Directory whose entries are being enumerated.
    dir: InodeDir<'static>,
    // Index of the next entry to yield; advanced by `Iterator::next`.
    idx: u32,
}
impl Iterator for Iter {
    type Item = Result<redox_initfs::Entry<'static>>;

    /// Yields the next directory entry, mapping filesystem-level failures to `EIO`.
    fn next(&mut self) -> Option<Self::Item> {
        let entry = self.dir.get_entry(self.idx).map_err(|_| Error::new(EIO));
        self.idx += 1;
        // Ok(None) — index past the last entry — terminates iteration;
        // an error is yielded to the caller as Some(Err(EIO)).
        entry.transpose()
    }

    /// Reports the number of entries *remaining*.
    ///
    /// Fix: the previous version returned the directory's total entry count
    /// even after some entries had been consumed, overstating the lower bound
    /// and violating the `Iterator::size_hint` contract ("bounds on the
    /// remaining length"). Subtract the already-consumed `idx`.
    fn size_hint(&self) -> (usize, Option<usize>) {
        match self.dir.entry_count().ok() {
            Some(count) => {
                let total = usize::try_from(count).expect("expected u32 to be convertible into usize");
                let remaining = total.saturating_sub(self.idx as usize);
                (remaining, Some(remaining))
            }
            None => (0, None),
        }
    }
}
fn entries_iter(dir: InodeDir<'static>) -> impl IntoIterator<Item = Result<redox_initfs::Entry<'static>>> + 'static {
let mut index = 0_u32;
core::iter::from_fn(move || {
let idx = index;
index += 1;
dir.get_entry(idx).map_err(|_| Error::new(EIO)).transpose()
})
}
fn inode_len(inode: InodeStruct<'static>) -> Result<usize> {
Ok(match inode.kind() {
InodeKind::File(file) => file.data().map_err(|_| Error::new(EIO))?.len(),
InodeKind::Dir(dir) => (Iter { dir, idx: 0 })
.fold(0, |len, entry| len + entry.and_then(|entry| entry.name().map_err(|_| Error::new(EIO))).map_or(0, |name| name.len() + 1)),
InodeKind::Unknown => return Err(Error::new(EIO)),
})
}
/// Scheme implementation backing `initfs:` — a read-only, in-memory filesystem
/// used while the system boots.
impl Scheme for InitFsScheme {
    /// Resolves `path` component-by-component from the root inode and
    /// allocates a handle for it. `flags`, `uid` and `gid` are ignored:
    /// the filesystem is world-readable and read-only.
    fn open(&self, path: &str, _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
        let mut components = path
            // trim leading and trailing slash
            .trim_matches('/')
            // divide into components
            .split('/')
            // filter out double slashes (e.g. /usr//bin/...)
            .filter(|c| !c.is_empty());
        let mut current_inode = InitFs::ROOT_INODE;
        while let Some(component) = components.next() {
            match component {
                "." => continue,
                // NOTE(review): ".." is handled by dropping a component from
                // the *end* of the path (next_back), not by going up one
                // level — so "a/../b" does not resolve to "b". Presumably
                // intentional for this minimal fs, but verify.
                ".." => {
                    let _ = components.next_back();
                    continue
                }
                _ => (),
            }
            let current_inode_struct = get_inode(current_inode)?;
            let dir = match current_inode_struct.kind() {
                InodeKind::Dir(dir) => dir,
                // If we still have more components in the path, and the file tree for that
                // particular branch is not all directories except the last, then that file cannot
                // exist.
                InodeKind::File(_) | InodeKind::Unknown => return Err(Error::new(ENOENT)),
            };
            let mut entries = Iter {
                dir,
                idx: 0,
            };
            // Linear scan of the directory for an entry matching this component.
            current_inode = loop {
                let entry_res = match entries.next() {
                    Some(e) => e,
                    None => return Err(Error::new(ENOENT)),
                };
                let entry = entry_res?;
                let name = entry.name().map_err(|_| Error::new(EIO))?;
                if name == component.as_bytes() {
                    break entry.inode();
                }
            };
        }
        let id = next_id();
        let old = HANDLES.write().insert(id, Handle {
            inode: current_inode,
            seek: 0_usize,
            filename: path.into(),
        });
        // next_id() never repeats ids, so the slot must have been empty.
        assert!(old.is_none());
        Ok(id)
    }
    /// Reads from the handle at its current seek offset. Files read raw bytes;
    /// directories read a synthesized newline-separated listing of entry names.
    fn read(&self, id: usize, buffer: &mut [u8]) -> Result<usize> {
        let mut handles = HANDLES.write();
        let handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
        match get_inode(handle.inode)?.kind() {
            InodeKind::Dir(dir) => {
                let mut bytes_read = 0;
                let mut bytes_skipped = 0;
                // Re-walks the listing from the start on every read, skipping
                // handle.seek bytes before copying.
                for entry_res in (Iter { dir, idx: 0 }) {
                    let entry = entry_res?;
                    let name = entry.name().map_err(|_| Error::new(EIO))?;
                    // One byte past the name for the '\n' separator.
                    let entry_len = name.len() + 1;
                    let to_skip = core::cmp::min(handle.seek - bytes_skipped, entry_len);
                    // NOTE(review): max_to_read is computed but never used, and
                    // to_copy below ignores both it and buffer.len(), so the
                    // slice writes into `buffer` are not clamped to the caller's
                    // buffer — looks like it can panic on short buffers. Also
                    // `to_copy.saturating_sub(to_skip) == 1` as the newline
                    // condition looks off. Verify before reusing this logic.
                    let max_to_read = core::cmp::min(entry_len - to_skip, buffer.len());
                    let to_copy = entry_len.saturating_sub(to_skip).saturating_sub(1);
                    buffer[bytes_read..bytes_read + to_copy].copy_from_slice(&name[..to_copy]);
                    if to_copy.saturating_sub(to_skip) == 1 {
                        buffer[bytes_read + to_copy] = b'\n';
                        bytes_read += 1;
                    }
                    bytes_read += to_copy;
                    bytes_skipped += to_skip;
                }
                handle.seek = handle.seek.checked_add(bytes_read).ok_or(Error::new(EOVERFLOW))?;
                Ok(bytes_read)
            }
            InodeKind::File(file) => {
                let data = file.data().map_err(|_| Error::new(EIO))?;
                // Clamp the start so a seek past EOF yields an empty source.
                let src_buf = &data[core::cmp::min(handle.seek, data.len())..];
                let to_copy = core::cmp::min(src_buf.len(), buffer.len());
                buffer[..to_copy].copy_from_slice(&src_buf[..to_copy]);
                handle.seek = handle.seek.checked_add(to_copy).ok_or(Error::new(EOVERFLOW))?;
                Ok(to_copy)
            }
            InodeKind::Unknown => return Err(Error::new(EIO)),
        }
    }
    /// Adjusts the handle's seek offset relative to `whence`, clamped by the
    /// inode's logical length.
    fn seek(&self, id: usize, pos: isize, whence: usize) -> Result<isize> {
        let mut handles = HANDLES.write();
        let handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
        let new_offset = calc_seek_offset_usize(handle.seek, pos, whence, inode_len(get_inode(handle.inode)?)?)?;
        handle.seek = new_offset as usize;
        Ok(new_offset)
    }
    /// No fcntl commands are supported; only validates the handle.
    fn fcntl(&self, id: usize, _cmd: usize, _arg: usize) -> Result<usize> {
        let handles = HANDLES.read();
        let _handle = handles.get(&id).ok_or(Error::new(EBADF))?;
        Ok(0)
    }
    /// Writes "initfs:" followed by the path recorded at open time into `buf`,
    /// truncating to fit; returns the number of bytes written.
    fn fpath(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
        let handles = HANDLES.read();
        let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
        // TODO: Copy scheme part in kernel
        let scheme_path = b"initfs:";
        let scheme_bytes = core::cmp::min(scheme_path.len(), buf.len());
        buf[..scheme_bytes].copy_from_slice(&scheme_path[..scheme_bytes]);
        let source = handle.filename.as_bytes();
        let path_bytes = core::cmp::min(buf.len() - scheme_bytes, source.len());
        buf[scheme_bytes..scheme_bytes + path_bytes].copy_from_slice(&source[..path_bytes]);
        Ok(scheme_bytes + path_bytes)
    }
    /// Fills `stat` from the inode's metadata. All timestamps are the image
    /// creation time, since initfs contents are immutable.
    fn fstat(&self, id: usize, stat: &mut Stat) -> Result<usize> {
        let handles = HANDLES.read();
        let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
        let Timespec { sec, nsec } = fs()?.image_creation_time();
        let inode = get_inode(handle.inode)?;
        stat.st_mode = inode.mode() | match inode.kind() { InodeKind::Dir(_) => MODE_DIR, InodeKind::File(_) => MODE_FILE, _ => 0 };
        stat.st_uid = inode.uid();
        stat.st_gid = inode.gid();
        // Saturate rather than fail if the length doesn't fit in u64.
        stat.st_size = u64::try_from(inode_len(inode)?).unwrap_or(u64::MAX);
        stat.st_ctime = sec.get();
        stat.st_ctime_nsec = nsec.get();
        stat.st_mtime = sec.get();
        stat.st_mtime_nsec = nsec.get();
        Ok(0)
    }
    /// No-op: the filesystem is read-only, so there is nothing to flush.
    fn fsync(&self, id: usize) -> Result<usize> {
        let handles = HANDLES.read();
        let _handle = handles.get(&id).ok_or(Error::new(EBADF))?;
        Ok(0)
    }
    /// Drops the handle; EBADF if the id was never opened or already closed.
    fn close(&self, id: usize) -> Result<usize> {
        let _ = HANDLES.write().remove(&id).ok_or(Error::new(EBADF))?;
        Ok(0)
    }
}
// Marker impl: no kernel-scheme-specific behavior beyond the Scheme impl above.
impl crate::scheme::KernelScheme for InitFsScheme {}

View File

@@ -37,7 +37,7 @@ impl DiskScheme {
let mut phys = 0;
let mut size = 0;
for line in str::from_utf8(unsafe { crate::INIT_ENV }).unwrap_or("").lines() {
for line in str::from_utf8(crate::init_env()).unwrap_or("").lines() {
let mut parts = line.splitn(2, '=');
let name = parts.next().unwrap_or("");
let value = parts.next().unwrap_or("");

View File

@@ -25,7 +25,6 @@ use self::acpi::AcpiScheme;
use self::debug::DebugScheme;
use self::event::EventScheme;
use self::initfs::InitFsScheme;
use self::irq::IrqScheme;
use self::itimer::ITimerScheme;
use self::memory::MemoryScheme;
@@ -46,9 +45,6 @@ pub mod debug;
/// `event:` - allows reading of `Event`s which are registered using `fevent`
pub mod event;
/// `initfs:` - a readonly filesystem used for initializing the system
pub mod initfs;
/// `irq:` - allows userspace handling of IRQs
pub mod irq;
@@ -166,7 +162,6 @@ impl SchemeList {
self.insert(ns, "kernel/acpi", |scheme_id| Arc::new(AcpiScheme::new(scheme_id))).unwrap();
}
self.insert(ns, "debug", |scheme_id| Arc::new(DebugScheme::new(scheme_id))).unwrap();
self.insert(ns, "initfs", |_| Arc::new(InitFsScheme)).unwrap();
self.insert(ns, "irq", |scheme_id| Arc::new(IrqScheme::new(scheme_id))).unwrap();
self.insert(ns, "proc", |scheme_id| Arc::new(ProcScheme::new(scheme_id))).unwrap();
self.insert(ns, "thisproc", |_| Arc::new(ProcScheme::restricted())).unwrap();

View File

@@ -52,7 +52,7 @@ impl SysScheme {
files.insert("scheme_num", Box::new(scheme_num::resource));
files.insert("syscall", Box::new(syscall::resource));
files.insert("uname", Box::new(uname::resource));
files.insert("env", Box::new(|| Ok(Vec::from(unsafe { crate::INIT_ENV }))));
files.insert("env", Box::new(|| Ok(Vec::from(crate::init_env()))));
#[cfg(target_arch = "x86_64")]
files.insert("spurious_irq", Box::new(irq::spurious_irq_resource));

View File

@@ -9,6 +9,7 @@ use spin::{RwLock, RwLockWriteGuard};
use crate::context::{Context, ContextId, memory, WaitpidKey};
use crate::Bootstrap;
use crate::context;
use crate::interrupt;
use crate::paging::mapper::{Flusher, InactiveFlusher, PageFlushAll};
@@ -624,28 +625,17 @@ pub fn waitpid(pid: ContextId, status_ptr: usize, flags: WaitFlags) -> Result<Co
}
}
pub fn usermode_bootstrap(mut data: Box<[u8]>) -> ! {
assert!(!data.is_empty());
const LOAD_BASE: usize = 0;
pub unsafe fn usermode_bootstrap(bootstrap: &Bootstrap) -> ! {
assert_ne!(bootstrap.page_count, 0);
{
let mut active_table = unsafe { ActivePageTable::new(TableKind::User) };
let grant = context::memory::Grant::physmap(
bootstrap.base.start_address(),
VirtualAddress::new(0),
bootstrap.page_count * PAGE_SIZE,
PageFlags::new().user(true).write(true).execute(true),
);
let grant = context::memory::Grant::zeroed(Page::containing_address(VirtualAddress::new(LOAD_BASE)), (data.len()+PAGE_SIZE-1)/PAGE_SIZE, PageFlags::new().user(true).write(true).execute(true), &mut active_table, PageFlushAll::new()).expect("failed to allocate memory for bootstrap");
for (index, page) in grant.pages().enumerate() {
let len = if data.len() - index * PAGE_SIZE < PAGE_SIZE { data.len() % PAGE_SIZE } else { PAGE_SIZE };
let physaddr = active_table.translate_page(page)
.expect("expected mapped init memory to have a corresponding frame")
.start_address();
unsafe {
(RmmA::phys_to_virt(physaddr).data() as *mut u8).copy_from_nonoverlapping(data.as_ptr().add(index * PAGE_SIZE), len);
}
}
context::contexts().current()
.expect("expected a context to exist when executing init")
.read().addr_space()
@@ -653,12 +643,7 @@ pub fn usermode_bootstrap(mut data: Box<[u8]>) -> ! {
.write().grants.insert(grant);
}
drop(data);
#[cfg(target_arch = "x86_64")]
unsafe {
let start = ((LOAD_BASE + 0x18) as *mut usize).read();
// Start with the (probably) ELF executable loaded, without any stack.
usermode(start, 0, 0, 0);
}
// Start in a minimal environment without any stack.
usermode(bootstrap.entry, 0, 0, 0);
}