Add linked list allocator with automatic resizing

Fix memory leaks in exec
Remove warnings
Jeremy Soller
2018-01-29 21:29:24 -07:00
parent 015b79430e
commit 761fe30bf3
12 changed files with 255 additions and 117 deletions

Cargo.lock (generated)

@@ -125,6 +125,7 @@ dependencies = [
  "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "clippy 0.0.182 (registry+https://github.com/rust-lang/crates.io-index)",
  "goblin 0.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "linked_list_allocator 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "raw-cpuid 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "redox_syscall 0.1.37",
  "slab_allocator 0.3.0",

Cargo.toml

@@ -11,10 +11,11 @@ crate-type = ["staticlib"]
 [dependencies]
 bitflags = "1"
 clippy = { version = "*", optional = true }
-slab_allocator = { path = "slab_allocator" }
-spin = "0.4"
+linked_list_allocator = "0.5"
 raw-cpuid = "3.0"
 redox_syscall = { path = "syscall" }
+slab_allocator = { path = "slab_allocator" }
+spin = "0.4"

 [dependencies.goblin]
 version = "0.0.10"
@@ -31,3 +32,4 @@ doc = []
 live = []
 multi_core = []
 pti = []
+slab = []

src/allocator/linked_list.rs (new file)

@@ -0,0 +1,70 @@
use alloc::heap::{Alloc, AllocErr, Layout};
use linked_list_allocator::Heap;
use spin::Mutex;

use paging::ActivePageTable;

static HEAP: Mutex<Option<Heap>> = Mutex::new(None);

pub struct Allocator;

impl Allocator {
    pub unsafe fn init(offset: usize, size: usize) {
        *HEAP.lock() = Some(Heap::new(offset, size));
    }
}

unsafe impl<'a> Alloc for &'a Allocator {
    unsafe fn alloc(&mut self, mut layout: Layout) -> Result<*mut u8, AllocErr> {
        loop {
            let res = if let Some(ref mut heap) = *HEAP.lock() {
                heap.allocate_first_fit(layout)
            } else {
                panic!("__rust_allocate: heap not initialized");
            };

            match res {
                Err(AllocErr::Exhausted { request }) => {
                    layout = request;

                    let size = if let Some(ref heap) = *HEAP.lock() {
                        heap.size()
                    } else {
                        panic!("__rust_allocate: heap not initialized");
                    };

                    println!("Expand heap at {} MB by {} MB", size/1024/1024, ::KERNEL_HEAP_SIZE/1024/1024);

                    super::map_heap(&mut ActivePageTable::new(), ::KERNEL_HEAP_OFFSET + size, ::KERNEL_HEAP_SIZE);

                    if let Some(ref mut heap) = *HEAP.lock() {
                        heap.extend(::KERNEL_HEAP_SIZE);
                    } else {
                        panic!("__rust_allocate: heap not initialized");
                    }
                },
                other => return other,
            }
        }
    }

    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        if let Some(ref mut heap) = *HEAP.lock() {
            heap.deallocate(ptr, layout)
        } else {
            panic!("__rust_deallocate: heap not initialized");
        }
    }

    fn oom(&mut self, error: AllocErr) -> ! {
        panic!("Out of memory: {:?}", error);
    }

    fn usable_size(&self, layout: &Layout) -> (usize, usize) {
        if let Some(ref mut heap) = *HEAP.lock() {
            heap.usable_size(layout)
        } else {
            panic!("__rust_usable_size: heap not initialized");
        }
    }
}
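The retry loop above is the "automatic resizing" from the commit title: on AllocErr::Exhausted, map_heap maps fresh pages just past the current heap end, the heap is extended by KERNEL_HEAP_SIZE, and the same request is retried. A self-contained sketch of that pattern, with a toy bump allocator standing in for linked_list_allocator::Heap (all names below are hypothetical, not kernel code):

struct BumpHeap {
    used: usize,
    size: usize,
}

impl BumpHeap {
    fn alloc(&mut self, bytes: usize) -> Result<usize, ()> {
        if self.used + bytes > self.size {
            return Err(()); // exhausted: the analogue of AllocErr::Exhausted
        }
        let offset = self.used;
        self.used += bytes;
        Ok(offset)
    }

    fn extend(&mut self, by: usize) {
        // the kernel maps fresh pages here before growing the heap
        self.size += by;
    }
}

fn alloc_with_growth(heap: &mut BumpHeap, bytes: usize, step: usize) -> usize {
    loop {
        match heap.alloc(bytes) {
            Ok(offset) => return offset,
            Err(()) => heap.extend(step), // grow, then retry the same request
        }
    }
}

fn main() {
    let mut heap = BumpHeap { used: 0, size: 1024 };
    // a 4096-byte request forces three 1024-byte extensions before it fits
    assert_eq!(alloc_with_growth(&mut heap, 4096, 1024), 0);
    assert_eq!(heap.size, 4096);
}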

src/allocator/mod.rs (new file)

@@ -0,0 +1,36 @@
use paging::{ActivePageTable, Page, VirtualAddress};
use paging::entry::EntryFlags;
use paging::mapper::MapperFlushAll;

#[cfg(not(feature="slab"))]
pub use self::linked_list::Allocator;

#[cfg(feature="slab")]
pub use self::slab::Allocator;

mod linked_list;
mod slab;

unsafe fn map_heap(active_table: &mut ActivePageTable, offset: usize, size: usize) {
    let mut flush_all = MapperFlushAll::new();

    let heap_start_page = Page::containing_address(VirtualAddress::new(offset));
    let heap_end_page = Page::containing_address(VirtualAddress::new(offset + size - 1));
    for page in Page::range_inclusive(heap_start_page, heap_end_page) {
        let result = active_table.map(page, EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
        flush_all.consume(result);
    }

    flush_all.flush(active_table);
}

pub unsafe fn init(active_table: &mut ActivePageTable) {
    let offset = ::KERNEL_HEAP_OFFSET;
    let size = ::KERNEL_HEAP_SIZE;

    // Map heap pages
    map_heap(active_table, offset, size);

    // Initialize global heap
    Allocator::init(offset, size);
}
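With the cfg switches above, the new `slab` Cargo feature selects the slab allocator, while a default build gets the linked-list allocator. Note the inclusive page range in map_heap: the last page is computed from offset + size - 1, so a region that ends exactly on a page boundary does not map one page too many. A small illustration of that arithmetic, assuming 4 KiB pages (names here are hypothetical):

const PAGE_SIZE: usize = 4096;

fn page_range(offset: usize, size: usize) -> (usize, usize) {
    let first = offset / PAGE_SIZE;
    // offset + size - 1 is the region's last byte; using offset + size would
    // overshoot by one page whenever the region ends on a page boundary
    let last = (offset + size - 1) / PAGE_SIZE;
    (first, last)
}

fn main() {
    // a 1 MiB heap starting at a page-aligned offset spans exactly 256 pages
    assert_eq!(page_range(0x1000, 1024 * 1024), (1, 256));
}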

src/allocator/slab.rs

@@ -4,12 +4,14 @@ use slab_allocator::Heap;

 static HEAP: Mutex<Option<Heap>> = Mutex::new(None);

-pub unsafe fn init_heap(offset: usize, size: usize) {
-    *HEAP.lock() = Some(Heap::new(offset, size));
-}
 pub struct Allocator;

+impl Allocator {
+    pub unsafe fn init(offset: usize, size: usize) {
+        *HEAP.lock() = Some(Heap::new(offset, size));
+    }
+}
+
 unsafe impl<'a> Alloc for &'a Allocator {
     unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
         if let Some(ref mut heap) = *HEAP.lock() {
@@ -38,4 +40,4 @@ unsafe impl<'a> Alloc for &'a Allocator {
             panic!("__rust_usable_size: heap not initialized");
         }
     }
-}
+}

src/arch/x86_64/macros.rs

@@ -31,15 +31,25 @@ pub struct ScratchRegisters {
 impl ScratchRegisters {
     pub fn dump(&self) {
-        println!("RAX: {:>016X}", self.rax);
-        println!("RCX: {:>016X}", self.rcx);
-        println!("RDX: {:>016X}", self.rdx);
-        println!("RDI: {:>016X}", self.rdi);
-        println!("RSI: {:>016X}", self.rsi);
-        println!("R8: {:>016X}", self.r8);
-        println!("R9: {:>016X}", self.r9);
-        println!("R10: {:>016X}", self.r10);
-        println!("R11: {:>016X}", self.r11);
+        let rax = self.rax;
+        let rcx = self.rcx;
+        let rdx = self.rdx;
+        let rdi = self.rdi;
+        let rsi = self.rsi;
+        let r8 = self.r8;
+        let r9 = self.r9;
+        let r10 = self.r10;
+        let r11 = self.r11;
+        println!("RAX: {:>016X}", rax);
+        println!("RCX: {:>016X}", rcx);
+        println!("RDX: {:>016X}", rdx);
+        println!("RDI: {:>016X}", rdi);
+        println!("RSI: {:>016X}", rsi);
+        println!("R8: {:>016X}", r8);
+        println!("R9: {:>016X}", r9);
+        println!("R10: {:>016X}", r10);
+        println!("R11: {:>016X}", r11);
     }
 }
@@ -86,12 +96,19 @@ pub struct PreservedRegisters {
 impl PreservedRegisters {
     pub fn dump(&self) {
-        println!("RBX: {:>016X}", self.rbx);
-        println!("RBP: {:>016X}", self.rbp);
-        println!("R12: {:>016X}", self.r12);
-        println!("R13: {:>016X}", self.r13);
-        println!("R14: {:>016X}", self.r14);
-        println!("R15: {:>016X}", self.r15);
+        let rbx = self.rbx;
+        let rbp = self.rbp;
+        let r12 = self.r12;
+        let r13 = self.r13;
+        let r14 = self.r14;
+        let r15 = self.r15;
+        println!("RBX: {:>016X}", rbx);
+        println!("RBP: {:>016X}", rbp);
+        println!("R12: {:>016X}", r12);
+        println!("R13: {:>016X}", r13);
+        println!("R14: {:>016X}", r14);
+        println!("R15: {:>016X}", r15);
     }
 }
@@ -145,9 +162,13 @@ pub struct IretRegisters {
 impl IretRegisters {
     pub fn dump(&self) {
-        println!("RFLAG: {:>016X}", self.rflags);
-        println!("CS: {:>016X}", self.cs);
-        println!("RIP: {:>016X}", self.rip);
+        let rflags = self.rflags;
+        let cs = self.cs;
+        let rip = self.rip;
+        println!("RFLAG: {:>016X}", rflags);
+        println!("CS: {:>016X}", cs);
+        println!("RIP: {:>016X}", rip);
     }
 }
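These dump changes are part of the warning cleanup: assuming the register structs are #[repr(packed)] (they describe a raw interrupt stack frame), formatting self.rax directly would pass a reference to a possibly unaligned field to println!, which rustc warns about. Copying each field into an aligned local first avoids taking the reference. A minimal sketch with a hypothetical packed type:

#[repr(packed)]
struct Regs {
    rax: u64,
    rcx: u64,
}

impl Regs {
    fn dump(&self) {
        // println!("{:X}", self.rax) would implicitly borrow an unaligned
        // field; copying the value into an aligned local sidesteps the borrow
        let rax = self.rax;
        let rcx = self.rcx;
        println!("RAX: {:>016X}", rax);
        println!("RCX: {:>016X}", rcx);
    }
}

fn main() {
    Regs { rax: 0x1234, rcx: 0x5678 }.dump();
}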

src/arch/x86_64/pti.rs

@@ -1,7 +1,11 @@
+#[cfg(feature = "pti")]
 use core::ptr;

+#[cfg(feature = "pti")]
 use memory::Frame;

+#[cfg(feature = "pti")]
 use paging::ActivePageTable;

+#[cfg(feature = "pti")]
 use paging::entry::EntryFlags;

 #[cfg(feature = "pti")]

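Gating each import behind the same `pti` feature as its only users keeps builds without the feature free of unused-import warnings, the other half of the warning cleanup. A minimal sketch of the pattern, with a hypothetical feature name:

// the import is compiled only when its user is compiled
#[cfg(feature = "pti")]
use std::mem;

#[cfg(feature = "pti")]
pub fn frame_size() -> usize {
    mem::size_of::<usize>()
}

#[cfg(not(feature = "pti"))]
pub fn frame_size() -> usize {
    0 // stub for builds without the feature
}

fn main() {
    println!("{}", frame_size());
}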
src/arch/x86_64/start.rs

@@ -6,6 +6,7 @@
 use core::slice;
 use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};

+use allocator;
 use acpi;
 use arch::x86_64::pti;
 use device;
@@ -13,10 +14,7 @@ use gdt;
 use idt;
 use interrupt;
 use memory;
-use memory::slab as allocator;
-use paging::{self, Page, VirtualAddress};
-use paging::entry::EntryFlags;
-use paging::mapper::MapperFlushAll;
+use paging;

 /// Test of zero values in BSS.
 static BSS_TEST_ZERO: usize = 0;
@@ -99,22 +97,7 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
     BSP_READY.store(false, Ordering::SeqCst);

     // Setup kernel heap
-    {
-        let mut flush_all = MapperFlushAll::new();
-
-        // Map heap pages
-        let heap_start_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
-        let heap_end_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET + ::KERNEL_HEAP_SIZE-1));
-        for page in Page::range_inclusive(heap_start_page, heap_end_page) {
-            let result = active_table.map(page, EntryFlags::PRESENT | EntryFlags::GLOBAL | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE);
-            flush_all.consume(result);
-        }
-
-        flush_all.flush(&mut active_table);
-
-        // Init the allocator
-        allocator::init_heap(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
-    }
+    allocator::init(&mut active_table);

     // Initialize devices
     device::init(&mut active_table);

src/consts.rs

@@ -19,7 +19,7 @@
 pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
 pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;

 /// Size of kernel heap
-pub const KERNEL_HEAP_SIZE: usize = 256 * 1024 * 1024; // 256 MB
+pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB

 /// Offset to kernel percpu variables
 //TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
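Shrinking the statically mapped heap from 256 MB to 1 MB is safe because the allocator now grows on demand; each exhaustion maps and extends another KERNEL_HEAP_SIZE bytes, so the heap grows linearly in 1 MB steps. A quick check of that arithmetic (illustrative only):

const KERNEL_HEAP_SIZE: usize = 1024 * 1024; // 1 MiB, as in the new consts

fn heap_size_after(expansions: usize) -> usize {
    // boot size plus one KERNEL_HEAP_SIZE per expansion: linear growth
    KERNEL_HEAP_SIZE * (expansions + 1)
}

fn main() {
    assert_eq!(heap_size_after(0) / (1024 * 1024), 1);  // size at boot
    assert_eq!(heap_size_after(9) / (1024 * 1024), 10); // after nine expansions
}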

src/lib.rs

@@ -41,6 +41,7 @@ extern crate alloc;
 #[macro_use]
 extern crate bitflags;
 extern crate goblin;
+extern crate linked_list_allocator;
 extern crate spin;
 extern crate slab_allocator;
@@ -64,6 +65,9 @@ pub use arch::*;
 /// Constants like memory locations
 pub mod consts;

+/// Heap allocators
+pub mod allocator;
+
 /// ACPI table parsing
 mod acpi;
@@ -104,7 +108,7 @@ pub mod time;
 pub mod tests;

 #[global_allocator]
-static ALLOCATOR: memory::slab::Allocator = memory::slab::Allocator;
+static ALLOCATOR: allocator::Allocator = allocator::Allocator;

 /// A unique number that identifies the current CPU - used for scheduling
 #[thread_local]

src/memory/mod.rs

@@ -10,7 +10,6 @@ use spin::Mutex;

 pub mod bump;
 pub mod recycle;
-pub mod slab;

 /// The current memory map. It's size is maxed out to 512 entries, due to it being
 /// from 0x500 to 0x5000 (800 is the absolute total)

src/syscall/process.rs

@@ -542,8 +542,14 @@ impl Drop for ExecFile {
     }
 }

-fn exec_noreturn(elf: elf::Elf, canonical: Box<[u8]>, setuid: Option<u32>, setgid: Option<u32>, args: Box<[Box<[u8]>]>) -> ! {
-    let entry = elf.entry();
+fn exec_noreturn(
+    canonical: Box<[u8]>,
+    setuid: Option<u32>,
+    setgid: Option<u32>,
+    data: Box<[u8]>,
+    args: Box<[Box<[u8]>]>
+) -> ! {
+    let entry;
     let mut sp = ::USER_STACK_OFFSET + ::USER_STACK_SIZE - 256;

     {
@@ -567,69 +573,76 @@ fn exec_noreturn(elf: elf::Elf, canonical: Box<[u8]>, setuid: Option<u32>, setgid: Option<u32>, args: Box<[Box<[u8]>]>) -> ! {
         // Map and copy new segments
         let mut tls_option = None;
-        for segment in elf.segments() {
-            if segment.p_type == program_header::PT_LOAD {
-                let voff = segment.p_vaddr % 4096;
-                let vaddr = segment.p_vaddr - voff;
-
-                let mut memory = context::memory::Memory::new(
-                    VirtualAddress::new(vaddr as usize),
-                    segment.p_memsz as usize + voff as usize,
-                    EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
-                    true
-                );
-
-                unsafe {
-                    // Copy file data
-                    intrinsics::copy((elf.data.as_ptr() as usize + segment.p_offset as usize) as *const u8,
-                                    segment.p_vaddr as *mut u8,
-                                    segment.p_filesz as usize);
-                }
-
-                let mut flags = EntryFlags::NO_EXECUTE | EntryFlags::USER_ACCESSIBLE;
-
-                if segment.p_flags & program_header::PF_R == program_header::PF_R {
-                    flags.insert(EntryFlags::PRESENT);
-                }
-
-                // W ^ X. If it is executable, do not allow it to be writable, even if requested
-                if segment.p_flags & program_header::PF_X == program_header::PF_X {
-                    flags.remove(EntryFlags::NO_EXECUTE);
-                } else if segment.p_flags & program_header::PF_W == program_header::PF_W {
-                    flags.insert(EntryFlags::WRITABLE);
-                }
-
-                memory.remap(flags);
-                context.image.push(memory.to_shared());
-            } else if segment.p_type == program_header::PT_TLS {
-                let memory = context::memory::Memory::new(
-                    VirtualAddress::new(::USER_TCB_OFFSET),
-                    4096,
-                    EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
-                    true
-                );
-
-                let aligned_size = if segment.p_align > 0 {
-                    ((segment.p_memsz + (segment.p_align - 1))/segment.p_align) * segment.p_align
-                } else {
-                    segment.p_memsz
-                };
-                let rounded_size = ((aligned_size + 4095)/4096) * 4096;
-                let rounded_offset = rounded_size - aligned_size;
-                let tcb_offset = ::USER_TLS_OFFSET + rounded_size as usize;
-                unsafe { *(::USER_TCB_OFFSET as *mut usize) = tcb_offset; }
-
-                context.image.push(memory.to_shared());
-
-                tls_option = Some((
-                    VirtualAddress::new(segment.p_vaddr as usize),
-                    segment.p_filesz as usize,
-                    rounded_size as usize,
-                    rounded_offset as usize,
-                ));
-            }
-        }
+        {
+            let elf = elf::Elf::from(&data).unwrap();
+            entry = elf.entry();
+
+            for segment in elf.segments() {
+                if segment.p_type == program_header::PT_LOAD {
+                    let voff = segment.p_vaddr % 4096;
+                    let vaddr = segment.p_vaddr - voff;
+
+                    let mut memory = context::memory::Memory::new(
+                        VirtualAddress::new(vaddr as usize),
+                        segment.p_memsz as usize + voff as usize,
+                        EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE,
+                        true
+                    );
+
+                    unsafe {
+                        // Copy file data
+                        intrinsics::copy((elf.data.as_ptr() as usize + segment.p_offset as usize) as *const u8,
+                                        segment.p_vaddr as *mut u8,
+                                        segment.p_filesz as usize);
+                    }
+
+                    let mut flags = EntryFlags::NO_EXECUTE | EntryFlags::USER_ACCESSIBLE;
+
+                    if segment.p_flags & program_header::PF_R == program_header::PF_R {
+                        flags.insert(EntryFlags::PRESENT);
+                    }
+
+                    // W ^ X. If it is executable, do not allow it to be writable, even if requested
+                    if segment.p_flags & program_header::PF_X == program_header::PF_X {
+                        flags.remove(EntryFlags::NO_EXECUTE);
+                    } else if segment.p_flags & program_header::PF_W == program_header::PF_W {
+                        flags.insert(EntryFlags::WRITABLE);
+                    }
+
+                    memory.remap(flags);
+                    context.image.push(memory.to_shared());
+                } else if segment.p_type == program_header::PT_TLS {
+                    let memory = context::memory::Memory::new(
+                        VirtualAddress::new(::USER_TCB_OFFSET),
+                        4096,
+                        EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE,
+                        true
+                    );
+
+                    let aligned_size = if segment.p_align > 0 {
+                        ((segment.p_memsz + (segment.p_align - 1))/segment.p_align) * segment.p_align
+                    } else {
+                        segment.p_memsz
+                    };
+                    let rounded_size = ((aligned_size + 4095)/4096) * 4096;
+                    let rounded_offset = rounded_size - aligned_size;
+                    let tcb_offset = ::USER_TLS_OFFSET + rounded_size as usize;
+                    unsafe { *(::USER_TCB_OFFSET as *mut usize) = tcb_offset; }
+
+                    context.image.push(memory.to_shared());
+
+                    tls_option = Some((
+                        VirtualAddress::new(segment.p_vaddr as usize),
+                        segment.p_filesz as usize,
+                        rounded_size as usize,
+                        rounded_offset as usize,
+                    ));
+                }
+            }
+        }
+
+        // Data no longer required, can deallocate
+        drop(data);

         // Map heap
         context.heap = Some(context::memory::Memory::new(
             VirtualAddress::new(::USER_HEAP_OFFSET),
@@ -713,6 +726,9 @@ fn exec_noreturn(
         context.image.push(memory.to_shared());
     }

+    // Args no longer required, can deallocate
+    drop(args);
+
     context.actions = Arc::new(Mutex::new(vec![(
         SigAction {
             sa_handler: unsafe { mem::transmute(SIG_DFL) },
@@ -861,10 +877,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
     match elf::Elf::from(&data) {
         Ok(elf) => {
-            // Drop so that usage is not allowed after unmapping context
-            drop(path);
-            drop(arg_ptrs);
-
             // We check the validity of all loadable sections here
             for segment in elf.segments() {
                 if segment.p_type == program_header::PT_LOAD {
@@ -880,17 +892,21 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                 }
             }
-
-            // This is the point of no return, quite literaly. Any checks for validity need
-            // to be done before, and appropriate errors returned. Otherwise, we have nothing
-            // to return to.
-            exec_noreturn(elf, canonical.into_boxed_slice(), setuid, setgid, args.into_boxed_slice());
         },
         Err(err) => {
             println!("exec: failed to execute {}: {}", unsafe { str::from_utf8_unchecked(path) }, err);
-            Err(Error::new(ENOEXEC))
+            return Err(Error::new(ENOEXEC));
         }
     }

+    // Drop so that usage is not allowed after unmapping context
+    drop(path);
+    drop(arg_ptrs);
+
+    // This is the point of no return, quite literaly. Any checks for validity need
+    // to be done before, and appropriate errors returned. Otherwise, we have nothing
+    // to return to.
+    exec_noreturn(canonical.into_boxed_slice(), setuid, setgid, data.into_boxed_slice(), args.into_boxed_slice());
 }
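The reshuffled ending of exec is the memory-leak fix: exec_noreturn never returns, so the caller's stack frame is never unwound, and anything still owned there (path, arg_ptrs, and previously the ELF buffer borrowed by elf) leaked on every exec. Ownership of data now moves into exec_noreturn, which re-parses the ELF and drops the buffer as soon as the segments are copied, and path/arg_ptrs are dropped before the call. A minimal sketch of the shape of the fix (names hypothetical):

// when `run` never returns, locals still alive at its call site are never
// dropped; moving ownership in and dropping eagerly avoids the leak
fn run(data: Box<[u8]>) -> ! {
    let entry = data.len(); // stand-in for parsing an entry point
    // data is no longer required; deallocate before diverging
    drop(data);
    halt(entry)
}

fn halt(_entry: usize) -> ! {
    loop {} // stand-in for jumping to user space
}

fn main() {
    let data = vec![0u8; 4096].into_boxed_slice();
    run(data); // data is moved, so nothing is left behind in main's frame
}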
pub fn exit(status: usize) -> ! {