Merge pull request #76 from weclaw1/master

Use slab allocator for kernel heap
This commit is contained in:
Jeremy Soller
2018-01-28 14:16:23 -07:00
committed by GitHub
6 changed files with 22 additions and 27 deletions

View File

@@ -9,9 +9,9 @@ path = "src/lib.rs"
crate-type = ["staticlib"]
[dependencies]
alloc_kernel = { path = "alloc_kernel" }
bitflags = "1"
clippy = { version = "*", optional = true }
slab_allocator = "0.3.0"
spin = "0.4"
raw-cpuid = "3.0"
redox_syscall = { path = "syscall" }

View File

@@ -1,8 +0,0 @@
[package]
authors = ["Philipp Oppermann <dev@phil-opp.com>"]
name = "alloc_kernel"
version = "0.1.0"
[dependencies]
linked_list_allocator = { git = "https://github.com/redox-os/linked-list-allocator.git" }
spin = "*"

View File

@@ -7,13 +7,13 @@ use core::slice;
use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use acpi;
use allocator;
use arch::x86_64::pti;
use device;
use gdt;
use idt;
use interrupt;
use memory;
use memory::slab as allocator;
use paging::{self, Page, VirtualAddress};
use paging::entry::EntryFlags;
use paging::mapper::MapperFlushAll;
@@ -113,7 +113,7 @@ pub unsafe extern fn kstart(args_ptr: *const KernelArgs) -> ! {
flush_all.flush(&mut active_table);
// Init the allocator
allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
allocator::init_heap(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
}
// Initialize devices

View File

@@ -32,7 +32,6 @@
#![feature(const_size_of)]
#![no_std]
extern crate alloc_kernel as allocator;
pub extern crate x86;
#[macro_use]
@@ -42,6 +41,7 @@ extern crate alloc;
extern crate bitflags;
extern crate goblin;
extern crate spin;
extern crate slab_allocator;
use alloc::arc::Arc;
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
@@ -103,7 +103,7 @@ pub mod time;
pub mod tests;
#[global_allocator]
static ALLOCATOR: allocator::Allocator = allocator::Allocator;
static ALLOCATOR: memory::slab::Allocator = memory::slab::Allocator;
/// A unique number that identifies the current CPU - used for scheduling
#[thread_local]

View File

@@ -10,6 +10,7 @@ use spin::Mutex;
pub mod bump;
pub mod recycle;
pub mod slab;
/// The current memory map. Its size is maxed out to 512 entries, due to it being
/// from 0x500 to 0x5000 (800 is the absolute total)

View File

@@ -1,20 +1,10 @@
#![deny(warnings)]
#![feature(alloc)]
#![feature(allocator_api)]
#![feature(const_fn)]
#![no_std]
extern crate alloc;
extern crate spin;
extern crate linked_list_allocator;
use alloc::heap::{Alloc, AllocErr, Layout};
use spin::Mutex;
use linked_list_allocator::Heap;
use slab_allocator::Heap;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
pub unsafe fn init(offset: usize, size: usize) {
pub unsafe fn init_heap(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
@@ -23,7 +13,7 @@ pub struct Allocator;
unsafe impl<'a> Alloc for &'a Allocator {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate_first_fit(layout)
heap.allocate(layout)
} else {
panic!("__rust_allocate: heap not initialized");
}
@@ -36,4 +26,16 @@ unsafe impl<'a> Alloc for &'a Allocator {
panic!("__rust_deallocate: heap not initialized");
}
}
}
fn oom(&mut self, error: AllocErr) -> ! {
panic!("Out of memory: {:?}", error);
}
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.usable_size(layout)
} else {
panic!("__rust_usable_size: heap not initialized");
}
}
}