diff --git a/alloc_kernel/src/lib.rs b/alloc_kernel/src/lib.rs index a7dc607..9caeb28 100644 --- a/alloc_kernel/src/lib.rs +++ b/alloc_kernel/src/lib.rs @@ -1,72 +1,39 @@ -#![feature(allocator)] +#![deny(warnings)] +#![feature(alloc)] +#![feature(allocator_api)] #![feature(const_fn)] - -#![allocator] #![no_std] -use core::ptr; -use spin::Mutex; -use linked_list_allocator::Heap; - +extern crate alloc; extern crate spin; extern crate linked_list_allocator; +use alloc::heap::{Alloc, AllocErr, Layout}; +use spin::Mutex; +use linked_list_allocator::Heap; + static HEAP: Mutex<Option<Heap>> = Mutex::new(None); pub unsafe fn init(offset: usize, size: usize) { *HEAP.lock() = Some(Heap::new(offset, size)); } -#[no_mangle] -pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 { - if let Some(ref mut heap) = *HEAP.lock() { - heap.allocate_first_fit(size, align).expect("out of memory") - } else { - panic!("__rust_allocate: heap not initialized"); +pub struct Allocator; + +unsafe impl<'a> Alloc for &'a Allocator { + unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { + if let Some(ref mut heap) = *HEAP.lock() { + heap.allocate_first_fit(layout) + } else { + panic!("__rust_allocate: heap not initialized"); + } + } + + unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { + if let Some(ref mut heap) = *HEAP.lock() { + heap.deallocate(ptr, layout) + } else { + panic!("__rust_deallocate: heap not initialized"); + } } } - -#[no_mangle] -pub extern fn __rust_allocate_zeroed(size: usize, align: usize) -> *mut u8 { - let ptr = __rust_allocate(size, align); - unsafe { - ptr::write_bytes(ptr, 0, size); - } - ptr -} - -#[no_mangle] -pub extern fn __rust_deallocate(ptr: *mut u8, size: usize, align: usize) { - if let Some(ref mut heap) = *HEAP.lock() { - unsafe { heap.deallocate(ptr, size, align) }; - } else { - panic!("__rust_deallocate: heap not initialized"); - } -} - -#[no_mangle] -pub extern fn __rust_usable_size(size: usize, _align: usize) -> usize { - 
size -} - -#[no_mangle] -pub extern fn __rust_reallocate_inplace(_ptr: *mut u8, size: usize, - _new_size: usize, _align: usize) -> usize -{ - size -} - -#[no_mangle] -pub extern fn __rust_reallocate(ptr: *mut u8, size: usize, new_size: usize, - align: usize) -> *mut u8 { - use core::{ptr, cmp}; - - // from: https://github.com/rust-lang/rust/blob/ - // c66d2380a810c9a2b3dbb4f93a830b101ee49cc2/ - // src/liballoc_system/lib.rs#L98-L101 - - let new_ptr = __rust_allocate(new_size, align); - unsafe { ptr::copy(ptr, new_ptr, cmp::min(size, new_size)) }; - __rust_deallocate(ptr, size, align); - new_ptr -} diff --git a/src/context/list.rs b/src/context/list.rs index 1556e10..aad872e 100644 --- a/src/context/list.rs +++ b/src/context/list.rs @@ -1,5 +1,7 @@ +use alloc::allocator::{Alloc, Layout}; use alloc::arc::Arc; use alloc::boxed::Box; +use alloc::heap::Heap; use collections::BTreeMap; use core::mem; use core::sync::atomic::Ordering; @@ -65,7 +67,7 @@ impl ContextList { let context_lock = self.new_context()?; { let mut context = context_lock.write(); - let mut fx = unsafe { Box::from_raw(::alloc::heap::allocate(512, 16) as *mut [u8; 512]) }; + let mut fx = unsafe { Box::from_raw(Heap.alloc(Layout::from_size_align_unchecked(512, 16)).unwrap() as *mut [u8; 512]) }; for b in fx.iter_mut() { *b = 0; } diff --git a/src/context/mod.rs b/src/context/mod.rs index 20ceedd..f0cd312 100644 --- a/src/context/mod.rs +++ b/src/context/mod.rs @@ -1,5 +1,7 @@ //! 
Context management +use alloc::allocator::{Alloc, Layout}; use alloc::boxed::Box; +use alloc::heap::Heap; use core::sync::atomic::Ordering; use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -47,7 +49,7 @@ pub fn init() { let mut contexts = contexts_mut(); let context_lock = contexts.new_context().expect("could not initialize first context"); let mut context = context_lock.write(); - let mut fx = unsafe { Box::from_raw(::alloc::heap::allocate(512, 16) as *mut [u8; 512]) }; + let mut fx = unsafe { Box::from_raw(Heap.alloc(Layout::from_size_align_unchecked(512, 16)).unwrap() as *mut [u8; 512]) }; for b in fx.iter_mut() { *b = 0; } diff --git a/src/lib.rs b/src/lib.rs index 223a633..533b9e4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -5,13 +5,14 @@ //#![deny(warnings)] #![feature(alloc)] +#![feature(allocator_api)] #![feature(asm)] #![feature(collections)] #![feature(concat_idents)] #![feature(const_fn)] #![feature(core_intrinsics)] #![feature(drop_types_in_const)] -#![feature(heap_api)] +#![feature(global_allocator)] #![feature(integer_atomics)] #![feature(lang_items)] #![feature(naked_functions)] @@ -103,6 +104,9 @@ pub mod time; #[cfg(test)] pub mod tests; +#[global_allocator] +static ALLOCATOR: allocator::Allocator = allocator::Allocator; + #[cfg(feature = "multi_core")] static MULTI_CORE_IS_NOT_SUPPORTED_AT_THE_MOMENT: u8 = (); @@ -183,7 +187,7 @@ pub extern fn kmain_ap(id: usize) { interrupt::halt(); } } - + if cfg!(feature = "multi_core"){ CPU_ID.store(id, Ordering::SeqCst); diff --git a/src/syscall/process.rs b/src/syscall/process.rs index bd30696..2b5e426 100644 --- a/src/syscall/process.rs +++ b/src/syscall/process.rs @@ -1,6 +1,8 @@ ///! 
Process syscalls +use alloc::allocator::{Alloc, Layout}; use alloc::arc::Arc; use alloc::boxed::Box; +use alloc::heap::Heap; use collections::{BTreeMap, Vec}; use core::{intrinsics, mem, str}; use core::ops::DerefMut; @@ -106,7 +108,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> { arch = context.arch.clone(); if let Some(ref fx) = context.kfx { - let mut new_fx = unsafe { Box::from_raw(::alloc::heap::allocate(512, 16) as *mut [u8; 512]) }; + let mut new_fx = unsafe { Box::from_raw(Heap.alloc(Layout::from_size_align_unchecked(512, 16)).unwrap() as *mut [u8; 512]) }; for (new_b, b) in new_fx.iter_mut().zip(fx.iter()) { *new_b = *b; }