post-11 heap allocator designs

Timothy Warren 2020-01-21 16:49:07 -05:00
parent c295ae2b15
commit 22818dca6c
7 changed files with 382 additions and 32 deletions

src/allocator.rs

@@ -1,17 +1,25 @@
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr::null_mut;
use x86_64:: {
structures::paging:: {
FrameAllocator,
mapper::MapToError,
Mapper,
Page,
PageTableFlags,
Size4KiB,
use x86_64::{
structures::paging::{
mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB,
},
VirtAddr,
};
pub mod bump;
pub mod fixed_size_block;
pub mod linked_list;
// use bump::BumpAllocator;
use fixed_size_block::FixedSizeBlockAllocator;
// use linked_list::LinkedListAllocator;
#[global_allocator]
static ALLOCATOR: Locked<FixedSizeBlockAllocator> =
Locked::new(FixedSizeBlockAllocator::new());
pub const HEAP_START: usize = 0x_4444_4444_0000;
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
@@ -49,8 +57,33 @@ pub fn init_heap(
}
unsafe {
super::ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
}
Ok(())
}
pub struct Locked<A> {
inner: spin::Mutex<A>,
}
impl<A> Locked<A> {
pub const fn new(inner: A) -> Self {
Locked {
inner: spin::Mutex::new(inner),
}
}
pub fn lock(&self) -> spin::MutexGuard<A> {
self.inner.lock()
}
}
fn align_up(addr: usize, align: usize) -> usize {
let remainder = addr % align;
if remainder == 0 {
addr // addr already aligned
} else {
addr - remainder + align
}
}
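`align_up` rounds `addr` up to the next multiple of `align`. A quick sketch of its behavior (not part of the commit; the demo function is hypothetical), including the classic bitmask form that gives the same result whenever `align` is a power of two:

fn align_up_demo() {
    assert_eq!(align_up(0x1000, 16), 0x1000); // already a multiple -> unchanged
    assert_eq!(align_up(0x1001, 16), 0x1010); // rounded up to the next multiple

    // for power-of-two alignments the same result can be computed branch-free
    let (addr, align) = (0x1001_usize, 16_usize);
    assert_eq!(align_up(addr, align), (addr + align - 1) & !(align - 1));
}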

src/allocator/bump.rs (new file, 58 additions)

@@ -0,0 +1,58 @@
use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr;
pub struct BumpAllocator {
heap_start: usize,
heap_end: usize,
next: usize,
allocations: usize,
}
impl BumpAllocator {
/// Creates a new, empty bump allocator.
pub const fn new() -> Self {
BumpAllocator {
heap_start: 0,
heap_end: 0,
next: 0,
allocations: 0,
}
}
/// Initializes the bump allocator with the given heap bounds.
///
/// This method is unsafe because the caller must ensure that the given
/// memory range is unused. Also, this method must be called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
self.heap_start = heap_start;
self.heap_end = heap_start + heap_size;
self.next = heap_start;
}
}
unsafe impl GlobalAlloc for Locked<BumpAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut bump = self.lock(); // get a mutable reference
let alloc_start = align_up(bump.next, layout.align());
let alloc_end = alloc_start + layout.size();
if alloc_end > bump.heap_end {
ptr::null_mut() // out of memory
} else {
bump.next = alloc_end;
bump.allocations += 1;
alloc_start as *mut u8
}
}
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
let mut bump = self.lock(); // get a mutable reference
bump.allocations -= 1;
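// once the last outstanding allocation is freed, reset `next` to reclaim the whole heap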
if bump.allocations == 0 {
bump.next = bump.heap_start;
}
}
}
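Because `dealloc` only decrements a counter, freed memory in the middle of the heap is never reused; the allocator only recovers once every allocation has been returned. A minimal sketch of that behavior (not part of the commit; the scratch buffer and demo function are hypothetical, and the sketch assumes it lives next to `Locked` and `BumpAllocator`):

use alloc::alloc::{GlobalAlloc, Layout};

// hypothetical scratch buffer standing in for the mapped heap pages
static mut SCRATCH: [u8; 1024] = [0; 1024];

fn bump_demo() {
    let bump = Locked::new(BumpAllocator::new());
    unsafe {
        bump.lock().init(SCRATCH.as_ptr() as usize, SCRATCH.len());
        let layout = Layout::from_size_align(128, 8).unwrap();
        let a = bump.alloc(layout);
        let b = bump.alloc(layout);
        assert_eq!(b as usize, a as usize + 128); // `next` only moves forward
        bump.dealloc(a, layout);
        let c = bump.alloc(layout); // the 128 bytes freed above are NOT reused
        assert_eq!(c as usize, b as usize + 128);
        bump.dealloc(b, layout);
        bump.dealloc(c, layout); // counter reaches zero -> `next` resets
        assert_eq!(bump.alloc(layout), a); // back at the start of the heap
    }
}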

src/allocator/fixed_size_block.rs (new file, 100 additions)

@@ -0,0 +1,100 @@
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};
use super::Locked;
struct ListNode {
next: Option<&'static mut ListNode>,
}
/// The block sizes to use.
///
/// The sizes must each be a power of 2 because they are also used as
/// the block alignment (alignments must be always powers of 2).
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];
pub struct FixedSizeBlockAllocator {
list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
fallback_allocator: linked_list_allocator::Heap,
}
impl FixedSizeBlockAllocator {
/// Creates an empty FixedSizeBlockAllocator.
pub const fn new() -> Self {
FixedSizeBlockAllocator {
list_heads: [None; BLOCK_SIZES.len()],
fallback_allocator: linked_list_allocator::Heap::empty(),
}
}
/// Initialize the allocator with the given heap bounds.
///
/// This function is unsafe because the caller must guarantee that the given
/// heap bounds are valid and that the heap is unused. This method must be
/// called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
self.fallback_allocator.init(heap_start, heap_size);
}
/// Allocates using the fallback allocator.
fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
match self.fallback_allocator.allocate_first_fit(layout) {
Ok(ptr) => ptr.as_ptr(),
Err(_) => ptr::null_mut(),
}
}
}
fn list_index(layout: &Layout) -> Option<usize> {
let required_block_size = layout.size().max(layout.align());
BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}
unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
let mut allocator = self.lock();
match list_index(&layout) {
Some(index) => {
match allocator.list_heads[index].take() {
Some(node) => {
allocator.list_heads[index] = node.next.take();
node as *mut ListNode as *mut u8
}
None => {
// no block exists in list => allocate new block
let block_size = BLOCK_SIZES[index];
// only works if all block sizes are a power of 2
let block_align = block_size;
let layout = Layout::from_size_align(block_size, block_align)
.unwrap();
allocator.fallback_alloc(layout)
}
}
},
None => allocator.fallback_alloc(layout),
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let mut allocator = self.lock();
match list_index(&layout) {
Some(index) => {
let new_node = ListNode {
next: allocator.list_heads[index].take(),
};
// verify that block has size and alignment required for storing node
assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
let new_node_ptr = ptr as *mut ListNode;
new_node_ptr.write(new_node);
allocator.list_heads[index] = Some(&mut *new_node_ptr);
}
None => {
let ptr = ptr::NonNull::new(ptr).unwrap();
allocator.fallback_allocator.deallocate(ptr, layout);
}
}
}
}
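`list_index` maps a requested layout to the smallest block size that satisfies both its size and its alignment; requests above 2048 bytes return `None` and go to the linked-list fallback allocator. A small sketch of the mapping (not part of the commit; the demo function is hypothetical and assumes module-internal access to `list_index`):

use alloc::alloc::Layout;

fn list_index_demo() {
    // 20 bytes, 4-aligned -> first block size >= 20 is 32 (index 2)
    let layout = Layout::from_size_align(20, 4).unwrap();
    assert_eq!(list_index(&layout), Some(2));

    // 1 byte with 64-byte alignment still needs a 64-byte block,
    // because the block size doubles as the block alignment
    let layout = Layout::from_size_align(1, 64).unwrap();
    assert_eq!(list_index(&layout), Some(3));

    // bigger than the largest block size -> fallback allocator
    let layout = Layout::from_size_align(4096, 8).unwrap();
    assert_eq!(list_index(&layout), None);
}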

src/allocator/linked_list.rs (new file, 148 additions)

@@ -0,0 +1,148 @@
use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};
struct ListNode {
size: usize,
next: Option<&'static mut ListNode>,
}
impl ListNode {
const fn new(size: usize) -> Self {
ListNode { size, next: None }
}
fn start_addr(&self) -> usize {
self as *const Self as usize
}
fn end_addr(&self) -> usize {
self.start_addr() + self.size
}
}
pub struct LinkedListAllocator {
head: ListNode,
}
impl LinkedListAllocator {
/// Creates an empty LinkedListAllocator.
pub const fn new() -> Self {
Self {
head: ListNode::new(0),
}
}
/// Initialize the allocator with the given heap bounds.
///
/// This function is unsafe because the caller must guarantee that the given
/// heap bounds are valid and that the heap is unused. This method must be
/// called only once.
pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
self.add_free_region(heap_start, heap_size);
}
/// Adds the given memory region to the front of the list
unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
// ensure that the freed region is capable of holding ListNode
assert!(align_up(addr, mem::align_of::<ListNode>()) == addr);
assert!(size >= mem::size_of::<ListNode>());
// create a new list node and append it at the start of the list
let mut node = ListNode::new(size);
node.next = self.head.next.take();
let node_ptr = addr as *mut ListNode;
node_ptr.write(node);
self.head.next = Some(&mut *node_ptr)
}
/// Looks for a free region with the given size and alignment and removes
/// it from the list.
///
/// Returns a tuple of the list node and the start address of the allocation
fn find_region(&mut self, size: usize, align: usize) -> Option<(&'static mut ListNode, usize)> {
// reference to the current list node, updated for each iteration
let mut current = &mut self.head;
// look for a large enough memory region in linked list
while let Some(ref mut region) = current.next {
if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
// region suitable for allocation -> remove node from list
let next = region.next.take();
let ret = Some((current.next.take().unwrap(), alloc_start));
current.next = next;
return ret;
} else {
// region not suitable -> continue with next region
current = current.next.as_mut().unwrap();
}
}
// no suitable region found
None
}
/// Try to use the given region for an allocation with given size and
/// alignment.
///
/// Returns the allocation start address on success.
fn alloc_from_region(region: &ListNode, size: usize, align: usize) -> Result<usize, ()> {
let alloc_start = align_up(region.start_addr(), align);
let alloc_end = alloc_start + size;
if alloc_end > region.end_addr() {
// region too small
return Err(());
}
let excess_size = region.end_addr() - alloc_end;
if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
// rest of region too small to hold a ListNode (required because the
// allocation splits the region in a used and a free part)
return Err(());
}
// region suitable for allocation
Ok(alloc_start)
}
/// Adjust the given layout so that the resulting allocated memory
/// region is also capable of storing a `ListNode`.
///
/// Returns the adjusted size and alignment as a (size, align) tuple.
fn size_align(layout: Layout) -> (usize, usize) {
let layout = layout
.align_to(mem::align_of::<ListNode>())
.expect("adjusting alignment failed")
.pad_to_align();
let size = layout.size().max(mem::size_of::<ListNode>());
(size, layout.align())
}
}
unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// perform layout adjustments
let (size, align) = LinkedListAllocator::size_align(layout);
let mut allocator = self.inner.lock();
if let Some((region, alloc_start)) = allocator.find_region(size, align) {
let alloc_end = alloc_start + size;
let excess_size = region.end_addr() - alloc_end;
if excess_size > 0 {
allocator.add_free_region(alloc_end, excess_size);
}
alloc_start as *mut u8
} else {
ptr::null_mut()
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// perform layout adjustments
let (size, _) = LinkedListAllocator::size_align(layout);
self.inner.lock().add_free_region(ptr as usize, size)
}
}
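Every block handed back to `dealloc` must be able to store a `ListNode`, which is why `size_align` pads small or weakly aligned requests up front. A sketch of the adjustment (not part of the commit; the demo function is hypothetical and assumes module-internal access to `size_align` and `ListNode`):

use alloc::alloc::Layout;
use core::mem;

fn size_align_demo() {
    // a 10-byte, 1-aligned request is rounded up so that the region
    // can hold a ListNode once it is freed again
    let layout = Layout::from_size_align(10, 1).unwrap();
    let (size, align) = LinkedListAllocator::size_align(layout);
    assert!(size >= mem::size_of::<ListNode>());
    assert_eq!(align, mem::align_of::<ListNode>());
    assert_eq!(size % align, 0); // guaranteed by pad_to_align
}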

src/lib.rs

@@ -1,8 +1,11 @@
#![no_std]
#![cfg_attr(test, no_main)]
#![feature(alloc_error_handler)]
#![feature(custom_test_frameworks)]
#![feature(abi_x86_interrupt)]
#![feature(alloc_error_handler)]
#![feature(alloc_layout_extra)]
#![feature(const_fn)]
#![feature(const_in_array_repeat_expressions)]
#![feature(custom_test_frameworks)]
#![test_runner(crate::test_runner)]
#![reexport_test_harness_main = "test_main"]
@@ -17,8 +20,8 @@ use bootloader::{entry_point, BootInfo};
#[cfg(test)]
entry_point!(test_kernel_main);
#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();
// #[global_allocator]
// static ALLOCATOR: LockedHeap = LockedHeap::empty();
#[alloc_error_handler]
fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! {

src/main.rs

@@ -6,12 +6,7 @@
extern crate alloc;
use alloc::{
boxed::Box,
vec,
vec::Vec,
rc::Rc,
};
use alloc::{boxed::Box, rc::Rc, vec, vec::Vec};
use core::panic::PanicInfo;
use blog_os::println;
@@ -42,12 +37,9 @@ fn kernel_main(boot_info: &'static BootInfo) -> ! {
blog_os::init();
let mut mapper = unsafe { memory::init(VirtAddr::new(boot_info.physical_memory_offset)) };
let mut frame_allocator = unsafe {
BootInfoFrameAllocator::init(&boot_info.memory_map)
};
let mut frame_allocator = unsafe { BootInfoFrameAllocator::init(&boot_info.memory_map) };
allocator::init_heap(&mut mapper, &mut frame_allocator)
.expect("heap initialization failed");
allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
// allocate a number on the heap
let heap_value = Box::new(41);
@@ -63,9 +55,15 @@ fn kernel_main(boot_info: &'static BootInfo) -> ! {
// create a reference counted vector -> will be freed when count reaches 0
let reference_counted = Rc::new(vec![1, 2, 3]);
let cloned_reference = reference_counted.clone();
println!("current reference count is {}", Rc::strong_count(&cloned_reference));
println!(
"current reference count is {}",
Rc::strong_count(&cloned_reference)
);
core::mem::drop(reference_counted);
println!("reference count is {} now", Rc::strong_count(&cloned_reference));
println!(
"reference count is {} now",
Rc::strong_count(&cloned_reference)
);
#[cfg(test)]
test_main();

tests/heap_allocation.rs

@@ -8,6 +8,7 @@ extern crate alloc;
use alloc::boxed::Box;
use alloc::vec::Vec;
use blog_os::allocator::HEAP_SIZE;
use blog_os::{serial_print, serial_println};
use bootloader::{entry_point, BootInfo};
use core::panic::PanicInfo;
@@ -22,11 +23,8 @@ fn main(boot_info: &'static BootInfo) -> ! {
blog_os::init();
let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
let mut mapper = unsafe { memory::init(phys_mem_offset) };
let mut frame_allocator = unsafe {
BootInfoFrameAllocator::init(&boot_info.memory_map)
};
allocator::init_heap(&mut mapper, &mut frame_allocator)
.expect("heap initialization failed");
let mut frame_allocator = unsafe { BootInfoFrameAllocator::init(&boot_info.memory_map) };
allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
test_main();
@@ -67,3 +65,15 @@ fn many_boxes() {
}
serial_println!("[ok]");
}
#[test_case]
fn many_boxes_long_lived() {
serial_print!("many_boxes_long_lived... ");
let long_lived = Box::new(1);
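// `long_lived` keeps the allocation counter above zero for the entire loop,
// so a bump allocator can never reset `next` and eventually runs out of memory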
for i in 0..HEAP_SIZE {
let x = Box::new(i);
assert_eq!(*x, i);
}
assert_eq!(*long_lived, 1);
serial_println!("[ok]");
}
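A possible follow-up test (not part of this commit; the test name is hypothetical) that would exercise the fixed-size block allocator's free lists by checking that a freed block is handed out again for the next request of the same size class:

#[test_case]
fn block_reuse() {
    serial_print!("block_reuse... ");
    let first = Box::new([0u8; 32]);
    let first_addr = &*first as *const [u8; 32] as usize;
    core::mem::drop(first);
    // the freed block sits at the head of the 32-byte free list, so the
    // next 32-byte allocation should come back at exactly that address
    let second = Box::new([0u8; 32]);
    assert_eq!(&*second as *const [u8; 32] as usize, first_addr);
    serial_println!("[ok]");
}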