From 889c6ed938722ea8ed75f90e80b3ed494bf9dd6b Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Sun, 10 Sep 2023 15:46:55 +0200 Subject: [PATCH 1/7] hal_core: iter_pages shouldn't be right inclusive In doing so, we were iterating over one page too many. Instead iterate over an exclusive range. --- hal_core/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hal_core/src/lib.rs b/hal_core/src/lib.rs index 33f873b..e420030 100644 --- a/hal_core/src/lib.rs +++ b/hal_core/src/lib.rs @@ -43,7 +43,7 @@ impl AddressRange { pub fn iter_pages(self, page_size: usize) -> impl Iterator { assert_eq!(self.end, mm::align_up(self.end, page_size)); - (self.start..=self.end).step_by(page_size) + (self.start..self.end).step_by(page_size) } pub fn count_pages(&self, page_size: usize) -> usize { From 1939fe186c7b700d6e5f84c4037e4314e0265dfa Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Sun, 10 Sep 2023 15:48:24 +0200 Subject: [PATCH 2/7] refactor: stop passing page_size where possible We have to assume PAGE_SIZE from the HAL to be a baked in constant at compile-time. Similar to a PAGE_SIZE define in a C project. Don't pass it because it won't (read *must not*) change at run-time. 
--- hal_aarch64/src/mm/mod.rs | 6 ++- hal_core/src/mm.rs | 6 +++ kernel/src/generic_main.rs | 2 +- kernel/src/mm/binary_buddy_allocator.rs | 5 ++- kernel/src/mm/physical_memory_manager.rs | 54 +++++++++--------------- 5 files changed, 34 insertions(+), 39 deletions(-) diff --git a/hal_aarch64/src/mm/mod.rs b/hal_aarch64/src/mm/mod.rs index 187a613..784257f 100644 --- a/hal_aarch64/src/mm/mod.rs +++ b/hal_aarch64/src/mm/mod.rs @@ -75,5 +75,9 @@ unsafe fn load_pagetable(pt: &'static mut PageTable) { } pub fn align_up(addr: usize) -> usize { - mm::align_up(addr, PageTable::PAGE_SIZE) + mm::align_up(addr, PAGE_SIZE) +} + +pub fn align_down(addr: usize) -> usize { + mm::align_down(addr, PAGE_SIZE) } diff --git a/hal_core/src/mm.rs b/hal_core/src/mm.rs index 86de2ff..52f5e61 100644 --- a/hal_core/src/mm.rs +++ b/hal_core/src/mm.rs @@ -136,6 +136,12 @@ pub fn align_up(val: usize, page_sz: usize) -> usize { ((val + page_sz - 1) / page_sz) * page_sz } +pub fn align_down(addr: usize, page_sz: usize) -> usize { + // TODO: can this be more optimized ? + // XXX: uh isn't this math wrong ? 
+ align_up(addr, page_sz) + page_sz +} + pub fn init_paging( r: impl Iterator, rw: impl Iterator, diff --git a/kernel/src/generic_main.rs b/kernel/src/generic_main.rs index 5b267f6..6f45ca5 100644 --- a/kernel/src/generic_main.rs +++ b/kernel/src/generic_main.rs @@ -19,7 +19,7 @@ pub fn generic_main(dt: DeviceTree, hacky_devices: &[& // Memory init globals::PHYSICAL_MEMORY_MANAGER - .lock(|pmm| pmm.init_from_device_tree(&dt, 4096)) + .lock(|pmm| pmm.init_from_device_tree(&dt)) .unwrap(); map_address_space(&dt, devices).expect("failed to map the addres space"); diff --git a/kernel/src/mm/binary_buddy_allocator.rs b/kernel/src/mm/binary_buddy_allocator.rs index d7b3ec1..49a941c 100644 --- a/kernel/src/mm/binary_buddy_allocator.rs +++ b/kernel/src/mm/binary_buddy_allocator.rs @@ -1,4 +1,5 @@ use crate::globals; +use crate::hal::mm::PAGE_SIZE; use core::alloc::{GlobalAlloc, Layout}; @@ -23,10 +24,10 @@ unsafe impl GlobalAlloc for BinaryBuddyAllocator { // - disable interrupts when entering, then re-enable globals::PHYSICAL_MEMORY_MANAGER.lock(|pmm| { - let page_count = if layout.size() <= pmm.page_size() { + let page_count = if layout.size() <= PAGE_SIZE { 1 } else { - layout.size() / pmm.page_size() + 1 + layout.size() / PAGE_SIZE + 1 }; pmm.alloc_rw_pages(page_count).unwrap_or(0usize.into()) as *mut u8 }) diff --git a/kernel/src/mm/physical_memory_manager.rs b/kernel/src/mm/physical_memory_manager.rs index 4999b4f..106c196 100644 --- a/kernel/src/mm/physical_memory_manager.rs +++ b/kernel/src/mm/physical_memory_manager.rs @@ -9,6 +9,8 @@ use hal_core::{ AddressRange, }; +use hal::mm::PAGE_SIZE; + use log::debug; #[derive(Debug, PartialEq, Eq)] @@ -59,17 +61,16 @@ pub enum AllocatorError { #[derive(Debug)] pub struct PhysicalMemoryManager { metadata: &'static mut [PhysicalPage], - page_size: usize, } impl PhysicalMemoryManager { - fn count_pages(regions: &[Option], page_size: usize) -> usize { + fn count_pages(regions: &[Option]) -> usize { let total_memory_bytes: 
usize = regions .iter() .filter_map(|maybe_region| maybe_region.map(|region| region.size())) .sum(); - total_memory_bytes / page_size + total_memory_bytes / PAGE_SIZE } fn find_large_region(regions: &[Option], minimum_size: usize) -> Option { @@ -80,15 +81,6 @@ impl PhysicalMemoryManager { .map(|region| region.start) } - fn align_up(addr: usize, alignment: usize) -> usize { - ((addr) + alignment - 1) & !(alignment - 1) - } - - fn align_down(addr: usize, alignment: usize) -> usize { - // TODO: can this be more optimized ? - Self::align_up(addr, alignment) + alignment - } - fn is_metadata_page(base: usize, metadata_start: usize, metadata_end: usize) -> bool { base >= metadata_start && base < metadata_end } @@ -155,7 +147,6 @@ impl PhysicalMemoryManager { fn available_memory_regions( device_tree: &DeviceTree, - page_size: usize, ) -> [Option; MAX_REGIONS] { // First put all regions in the array. let mut all_regions = [None; MAX_REGIONS]; @@ -187,8 +178,8 @@ impl PhysicalMemoryManager { // Re-align the regions, for exemple things we exclude are not always aligned to a page boundary. all_regions.iter_mut().for_each(|maybe_region| { if let Some(region) = maybe_region { - region.start = Self::align_down(region.start, page_size); - region.end = Self::align_up(region.end, page_size); + region.start = hal::mm::align_down(region.start); + region.end = hal::mm::align_up(region.end); *maybe_region = if region.size() > 0 { Some(*region) @@ -209,27 +200,23 @@ impl PhysicalMemoryManager { ) }; - Self { - metadata, - page_size: 0, - } + Self { metadata } } /// Initialize a [`PageAllocator`] from the device tree. 
pub fn init_from_device_tree( &mut self, device_tree: &DeviceTree, - page_size: usize, ) -> Result<(), AllocatorError> { - let available_regions = Self::available_memory_regions::<10>(device_tree, page_size); + let available_regions = Self::available_memory_regions::<10>(device_tree); assert!( available_regions .iter() .flatten() .all( - |region| region.start == Self::align_up(region.start, page_size) - && region.end == Self::align_up(region.end, page_size) + |region| region.start == hal::mm::align_up(region.start) + && region.end == hal::mm::align_up(region.end) ), "Expected region bounds to be aligned to the page size (won't be possible to allocate pages otherwise)" ); @@ -238,9 +225,9 @@ impl PhysicalMemoryManager { debug!("region {}: {:X?}", i, reg); } - let page_count = Self::count_pages(&available_regions, page_size); + let page_count = Self::count_pages(&available_regions); let metadata_size = page_count * mem::size_of::(); - let pages_needed = Self::align_up(metadata_size, page_size) / page_size; + let pages_needed = hal::mm::align_up(metadata_size) / PAGE_SIZE; let metadata_addr = Self::find_large_region(&available_regions, metadata_size) .ok_or(AllocatorError::NotEnoughMemoryForMetadata)?; @@ -251,22 +238,23 @@ impl PhysicalMemoryManager { let physical_pages = available_regions .iter() .flatten() - .flat_map(|region| (region.start..region.end).step_by(page_size)) + .flat_map(|region| region.iter_pages(PAGE_SIZE)) .map(|base| { Self::phys_addr_to_physical_page( base, metadata_addr, - metadata_addr + pages_needed * page_size, + metadata_addr + pages_needed * PAGE_SIZE, ) }); - assert!(physical_pages.clone().count() == page_count); + let mut count = 0; for (i, page) in physical_pages.enumerate() { metadata[i] = page; + count += 1; } + assert!(count == page_count); self.metadata = metadata; - self.page_size = page_size; Ok(()) } @@ -287,7 +275,7 @@ impl PhysicalMemoryManager { continue; } - if consecutive_pages > 0 && page.base != last_page_base + 
self.page_size { + if consecutive_pages > 0 && page.base != last_page_base + PAGE_SIZE { consecutive_pages = 0; continue; } @@ -327,16 +315,12 @@ impl PhysicalMemoryManager { Ok(addr) } - pub fn page_size(&self) -> usize { - self.page_size - } - pub fn metadata_pages(&self) -> impl core::iter::Iterator { let metadata_start = (&self.metadata[0] as *const PhysicalPage) as usize; let metadata_last = (&self.metadata[self.metadata.len() - 1] as *const PhysicalPage) as usize; - (metadata_start..=metadata_last).step_by(self.page_size()) + (metadata_start..=metadata_last).step_by(PAGE_SIZE) } pub fn allocated_pages(&self) -> impl core::iter::Iterator + '_ { From 2f32b0ee7d0939924dadc0e69127e4681ed51fda Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Sun, 10 Sep 2023 15:48:32 +0200 Subject: [PATCH 3/7] mm: stop mapping all of DRAM as read-write Had to change a few interfaces to get around ownership issues. But now that all DRAM is not mapped as r/w, hopefully we will catch bugs earlier on. 
--- hal_aarch64/Cargo.toml | 1 + hal_aarch64/src/lib.rs | 2 + hal_aarch64/src/mm/mod.rs | 33 ++++++++----- hal_aarch64/src/mm/pgt48.rs | 3 ++ hal_core/src/mm.rs | 10 ++-- kernel/src/mm/mod.rs | 60 +++++++++++++----------- kernel/src/mm/physical_memory_manager.rs | 11 +++++ 7 files changed, 75 insertions(+), 45 deletions(-) diff --git a/hal_aarch64/Cargo.toml b/hal_aarch64/Cargo.toml index ba678b9..13c79d3 100644 --- a/hal_aarch64/Cargo.toml +++ b/hal_aarch64/Cargo.toml @@ -9,3 +9,4 @@ edition = "2021" hal_core = { path = "../hal_core" } tock-registers = "0.8" cortex-a = "8.1" +log = "0.4" diff --git a/hal_aarch64/src/lib.rs b/hal_aarch64/src/lib.rs index 3635916..571f39d 100644 --- a/hal_aarch64/src/lib.rs +++ b/hal_aarch64/src/lib.rs @@ -17,12 +17,14 @@ mod devices; pub struct PanicInfo { esr_el1: u64, elr_el1: u64, + far_el1: u64, } pub fn panic_info() -> PanicInfo { PanicInfo { esr_el1: ESR_EL1.get(), elr_el1: ELR_EL1.get(), + far_el1: FAR_EL1.get(), } } diff --git a/hal_aarch64/src/mm/mod.rs b/hal_aarch64/src/mm/mod.rs index 784257f..7089bff 100644 --- a/hal_aarch64/src/mm/mod.rs +++ b/hal_aarch64/src/mm/mod.rs @@ -19,32 +19,41 @@ use core::cell::OnceCell; static mut GPT: OnceCell<&'static mut PageTable> = OnceCell::new(); +pub fn is_pagetable_installed() -> bool { + unsafe { GPT.get_mut().is_some() } +} + pub fn current() -> &'static mut PageTable { unsafe { GPT.get_mut().unwrap() } } -pub fn init_paging( +pub fn prefill_pagetable( r: impl Iterator, rw: impl Iterator, rwx: impl Iterator, pre_allocated: impl Iterator, alloc: PageAllocFn, ) -> Result<(), Error> { - hal_core::mm::init_paging::(r, rw, rwx, pre_allocated, alloc, |pt| { - // TODO: put into into the hal_core::Error - unsafe { - if GPT.set(pt).is_err() { - panic!("GPT is already set ?"); - } - }; - unsafe { - load_pagetable(current()); - }; - })?; + let pt = hal_core::mm::prefill_pagetable::(r, rw, rwx, pre_allocated, alloc)?; + + // TODO: put into into the hal_core::Error + unsafe { + if 
GPT.set(pt).is_err() { + panic!("GPT is already set ?"); + } + }; Ok(()) } +pub fn enable_paging() { + unsafe { + load_pagetable(current()); + }; + + log::trace!("hal_core::mm::init_paging finished"); +} + unsafe fn load_pagetable(pt: &'static mut PageTable) { MAIR_EL1.write( // Attribute 0 - NonCacheable normal DRAM. FIXME: enable cache? diff --git a/hal_aarch64/src/mm/pgt48.rs b/hal_aarch64/src/mm/pgt48.rs index 203cfa4..6e85de2 100644 --- a/hal_aarch64/src/mm/pgt48.rs +++ b/hal_aarch64/src/mm/pgt48.rs @@ -226,6 +226,9 @@ impl PageMap for PageTable { fn new(alloc: PageAllocFn) -> Result<&'static mut Self, Error> { let page = alloc(1); + if page.val == 0x40330000 { + log::debug!("creating pte 40330000"); + } let page_table = page.ptr_cast::(); // Safety: the PMM gave us the memory, it should be a valid pointer. let page_table: &mut PageTable = unsafe { page_table.as_mut().unwrap() }; diff --git a/hal_core/src/mm.rs b/hal_core/src/mm.rs index 52f5e61..3e53959 100644 --- a/hal_core/src/mm.rs +++ b/hal_core/src/mm.rs @@ -142,14 +142,14 @@ pub fn align_down(addr: usize, page_sz: usize) -> usize { align_up(addr, page_sz) + page_sz } -pub fn init_paging( +pub fn prefill_pagetable( r: impl Iterator, rw: impl Iterator, rwx: impl Iterator, pre_allocated: impl Iterator, alloc: PageAllocFn, - store_pagetable: impl FnOnce(&'static mut P), -) -> Result<(), Error> { +) -> Result<&'static mut P, Error> { + trace!("hal_core::mm::prefill_pagetable"); let pt: &'static mut P = P::new(alloc)?; let page_size = P::PAGE_SIZE; @@ -176,7 +176,5 @@ pub fn init_paging( )? 
} - store_pagetable(pt); - - Ok(()) + Ok(pt) } diff --git a/kernel/src/mm/mod.rs b/kernel/src/mm/mod.rs index aff8ef2..ef6128c 100644 --- a/kernel/src/mm/mod.rs +++ b/kernel/src/mm/mod.rs @@ -6,7 +6,7 @@ mod binary_buddy_allocator; use crate::device_tree::DeviceTree; use crate::globals; -use crate::hal; +use crate::hal::{self, mm::PAGE_SIZE}; use crate::Error; use hal_core::mm::{align_up, PageMap, Permissions, VAddr}; use hal_core::AddressRange; @@ -116,6 +116,9 @@ pub fn map_address_space<'a, I: Iterator>( let mut rwx_entries = ArrayVec::::new(); let mut pre_allocated_entries = ArrayVec::::new(); + let alloc = + |count| alloc_pages_raw(count).expect("failure on page allocator passed to init_paging"); + // Add entries/descriptors in the pagetable for all of accessible memory regions. // That way in the future, mapping those entries won't require any memory allocations, // just settings the entry to valid and filling up the bits. @@ -124,11 +127,6 @@ pub fn map_address_space<'a, I: Iterator>( pre_allocated_entries .try_push(AddressRange::with_size(base, size)) .unwrap(); - - // TODO: this needs to be done differently, we're mapping all DRAM as rw... 
- rw_entries - .try_push(AddressRange::with_size(base, size)) - .unwrap(); }); }); debug!( @@ -144,9 +142,9 @@ pub fn map_address_space<'a, I: Iterator>( .unwrap(); let (kernel_r, kernel_rw, kernel_rwx) = map_kernel_rwx(); - kernel_r.for_each(|entry| r_entries.try_push(entry).unwrap()); - kernel_rw.for_each(|entry| rw_entries.try_push(entry).unwrap()); - kernel_rwx.for_each(|entry| rwx_entries.try_push(entry).unwrap()); + r_entries.extend(kernel_r); + rw_entries.extend(kernel_rw); + rwx_entries.extend(kernel_rwx); for drv in drivers { if let Some((base, len)) = drv.get_address_range() { @@ -162,34 +160,42 @@ pub fn map_address_space<'a, I: Iterator>( } } - // rw_entries = rw_entries.chain( - // globals::PHYSICAL_MEMORY_MANAGER.lock(|pmm| { - // let metadata_pages = pmm.metadata_pages(); - // let allocated_pages = pmm.allocated_pages(); - // let pmm_pages = metadata_pages.chain(allocated_pages); - // }), - // ); - globals::PHYSICAL_MEMORY_MANAGER.lock(|pmm| { - // All pmm pages are located in DRAM so they are already in the pagetable (they are part of - // the pre_allocated_entries). - // Therefore no allocations will be made. - let _pmm_pages = iter::Iterator::chain(pmm.metadata_pages(), pmm.allocated_pages()); - // XXX: put the pages as range in to rw_entries - // for now just crammed all memory regions as rw_entries a bit higher in the function. 
- }); - debug!("r_entries: {:X?}", r_entries); debug!("rw_entries: {:X?}", rw_entries); debug!("rwx_entries: {:X?}", rwx_entries); debug!("pre_allocated_entries: {:X?}", pre_allocated_entries); - hal::mm::init_paging( + hal::mm::prefill_pagetable( r_entries.into_iter(), rw_entries.into_iter(), rwx_entries.into_iter(), pre_allocated_entries.into_iter(), - |count| alloc_pages_raw(count).expect("failure on page allocator passed to init_paging"), + alloc, )?; + + globals::PHYSICAL_MEMORY_MANAGER.lock(|pmm| { + // All pmm pages are located in DRAM so they are already in the pagetable (they are part of + // the pre_allocated_entries). + // Therefore no allocations will be made. + for page in pmm.metadata_pages() { + hal::mm::current().identity_map( + VAddr::new(page), + Permissions::READ | Permissions::WRITE, + alloc, + ); + } + for page in pmm.allocated_pages() { + log::debug!("pushing rw allocated page 0x{:X}", page); + hal::mm::current().identity_map( + VAddr::new(page), + Permissions::READ | Permissions::WRITE, + alloc, + ); + } + }); + + hal::mm::enable_paging(); + unsafe { globals::STATE = globals::KernelState::MmuEnabledInit }; Ok(()) diff --git a/kernel/src/mm/physical_memory_manager.rs b/kernel/src/mm/physical_memory_manager.rs index 106c196..5f94a9a 100644 --- a/kernel/src/mm/physical_memory_manager.rs +++ b/kernel/src/mm/physical_memory_manager.rs @@ -255,6 +255,7 @@ impl PhysicalMemoryManager { assert!(count == page_count); self.metadata = metadata; + log::debug!("PMM metadata addr is 0x{:X}", self.metadata.as_ptr() as u64); Ok(()) } @@ -289,6 +290,8 @@ impl PhysicalMemoryManager { .for_each(|page| page.set_allocated()); self.metadata[i].set_last(); + let addr = self.metadata[first_page_index].base; + return Ok(self.metadata[first_page_index].base); } } @@ -303,7 +306,10 @@ impl PhysicalMemoryManager { let first_page = self.alloc_pages(page_count)?; let addr: usize = first_page.into(); + let our_page = addr == 0x40330000; + if unsafe { 
globals::STATE.is_mmu_enabled() } { + log::debug!("our_page is mapped before handed off"); hal::mm::current().identity_map_range(VAddr::new(addr), page_count, Permissions::READ | Permissions::WRITE, |_| { // The mmu is enabled, therefore we already mapped all DRAM into the kernel's pagetable as // invalid entries. @@ -324,6 +330,11 @@ impl PhysicalMemoryManager { } pub fn allocated_pages(&self) -> impl core::iter::Iterator + '_ { + log::debug!( + "allocated_pages: PMM metadata addr is 0x{:X}", + self.metadata.as_ptr() as u64 + ); + log::debug!("allocated_pages: metadata[254] {:?}", self.metadata[254]); self.metadata .iter() .filter(|page| page.is_allocated()) From d9502c01ab1a995c36cf961c52bd7d5ed0a5a9a4 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Wed, 13 Sep 2023 23:07:48 +0200 Subject: [PATCH 4/7] refactor: PageAlloc trait instead of PageAllocFn PageAllocFn was not very wieldy, the alloc_pages_* (alloc_pages_for_hal etc...) functions were a mess and it was hard to know what to use when etc... Use a trait instead, that way we can still pass an allocator that does not allocate in cases when we don't want to allocate, and pass the whole allocator when allocations actually are needed. That way it is easier to add functionality for when the HALs need more info from the allocator. Also it is much much easier to manage ownership&lifetimes this way. 
--- hal_aarch64/src/irq.rs | 8 +-- hal_aarch64/src/mm/mod.rs | 6 +- hal_aarch64/src/mm/pgt48.rs | 19 +++--- hal_core/src/lib.rs | 11 +++- hal_core/src/mm.rs | 65 +++++++++++++++------ kernel/src/error.rs | 6 +- kernel/src/executable/elf.rs | 11 ++-- kernel/src/generic_main.rs | 6 +- kernel/src/lib.rs | 2 +- kernel/src/lock.rs | 4 ++ kernel/src/mm/binary_buddy_allocator.rs | 3 +- kernel/src/mm/mod.rs | 74 +++++------------------- kernel/src/mm/physical_memory_manager.rs | 66 +++++++++++++-------- kernel/src/tests.rs | 12 ++-- 14 files changed, 153 insertions(+), 140 deletions(-) diff --git a/hal_aarch64/src/irq.rs b/hal_aarch64/src/irq.rs index a72f4df..c7da54c 100644 --- a/hal_aarch64/src/irq.rs +++ b/hal_aarch64/src/irq.rs @@ -7,7 +7,7 @@ use hal_core::{Error, TimerCallbackFn}; use crate::devices::gicv2::GicV2; use crate::mm; -use hal_core::mm::{PageAllocFn, PageMap, Permissions, VAddr}; +use hal_core::mm::{PageAlloc, PageMap, Permissions, VAddr}; use tock_registers::interfaces::Writeable; @@ -64,19 +64,19 @@ impl IrqChip { static mut IRQ_CHIP: IrqChip = IrqChip::NoChip; -pub fn init_irq_chip(_dt_node: (), alloc: PageAllocFn) -> Result<(), Error> { +pub fn init_irq_chip(_dt_node: (), allocator: &mut impl PageAlloc) -> Result<(), Error> { let (gicd_base, gicc_base) = (0x800_0000, 0x801_0000); mm::current().identity_map_range( VAddr::new(gicd_base), 0x0001_0000 / mm::PAGE_SIZE, Permissions::READ | Permissions::WRITE, - alloc, + allocator, )?; mm::current().identity_map_range( VAddr::new(gicc_base), 0x0001_0000 / mm::PAGE_SIZE, Permissions::READ | Permissions::WRITE, - alloc, + allocator, )?; unsafe { diff --git a/hal_aarch64/src/mm/mod.rs b/hal_aarch64/src/mm/mod.rs index 7089bff..a0076bd 100644 --- a/hal_aarch64/src/mm/mod.rs +++ b/hal_aarch64/src/mm/mod.rs @@ -1,5 +1,5 @@ use hal_core::{ - mm::{self, PageAllocFn, PageMap}, + mm::{self, PageAlloc, PageMap}, AddressRange, Error, }; @@ -32,9 +32,9 @@ pub fn prefill_pagetable( rw: impl Iterator, rwx: impl Iterator, 
pre_allocated: impl Iterator, - alloc: PageAllocFn, + allocator: &mut impl PageAlloc, ) -> Result<(), Error> { - let pt = hal_core::mm::prefill_pagetable::(r, rw, rwx, pre_allocated, alloc)?; + let pt = hal_core::mm::prefill_pagetable::(r, rw, rwx, pre_allocated, allocator)?; // TODO: put into into the hal_core::Error unsafe { diff --git a/hal_aarch64/src/mm/pgt48.rs b/hal_aarch64/src/mm/pgt48.rs index 6e85de2..774ec2e 100644 --- a/hal_aarch64/src/mm/pgt48.rs +++ b/hal_aarch64/src/mm/pgt48.rs @@ -1,5 +1,5 @@ use hal_core::{ - mm::{self, PageAllocFn, PageEntry, PageMap, Permissions}, + mm::{self, PageAlloc, PageEntry, PageMap, Permissions}, Error, }; @@ -224,12 +224,9 @@ impl PageMap for PageTable { const PAGE_SIZE: usize = 4096; type Entry = TableEntry; - fn new(alloc: PageAllocFn) -> Result<&'static mut Self, Error> { - let page = alloc(1); - if page.val == 0x40330000 { - log::debug!("creating pte 40330000"); - } - let page_table = page.ptr_cast::(); + fn new(allocator: &mut impl PageAlloc) -> Result<&'static mut Self, Error> { + let page = allocator.alloc(1)?; + let page_table = unsafe { page as *mut PageTable }; // Safety: the PMM gave us the memory, it should be a valid pointer. 
let page_table: &mut PageTable = unsafe { page_table.as_mut().unwrap() }; @@ -246,7 +243,7 @@ impl PageMap for PageTable { va: mm::VAddr, pa: mm::PAddr, perms: Permissions, - alloc: PageAllocFn, + allocator: &mut impl PageAlloc, ) -> Result<&mut TableEntry, Error> { let va = VAddr::from(va); let pa = PAddr::from(pa); @@ -269,7 +266,7 @@ impl PageMap for PageTable { let descriptor = unsafe { &mut content.descriptor }; if descriptor.is_invalid() { - let new_page_table = PageTable::new(alloc)?; + let new_page_table = PageTable::new(allocator)?; descriptor.set_next_level(new_page_table); } @@ -279,14 +276,14 @@ impl PageMap for PageTable { unreachable!("We should have reached lvl 3 and returned by now..."); } - fn add_invalid_entry(&mut self, va: mm::VAddr, alloc: PageAllocFn) -> Result<(), Error> { + fn add_invalid_entry(&mut self, va: mm::VAddr, allocator: &mut impl PageAlloc) -> Result<(), Error> { let entry = self.map( va, mm::PAddr { val: 0x0A0A_0A0A_0A0A_0A0A, }, mm::Permissions::READ, - alloc, + allocator, )?; entry.set_invalid(); diff --git a/hal_core/src/lib.rs b/hal_core/src/lib.rs index e420030..3becbf6 100644 --- a/hal_core/src/lib.rs +++ b/hal_core/src/lib.rs @@ -1,4 +1,5 @@ #![no_std] +#![feature(return_position_impl_trait_in_trait)] use core::convert::Into; use core::ops::Range; @@ -6,7 +7,15 @@ use core::ops::Range; pub mod mm; #[derive(Debug)] -pub enum Error {} +pub enum Error { + Alloc(mm::AllocatorError), +} + +impl From for Error { + fn from(e: mm::AllocatorError) -> Self { + Self::Alloc(e) + } +} pub type TimerCallbackFn = fn(); diff --git a/hal_core/src/mm.rs b/hal_core/src/mm.rs index 3e53959..bc0f32d 100644 --- a/hal_core/src/mm.rs +++ b/hal_core/src/mm.rs @@ -51,6 +51,35 @@ bitflags::bitflags! 
{ pub type PageAllocFn = fn(usize) -> PAddr; +#[derive(Debug)] +pub enum AllocatorError { + NotEnoughMemoryForMetadata, + OutOfMemory, +} + +pub trait PageAlloc { + fn alloc(&mut self, page_count: usize) -> Result; + fn dealloc(&mut self, base: usize, page_count: usize) -> Result<(), AllocatorError>; + fn used_pages(&self) -> impl Iterator + '_; +} + +pub struct NullPageAllocator; + +impl PageAlloc for NullPageAllocator { + fn alloc(&mut self, page_count: usize) -> Result { + panic!("the null page allocator mustn't allocate"); + } + + fn dealloc(&mut self, base: usize, page_count: usize) -> Result<(), AllocatorError> { + panic!("the null page allocator cannot deallocate"); + } + + fn used_pages(&self) -> impl Iterator + '_ { + panic!("obviously the null allocator has no pages that are in use"); + core::iter::empty() + } +} + pub trait PageEntry { fn set_invalid(&mut self); } @@ -59,22 +88,22 @@ pub trait PageMap { const PAGE_SIZE: usize; type Entry: PageEntry; - fn new(alloc: PageAllocFn) -> Result<&'static mut Self, Error>; + fn new(allocator: &mut impl PageAlloc) -> Result<&'static mut Self, Error>; fn map( &mut self, va: VAddr, pa: PAddr, perms: Permissions, - alloc: PageAllocFn, + allocator: &mut impl PageAlloc, ) -> Result<&mut Self::Entry, Error>; - fn add_invalid_entry(&mut self, va: VAddr, alloc: PageAllocFn) -> Result<(), Error> { + fn add_invalid_entry(&mut self, va: VAddr, allocator: &mut impl PageAlloc) -> Result<(), Error> { self.map( va, PAddr::new(0x0A0A_0A0A_0A0A_0A0A), Permissions::READ, - alloc, + allocator, )? 
.set_invalid(); @@ -85,9 +114,9 @@ pub trait PageMap { &mut self, addr: VAddr, perms: Permissions, - alloc: PageAllocFn, + allocator: &mut impl PageAlloc, ) -> Result<(), Error> { - self.map(addr, PAddr::new(addr.val), perms, alloc) + self.map(addr, PAddr::new(addr.val), perms, allocator) .map(|_| ()) } @@ -96,11 +125,11 @@ pub trait PageMap { addr: VAddr, page_count: usize, perms: Permissions, - alloc: PageAllocFn, + allocator: &mut impl PageAlloc, ) -> Result<(), Error> { let start = addr.val; for i in 0..page_count { - self.identity_map(VAddr::new(start + i * Self::PAGE_SIZE), perms, alloc)?; + self.identity_map(VAddr::new(start + i * Self::PAGE_SIZE), perms, allocator)?; } Ok(()) @@ -109,10 +138,10 @@ pub trait PageMap { fn add_invalid_entries( &mut self, range: AddressRange, - alloc: PageAllocFn, + allocator: &mut impl PageAlloc, ) -> Result<(), Error> { for page in range.iter_pages(Self::PAGE_SIZE) { - self.add_invalid_entry(VAddr::new(page), alloc)?; + self.add_invalid_entry(VAddr::new(page), allocator)?; } Ok(()) @@ -122,10 +151,10 @@ pub trait PageMap { &mut self, range: AddressRange, perms: Permissions, - alloc: PageAllocFn, + allocator: &mut impl PageAlloc, ) -> Result<(), Error> { for page in range.iter_pages(Self::PAGE_SIZE) { - self.identity_map(VAddr::new(page), perms, alloc)?; + self.identity_map(VAddr::new(page), perms, allocator)?; } Ok(()) @@ -147,24 +176,24 @@ pub fn prefill_pagetable( rw: impl Iterator, rwx: impl Iterator, pre_allocated: impl Iterator, - alloc: PageAllocFn, + allocator: &mut impl PageAlloc, ) -> Result<&'static mut P, Error> { trace!("hal_core::mm::prefill_pagetable"); - let pt: &'static mut P = P::new(alloc)?; + let pt: &'static mut P = P::new(allocator)?; let page_size = P::PAGE_SIZE; for range in pre_allocated { - pt.add_invalid_entries(range, alloc)?; + pt.add_invalid_entries(range, allocator)?; } for range in r { trace!("mapping as RO: {:X?}", range); - pt.identity_map_addressrange(range, Permissions::READ, alloc)?; + 
pt.identity_map_addressrange(range, Permissions::READ, allocator)?; } for range in rw { trace!("mapping as RW: {:X?}", range); - pt.identity_map_addressrange(range, Permissions::READ | Permissions::WRITE, alloc)?; + pt.identity_map_addressrange(range, Permissions::READ | Permissions::WRITE, allocator)?; } for range in rwx { @@ -172,7 +201,7 @@ pub fn prefill_pagetable( pt.identity_map_addressrange( range, Permissions::READ | Permissions::WRITE | Permissions::EXECUTE, - alloc, + allocator, )? } diff --git a/kernel/src/error.rs b/kernel/src/error.rs index 97c4398..4037b08 100644 --- a/kernel/src/error.rs +++ b/kernel/src/error.rs @@ -6,7 +6,7 @@ pub enum Error { NoMatchingDriver(&'static str), InvalidFdtNode, FdtError(fdt::FdtError), - Allocator(mm::AllocatorError), + Allocator(hal_core::mm::AllocatorError), Hal(hal_core::Error), SetLoggerError(log::SetLoggerError), } @@ -17,8 +17,8 @@ impl From for Error { } } -impl From for Error { - fn from(e: mm::AllocatorError) -> Self { +impl From for Error { + fn from(e: hal_core::mm::AllocatorError) -> Self { Self::Allocator(e) } } diff --git a/kernel/src/executable/elf.rs b/kernel/src/executable/elf.rs index 9481dbe..0b0e882 100644 --- a/kernel/src/executable/elf.rs +++ b/kernel/src/executable/elf.rs @@ -10,7 +10,7 @@ use goblin::elf::program_header::program_header64::ProgramHeader; use goblin::elf::program_header::*; use crate::hal; -use hal_core::mm::{PAddr, PageMap, Permissions, VAddr}; +use hal_core::mm::{PAddr, PageMap, Permissions, VAddr, PageAlloc}; fn align_down(addr: usize, page_size: usize) -> usize { let page_mask = !(page_size - 1); @@ -66,7 +66,7 @@ impl<'a> Elf<'a> { } pub fn load(&self) -> Result<(), Error> { - let pmm = globals::PHYSICAL_MEMORY_MANAGER.lock(|pmm| pmm); + let pmm = globals::PHYSICAL_MEMORY_MANAGER.get(); let page_size = hal::mm::PAGE_SIZE; for segment in self.segments() { @@ -79,7 +79,7 @@ impl<'a> Elf<'a> { let p_memsz = segment.p_memsz as usize; let pages_needed = Self::pages_needed(segment, 
page_size); - let physical_pages = pmm.alloc_rw_pages(pages_needed).unwrap(); + let physical_pages = pmm.alloc(pages_needed).unwrap(); let virtual_pages = segment.p_paddr as *mut u8; let offset_in_page = (virtual_pages as usize) - align_down(virtual_pages as usize, page_size); @@ -113,10 +113,7 @@ impl<'a> Elf<'a> { VAddr::new(align_down(virtual_pages as usize, page_size) + page_offset), PAddr::new(usize::from(physical_pages) + page_offset), perms, - |count| { - mm::alloc_pages_raw(count) - .expect("failure on page allocator passed during elf loading") - }, + pmm ) .unwrap(); } diff --git a/kernel/src/generic_main.rs b/kernel/src/generic_main.rs index 6f45ca5..b8da410 100644 --- a/kernel/src/generic_main.rs +++ b/kernel/src/generic_main.rs @@ -4,7 +4,7 @@ use super::drivers::Driver; use super::globals; use crate::hal; -use crate::mm::{alloc_pages_for_hal, map_address_space}; +use crate::mm; use crate::tests::{self, TestResult}; @@ -21,12 +21,12 @@ pub fn generic_main(dt: DeviceTree, hacky_devices: &[& globals::PHYSICAL_MEMORY_MANAGER .lock(|pmm| pmm.init_from_device_tree(&dt)) .unwrap(); - map_address_space(&dt, devices).expect("failed to map the addres space"); + mm::map_address_space(&dt, devices).expect("failed to map the addres space"); // Driver stuff // let _drvmgr = DriverManager::with_devices(&dt).unwrap(); - hal::irq::init_irq_chip((), alloc_pages_for_hal).expect("initialization of irq chip failed"); + hal::irq::init_irq_chip((), globals::PHYSICAL_MEMORY_MANAGER.get()).expect("initialization of irq chip failed"); hal::cpu::unmask_interrupts(); diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 46dd840..8a04c99 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -9,6 +9,7 @@ #![feature(const_for)] #![feature(alloc_error_handler)] #![feature(trait_upcasting)] +#![feature(return_position_impl_trait_in_trait)] pub extern crate alloc; @@ -19,7 +20,6 @@ pub mod error; pub use error::Error; pub mod device_tree; -pub mod driver_manager; pub mod 
executable; pub mod generic_main; pub mod globals; diff --git a/kernel/src/lock.rs b/kernel/src/lock.rs index 497b6d0..92e3d15 100644 --- a/kernel/src/lock.rs +++ b/kernel/src/lock.rs @@ -17,6 +17,10 @@ impl Lock { f(data) } + + pub fn get(&self) -> &'static mut T { + unsafe { &mut *self.data.get() } + } } unsafe impl Send for Lock where T: Sized + Send {} diff --git a/kernel/src/mm/binary_buddy_allocator.rs b/kernel/src/mm/binary_buddy_allocator.rs index 49a941c..3bb013b 100644 --- a/kernel/src/mm/binary_buddy_allocator.rs +++ b/kernel/src/mm/binary_buddy_allocator.rs @@ -1,5 +1,6 @@ use crate::globals; use crate::hal::mm::PAGE_SIZE; +use hal_core::mm::PageAlloc; use core::alloc::{GlobalAlloc, Layout}; @@ -29,7 +30,7 @@ unsafe impl GlobalAlloc for BinaryBuddyAllocator { } else { layout.size() / PAGE_SIZE + 1 }; - pmm.alloc_rw_pages(page_count).unwrap_or(0usize.into()) as *mut u8 + pmm.alloc(page_count).unwrap_or(0usize.into()) as *mut u8 }) } diff --git a/kernel/src/mm/mod.rs b/kernel/src/mm/mod.rs index ef6128c..6ba2175 100644 --- a/kernel/src/mm/mod.rs +++ b/kernel/src/mm/mod.rs @@ -1,5 +1,5 @@ mod physical_memory_manager; -pub use physical_memory_manager::{AllocatorError, PhysicalMemoryManager}; +pub use physical_memory_manager::PhysicalMemoryManager; mod binary_buddy_allocator; @@ -8,7 +8,7 @@ use crate::globals; use crate::hal::{self, mm::PAGE_SIZE}; use crate::Error; -use hal_core::mm::{align_up, PageMap, Permissions, VAddr}; +use hal_core::mm::{align_up, PageMap, Permissions, VAddr, PageAlloc, NullPageAllocator}; use hal_core::AddressRange; use crate::drivers; @@ -75,38 +75,6 @@ fn map_kernel_rwx() -> ( (iter::empty(), iter::empty(), rwx_entries) } -pub fn alloc_pages_raw(count: usize) -> Result { - // If there is a kernel pagetable, identity map the pages. - // Not sure this is the best idea, but I would say it follows the - // principle of least astonishment. 
- // TODO: remove unwrap - let start = globals::PHYSICAL_MEMORY_MANAGER.lock(|pmm| pmm.alloc_rw_pages(count).unwrap()); - let addr: usize = start.into(); - - if unsafe { globals::STATE.is_mmu_enabled() } { - hal::mm::current().identity_map_range(VAddr::new(addr), count, Permissions::READ | Permissions::WRITE, |_| { - // The mmu is enabled, therefore we already mapped all DRAM into the kernel's pagetable as - // invalid entries. - // Pagetable must only modify existing entries and not allocate. - panic!("alloc_rw_pages: pagetable tried to allocate memory when mapping it's rw_pages") - }).expect("mapping in this case should never fail as illustrated by the comment above..."); - } - - Ok(hal_core::mm::PAddr::new(addr)) -} - -pub fn alloc_pages(count: usize) -> Result<&'static mut [u8], Error> { - let addr = alloc_pages_raw(count)?; - let page_size = hal::mm::PAGE_SIZE; - - Ok(unsafe { slice::from_raw_parts_mut(addr.val as *mut _, count * page_size) }) -} - -pub fn alloc_pages_for_hal(count: usize) -> hal_core::mm::PAddr { - alloc_pages_raw(count) - .expect("page allocation function passed to the hal has failed, critical...") -} - pub fn map_address_space<'a, I: Iterator>( device_tree: &DeviceTree, drivers: I, @@ -116,9 +84,6 @@ pub fn map_address_space<'a, I: Iterator>( let mut rwx_entries = ArrayVec::::new(); let mut pre_allocated_entries = ArrayVec::::new(); - let alloc = - |count| alloc_pages_raw(count).expect("failure on page allocator passed to init_paging"); - // Add entries/descriptors in the pagetable for all of accessible memory regions. // That way in the future, mapping those entries won't require any memory allocations, // just settings the entry to valid and filling up the bits. 
@@ -170,29 +135,22 @@ pub fn map_address_space<'a, I: Iterator>( rw_entries.into_iter(), rwx_entries.into_iter(), pre_allocated_entries.into_iter(), - alloc, + globals::PHYSICAL_MEMORY_MANAGER.get(), )?; - globals::PHYSICAL_MEMORY_MANAGER.lock(|pmm| { - // All pmm pages are located in DRAM so they are already in the pagetable (they are part of - // the pre_allocated_entries). - // Therefore no allocations will be made. - for page in pmm.metadata_pages() { - hal::mm::current().identity_map( - VAddr::new(page), - Permissions::READ | Permissions::WRITE, - alloc, - ); - } - for page in pmm.allocated_pages() { - log::debug!("pushing rw allocated page 0x{:X}", page); - hal::mm::current().identity_map( - VAddr::new(page), - Permissions::READ | Permissions::WRITE, - alloc, - ); - } - }); + // All pmm pages are located in DRAM so they are already in the pagetable (they are part of + // the pre_allocated_entries). + // Therefore no allocations will be made, pass the NullPageAllocator. + for page in globals::PHYSICAL_MEMORY_MANAGER.get().used_pages() { + log::debug!("pushing rw allocated page 0x{:X}", page); + // let range = AddressRange::new(page..page + PAGE_SIZE); + // rw_entries.try_push(range); + hal::mm::current().identity_map( + VAddr::new(page), + Permissions::READ | Permissions::WRITE, + &mut NullPageAllocator + ); + } hal::mm::enable_paging(); diff --git a/kernel/src/mm/physical_memory_manager.rs b/kernel/src/mm/physical_memory_manager.rs index 5f94a9a..ee682dd 100644 --- a/kernel/src/mm/physical_memory_manager.rs +++ b/kernel/src/mm/physical_memory_manager.rs @@ -5,7 +5,7 @@ use crate::mm; use crate::Error; use core::mem; use hal_core::{ - mm::{PageMap, Permissions, VAddr}, + mm::{PageMap, Permissions, VAddr, PageAlloc, AllocatorError, NullPageAllocator}, AddressRange, }; @@ -52,12 +52,6 @@ impl PhysicalPage { } } -#[derive(Debug)] -pub enum AllocatorError { - NotEnoughMemoryForMetadata, - OutOfMemory, -} - #[derive(Debug)] pub struct PhysicalMemoryManager { 
metadata: &'static mut [PhysicalPage], @@ -260,6 +254,23 @@ impl PhysicalMemoryManager { Ok(()) } + fn metadata_pages(&self) -> impl core::iter::Iterator { + let metadata_start = (&self.metadata[0] as *const PhysicalPage) as usize; + let metadata_last = + (&self.metadata[self.metadata.len() - 1] as *const PhysicalPage) as usize; + + (metadata_start..=metadata_last).step_by(PAGE_SIZE) + } + + fn allocated_pages(&self) -> impl core::iter::Iterator + '_ { + log::debug!("allocated_pages: PMM metadata addr is 0x{:X}", self.metadata.as_ptr() as u64); + log::debug!("allocated_pages: metadata[254] {:?}", self.metadata[254]); + self.metadata + .iter() + .filter(|page| page.is_allocated()) + .map(|page| page.base) + } + pub fn alloc_pages(&mut self, page_count: usize) -> Result { let mut consecutive_pages: usize = 0; let mut first_page_index: usize = 0; @@ -298,35 +309,37 @@ impl PhysicalMemoryManager { Err(AllocatorError::OutOfMemory) } +} - pub fn alloc_rw_pages(&mut self, page_count: usize) -> Result { + +impl PageAlloc for PhysicalMemoryManager { + fn alloc(&mut self, page_count: usize) -> Result { // If there is a kernel pagetable, identity map the pages. - // Not sure this is the best idea, but I would say it follows the - // principle of least astonishment. let first_page = self.alloc_pages(page_count)?; let addr: usize = first_page.into(); - let our_page = addr == 0x40330000; - if unsafe { globals::STATE.is_mmu_enabled() } { - log::debug!("our_page is mapped before handed off"); - hal::mm::current().identity_map_range(VAddr::new(addr), page_count, Permissions::READ | Permissions::WRITE, |_| { - // The mmu is enabled, therefore we already mapped all DRAM into the kernel's pagetable as - // invalid entries. - // Pagetable must only modify existing entries and not allocate. 
- panic!("alloc_rw_pages: pagetable tried to allocate memory when mapping it's rw_pages") - }).expect("mapping in this case should never fail as illustrated by the comment above..."); + // The mmu is enabled, therefore we already mapped all DRAM into the kernel's pagetable + // as invalid entries. + // Pagetable must only modify existing entries and not allocate. + hal::mm::current() + .identity_map_range( + VAddr::new(addr), + page_count, + Permissions::READ | Permissions::WRITE, + &mut NullPageAllocator, + ); } Ok(addr) } - pub fn metadata_pages(&self) -> impl core::iter::Iterator { - let metadata_start = (&self.metadata[0] as *const PhysicalPage) as usize; - let metadata_last = - (&self.metadata[self.metadata.len() - 1] as *const PhysicalPage) as usize; - - (metadata_start..=metadata_last).step_by(PAGE_SIZE) + fn dealloc(&mut self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> { + // TODO: + // - if MMU is on, unmap the page + // - set as free + log::warn!("PMM dealloc not yet implemented..."); + Ok(()) } pub fn allocated_pages(&self) -> impl core::iter::Iterator + '_ { @@ -340,6 +353,9 @@ impl PhysicalMemoryManager { .filter(|page| page.is_allocated()) .map(|page| page.base) } + fn used_pages(&self) -> impl Iterator + '_ { + self.metadata_pages().chain(self.allocated_pages()) + } } #[cfg(test)] diff --git a/kernel/src/tests.rs b/kernel/src/tests.rs index f8c8142..8a144ab 100644 --- a/kernel/src/tests.rs +++ b/kernel/src/tests.rs @@ -3,10 +3,10 @@ use log::{debug, info, trace}; use core::slice; use core::sync::atomic::{AtomicUsize, Ordering}; +use crate::globals; use crate::executable::elf::Elf; -use crate::hal; -use crate::mm::{alloc_pages, alloc_pages_for_hal}; -use hal_core::mm::{PageMap, Permissions}; +use crate::hal::{self, mm::PAGE_SIZE}; +use hal_core::mm::{PageAlloc, PageMap, Permissions}; use align_data::include_aligned; use align_data::Align4K; @@ -97,12 +97,14 @@ fn test_timer_interrupt() -> TestResult { fn test_pagetable_remap() -> 
TestResult { info!("Testing the remapping capabilities of our pagetable..."); - let page_src = alloc_pages(1).unwrap(); + let page_src = globals::PHYSICAL_MEMORY_MANAGER.get().alloc(1).unwrap(); + let page_src = unsafe { slice::from_raw_parts_mut(page_src as *mut u8, PAGE_SIZE) }; let dst_addr = 0x0450_0000; let page_dst = unsafe { slice::from_raw_parts(dst_addr as *const u8, hal::mm::PAGE_SIZE) }; let deadbeef = [0xDE, 0xAD, 0xBE, 0xEF]; // Put data in source page + page_src[0..deadbeef.len()].copy_from_slice(&deadbeef); // Remap source page to destination page @@ -111,7 +113,7 @@ fn test_pagetable_remap() -> TestResult { hal_core::mm::VAddr::new(dst_addr), hal_core::mm::PAddr::new(page_src.as_ptr() as usize), Permissions::READ | Permissions::WRITE, - alloc_pages_for_hal, + globals::PHYSICAL_MEMORY_MANAGER.get() ) .unwrap(); From ac94f6437d2c040496e74d78886f9aa5e499a969 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Thu, 14 Sep 2023 21:06:36 +0200 Subject: [PATCH 5/7] pmm: make it thread-safe Thanks to the previous patch, it was fairly easy to just make the PMM thread-safe. It also makes interfaces a bit nicer imo. Lastly I did it because it was easy and I felt like it. 
--- hal_aarch64/src/irq.rs | 2 +- hal_aarch64/src/mm/mod.rs | 2 +- hal_aarch64/src/mm/pgt48.rs | 10 ++- hal_core/src/mm.rs | 31 +++++---- kernel/src/executable/elf.rs | 9 +-- kernel/src/generic_main.rs | 5 +- kernel/src/globals.rs | 3 +- kernel/src/mm/binary_buddy_allocator.rs | 16 ++--- kernel/src/mm/mod.rs | 10 +-- kernel/src/mm/physical_memory_manager.rs | 86 +++++++++++------------- kernel/src/tests.rs | 6 +- 11 files changed, 87 insertions(+), 93 deletions(-) diff --git a/hal_aarch64/src/irq.rs b/hal_aarch64/src/irq.rs index c7da54c..412024f 100644 --- a/hal_aarch64/src/irq.rs +++ b/hal_aarch64/src/irq.rs @@ -64,7 +64,7 @@ impl IrqChip { static mut IRQ_CHIP: IrqChip = IrqChip::NoChip; -pub fn init_irq_chip(_dt_node: (), allocator: &mut impl PageAlloc) -> Result<(), Error> { +pub fn init_irq_chip(_dt_node: (), allocator: &impl PageAlloc) -> Result<(), Error> { let (gicd_base, gicc_base) = (0x800_0000, 0x801_0000); mm::current().identity_map_range( VAddr::new(gicd_base), diff --git a/hal_aarch64/src/mm/mod.rs b/hal_aarch64/src/mm/mod.rs index a0076bd..edccd42 100644 --- a/hal_aarch64/src/mm/mod.rs +++ b/hal_aarch64/src/mm/mod.rs @@ -32,7 +32,7 @@ pub fn prefill_pagetable( rw: impl Iterator, rwx: impl Iterator, pre_allocated: impl Iterator, - allocator: &mut impl PageAlloc, + allocator: &impl PageAlloc, ) -> Result<(), Error> { let pt = hal_core::mm::prefill_pagetable::(r, rw, rwx, pre_allocated, allocator)?; diff --git a/hal_aarch64/src/mm/pgt48.rs b/hal_aarch64/src/mm/pgt48.rs index 774ec2e..ae94d89 100644 --- a/hal_aarch64/src/mm/pgt48.rs +++ b/hal_aarch64/src/mm/pgt48.rs @@ -224,7 +224,7 @@ impl PageMap for PageTable { const PAGE_SIZE: usize = 4096; type Entry = TableEntry; - fn new(allocator: &mut impl PageAlloc) -> Result<&'static mut Self, Error> { + fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error> { let page = allocator.alloc(1)?; let page_table = unsafe { page as *mut PageTable }; // Safety: the PMM gave us the memory, it should be 
a valid pointer. @@ -243,7 +243,7 @@ impl PageMap for PageTable { va: mm::VAddr, pa: mm::PAddr, perms: Permissions, - allocator: &mut impl PageAlloc, + allocator: &impl PageAlloc, ) -> Result<&mut TableEntry, Error> { let va = VAddr::from(va); let pa = PAddr::from(pa); @@ -276,7 +276,11 @@ impl PageMap for PageTable { unreachable!("We should have reached lvl 3 and returned by now..."); } - fn add_invalid_entry(&mut self, va: mm::VAddr, allocator: &mut impl PageAlloc) -> Result<(), Error> { + fn add_invalid_entry( + &mut self, + va: mm::VAddr, + allocator: &impl PageAlloc, + ) -> Result<(), Error> { let entry = self.map( va, mm::PAddr { diff --git a/hal_core/src/mm.rs b/hal_core/src/mm.rs index bc0f32d..3d9d876 100644 --- a/hal_core/src/mm.rs +++ b/hal_core/src/mm.rs @@ -57,26 +57,25 @@ pub enum AllocatorError { OutOfMemory, } -pub trait PageAlloc { - fn alloc(&mut self, page_count: usize) -> Result; - fn dealloc(&mut self, base: usize, page_count: usize) -> Result<(), AllocatorError>; - fn used_pages(&self) -> impl Iterator + '_; +pub trait PageAlloc: Sync { + fn alloc(&self, page_count: usize) -> Result; + fn dealloc(&self, base: usize, page_count: usize) -> Result<(), AllocatorError>; + fn used_pages(&self, f: F); } pub struct NullPageAllocator; impl PageAlloc for NullPageAllocator { - fn alloc(&mut self, page_count: usize) -> Result { + fn alloc(&self, _page_count: usize) -> Result { panic!("the null page allocator mustn't allocate"); } - fn dealloc(&mut self, base: usize, page_count: usize) -> Result<(), AllocatorError> { + fn dealloc(&self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> { panic!("the null page allocator cannot deallocate"); } - fn used_pages(&self) -> impl Iterator + '_ { + fn used_pages(&self, _f: F) { panic!("obviously the null allocator has no pages that are in use"); - core::iter::empty() } } @@ -88,17 +87,17 @@ pub trait PageMap { const PAGE_SIZE: usize; type Entry: PageEntry; - fn new(allocator: &mut impl PageAlloc) -> 
Result<&'static mut Self, Error>; + fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error>; fn map( &mut self, va: VAddr, pa: PAddr, perms: Permissions, - allocator: &mut impl PageAlloc, + allocator: &impl PageAlloc, ) -> Result<&mut Self::Entry, Error>; - fn add_invalid_entry(&mut self, va: VAddr, allocator: &mut impl PageAlloc) -> Result<(), Error> { + fn add_invalid_entry(&mut self, va: VAddr, allocator: &impl PageAlloc) -> Result<(), Error> { self.map( va, PAddr::new(0x0A0A_0A0A_0A0A_0A0A), @@ -114,7 +113,7 @@ pub trait PageMap { &mut self, addr: VAddr, perms: Permissions, - allocator: &mut impl PageAlloc, + allocator: &impl PageAlloc, ) -> Result<(), Error> { self.map(addr, PAddr::new(addr.val), perms, allocator) .map(|_| ()) @@ -125,7 +124,7 @@ pub trait PageMap { addr: VAddr, page_count: usize, perms: Permissions, - allocator: &mut impl PageAlloc, + allocator: &impl PageAlloc, ) -> Result<(), Error> { let start = addr.val; for i in 0..page_count { @@ -138,7 +137,7 @@ pub trait PageMap { fn add_invalid_entries( &mut self, range: AddressRange, - allocator: &mut impl PageAlloc, + allocator: &impl PageAlloc, ) -> Result<(), Error> { for page in range.iter_pages(Self::PAGE_SIZE) { self.add_invalid_entry(VAddr::new(page), allocator)?; @@ -151,7 +150,7 @@ pub trait PageMap { &mut self, range: AddressRange, perms: Permissions, - allocator: &mut impl PageAlloc, + allocator: &impl PageAlloc, ) -> Result<(), Error> { for page in range.iter_pages(Self::PAGE_SIZE) { self.identity_map(VAddr::new(page), perms, allocator)?; @@ -176,7 +175,7 @@ pub fn prefill_pagetable( rw: impl Iterator, rwx: impl Iterator, pre_allocated: impl Iterator, - allocator: &mut impl PageAlloc, + allocator: &impl PageAlloc, ) -> Result<&'static mut P, Error> { trace!("hal_core::mm::prefill_pagetable"); let pt: &'static mut P = P::new(allocator)?; diff --git a/kernel/src/executable/elf.rs b/kernel/src/executable/elf.rs index 0b0e882..e93eb92 100644 --- a/kernel/src/executable/elf.rs 
+++ b/kernel/src/executable/elf.rs @@ -10,7 +10,7 @@ use goblin::elf::program_header::program_header64::ProgramHeader; use goblin::elf::program_header::*; use crate::hal; -use hal_core::mm::{PAddr, PageMap, Permissions, VAddr, PageAlloc}; +use hal_core::mm::{PAddr, PageAlloc, PageMap, Permissions, VAddr}; fn align_down(addr: usize, page_size: usize) -> usize { let page_mask = !(page_size - 1); @@ -66,7 +66,6 @@ impl<'a> Elf<'a> { } pub fn load(&self) -> Result<(), Error> { - let pmm = globals::PHYSICAL_MEMORY_MANAGER.get(); let page_size = hal::mm::PAGE_SIZE; for segment in self.segments() { @@ -79,7 +78,9 @@ impl<'a> Elf<'a> { let p_memsz = segment.p_memsz as usize; let pages_needed = Self::pages_needed(segment, page_size); - let physical_pages = pmm.alloc(pages_needed).unwrap(); + let physical_pages = globals::PHYSICAL_MEMORY_MANAGER + .alloc(pages_needed) + .unwrap(); let virtual_pages = segment.p_paddr as *mut u8; let offset_in_page = (virtual_pages as usize) - align_down(virtual_pages as usize, page_size); @@ -113,7 +114,7 @@ impl<'a> Elf<'a> { VAddr::new(align_down(virtual_pages as usize, page_size) + page_offset), PAddr::new(usize::from(physical_pages) + page_offset), perms, - pmm + &globals::PHYSICAL_MEMORY_MANAGER, ) .unwrap(); } diff --git a/kernel/src/generic_main.rs b/kernel/src/generic_main.rs index b8da410..d410e6f 100644 --- a/kernel/src/generic_main.rs +++ b/kernel/src/generic_main.rs @@ -19,14 +19,15 @@ pub fn generic_main(dt: DeviceTree, hacky_devices: &[& // Memory init globals::PHYSICAL_MEMORY_MANAGER - .lock(|pmm| pmm.init_from_device_tree(&dt)) + .init_from_device_tree(&dt) .unwrap(); mm::map_address_space(&dt, devices).expect("failed to map the addres space"); // Driver stuff // let _drvmgr = DriverManager::with_devices(&dt).unwrap(); - hal::irq::init_irq_chip((), globals::PHYSICAL_MEMORY_MANAGER.get()).expect("initialization of irq chip failed"); + hal::irq::init_irq_chip((), &globals::PHYSICAL_MEMORY_MANAGER) + .expect("initialization of 
irq chip failed"); hal::cpu::unmask_interrupts(); diff --git a/kernel/src/globals.rs b/kernel/src/globals.rs index f6f631b..f2363e2 100644 --- a/kernel/src/globals.rs +++ b/kernel/src/globals.rs @@ -2,8 +2,7 @@ use crate::lock::Lock; use crate::mm; -pub static PHYSICAL_MEMORY_MANAGER: Lock = - Lock::new(mm::PhysicalMemoryManager::new()); +pub static PHYSICAL_MEMORY_MANAGER: mm::PhysicalMemoryManager = mm::PhysicalMemoryManager::new(); pub enum KernelState { EarlyInit, diff --git a/kernel/src/mm/binary_buddy_allocator.rs b/kernel/src/mm/binary_buddy_allocator.rs index 3bb013b..c31da79 100644 --- a/kernel/src/mm/binary_buddy_allocator.rs +++ b/kernel/src/mm/binary_buddy_allocator.rs @@ -24,14 +24,14 @@ unsafe impl GlobalAlloc for BinaryBuddyAllocator { // - be thread-safe // - disable interrupts when entering, then re-enable - globals::PHYSICAL_MEMORY_MANAGER.lock(|pmm| { - let page_count = if layout.size() <= PAGE_SIZE { - 1 - } else { - layout.size() / PAGE_SIZE + 1 - }; - pmm.alloc(page_count).unwrap_or(0usize.into()) as *mut u8 - }) + let page_count = if layout.size() <= PAGE_SIZE { + 1 + } else { + layout.size() / PAGE_SIZE + 1 + }; + globals::PHYSICAL_MEMORY_MANAGER + .alloc(page_count) + .unwrap_or(0usize.into()) as *mut u8 } unsafe fn dealloc(&self, _: *mut u8, _: Layout) { diff --git a/kernel/src/mm/mod.rs b/kernel/src/mm/mod.rs index 6ba2175..0553667 100644 --- a/kernel/src/mm/mod.rs +++ b/kernel/src/mm/mod.rs @@ -8,7 +8,7 @@ use crate::globals; use crate::hal::{self, mm::PAGE_SIZE}; use crate::Error; -use hal_core::mm::{align_up, PageMap, Permissions, VAddr, PageAlloc, NullPageAllocator}; +use hal_core::mm::{align_up, NullPageAllocator, PageAlloc, PageMap, Permissions, VAddr}; use hal_core::AddressRange; use crate::drivers; @@ -135,22 +135,22 @@ pub fn map_address_space<'a, I: Iterator>( rw_entries.into_iter(), rwx_entries.into_iter(), pre_allocated_entries.into_iter(), - globals::PHYSICAL_MEMORY_MANAGER.get(), + &globals::PHYSICAL_MEMORY_MANAGER, )?; // 
All pmm pages are located in DRAM so they are already in the pagetable (they are part of // the pre_allocated_entries). // Therefore no allocations will be made, pass the NullPageAllocator. - for page in globals::PHYSICAL_MEMORY_MANAGER.get().used_pages() { + globals::PHYSICAL_MEMORY_MANAGER.used_pages(|page| { log::debug!("pushing rw allocated page 0x{:X}", page); // let range = AddressRange::new(page..page + PAGE_SIZE); // rw_entries.try_push(range); hal::mm::current().identity_map( VAddr::new(page), Permissions::READ | Permissions::WRITE, - &mut NullPageAllocator + &mut NullPageAllocator, ); - } + }); hal::mm::enable_paging(); diff --git a/kernel/src/mm/physical_memory_manager.rs b/kernel/src/mm/physical_memory_manager.rs index ee682dd..0329de8 100644 --- a/kernel/src/mm/physical_memory_manager.rs +++ b/kernel/src/mm/physical_memory_manager.rs @@ -5,13 +5,14 @@ use crate::mm; use crate::Error; use core::mem; use hal_core::{ - mm::{PageMap, Permissions, VAddr, PageAlloc, AllocatorError, NullPageAllocator}, + mm::{AllocatorError, NullPageAllocator, PageAlloc, PageMap, Permissions, VAddr}, AddressRange, }; use hal::mm::PAGE_SIZE; use log::debug; +use spin::mutex::Mutex; #[derive(Debug, PartialEq, Eq)] pub enum PageKind { @@ -54,7 +55,7 @@ impl PhysicalPage { #[derive(Debug)] pub struct PhysicalMemoryManager { - metadata: &'static mut [PhysicalPage], + metadata: Mutex<&'static mut [PhysicalPage]>, } impl PhysicalMemoryManager { @@ -194,14 +195,13 @@ impl PhysicalMemoryManager { ) }; - Self { metadata } + Self { + metadata: Mutex::new(metadata), + } } /// Initialize a [`PageAllocator`] from the device tree. 
- pub fn init_from_device_tree( - &mut self, - device_tree: &DeviceTree, - ) -> Result<(), AllocatorError> { + pub fn init_from_device_tree(&self, device_tree: &DeviceTree) -> Result<(), AllocatorError> { let available_regions = Self::available_memory_regions::<10>(device_tree); assert!( @@ -248,35 +248,27 @@ impl PhysicalMemoryManager { } assert!(count == page_count); - self.metadata = metadata; - log::debug!("PMM metadata addr is 0x{:X}", self.metadata.as_ptr() as u64); + *self.metadata.lock() = metadata; Ok(()) } fn metadata_pages(&self) -> impl core::iter::Iterator { - let metadata_start = (&self.metadata[0] as *const PhysicalPage) as usize; - let metadata_last = - (&self.metadata[self.metadata.len() - 1] as *const PhysicalPage) as usize; + let metadata = self.metadata.lock(); + let metadata_start = (&metadata[0] as *const PhysicalPage) as usize; + let metadata_last = (&metadata[metadata.len() - 1] as *const PhysicalPage) as usize; (metadata_start..=metadata_last).step_by(PAGE_SIZE) } - fn allocated_pages(&self) -> impl core::iter::Iterator + '_ { - log::debug!("allocated_pages: PMM metadata addr is 0x{:X}", self.metadata.as_ptr() as u64); - log::debug!("allocated_pages: metadata[254] {:?}", self.metadata[254]); - self.metadata - .iter() - .filter(|page| page.is_allocated()) - .map(|page| page.base) - } - - pub fn alloc_pages(&mut self, page_count: usize) -> Result { + pub fn alloc_pages(&self, page_count: usize) -> Result { let mut consecutive_pages: usize = 0; let mut first_page_index: usize = 0; let mut last_page_base: usize = 0; - for (i, page) in self.metadata.iter().enumerate() { + let mut metadata = self.metadata.lock(); + + for (i, page) in metadata.iter().enumerate() { if consecutive_pages == 0 { first_page_index = i; last_page_base = page.base; @@ -296,14 +288,14 @@ impl PhysicalMemoryManager { last_page_base = page.base; if consecutive_pages == page_count { - self.metadata[first_page_index..=i] + metadata[first_page_index..=i] .iter_mut() 
.for_each(|page| page.set_allocated()); - self.metadata[i].set_last(); + metadata[i].set_last(); - let addr = self.metadata[first_page_index].base; + let addr = metadata[first_page_index].base; - return Ok(self.metadata[first_page_index].base); + return Ok(metadata[first_page_index].base); } } @@ -311,9 +303,8 @@ impl PhysicalMemoryManager { } } - impl PageAlloc for PhysicalMemoryManager { - fn alloc(&mut self, page_count: usize) -> Result { + fn alloc(&self, page_count: usize) -> Result { // If there is a kernel pagetable, identity map the pages. let first_page = self.alloc_pages(page_count)?; let addr: usize = first_page.into(); @@ -322,19 +313,18 @@ impl PageAlloc for PhysicalMemoryManager { // The mmu is enabled, therefore we already mapped all DRAM into the kernel's pagetable // as invalid entries. // Pagetable must only modify existing entries and not allocate. - hal::mm::current() - .identity_map_range( - VAddr::new(addr), - page_count, - Permissions::READ | Permissions::WRITE, - &mut NullPageAllocator, - ); + hal::mm::current().identity_map_range( + VAddr::new(addr), + page_count, + Permissions::READ | Permissions::WRITE, + &mut NullPageAllocator, + ); } Ok(addr) } - fn dealloc(&mut self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> { + fn dealloc(&self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> { // TODO: // - if MMU is on, unmap the page // - set as free @@ -342,19 +332,19 @@ impl PageAlloc for PhysicalMemoryManager { Ok(()) } - pub fn allocated_pages(&self) -> impl core::iter::Iterator + '_ { - log::debug!( - "allocated_pages: PMM metadata addr is 0x{:X}", - self.metadata.as_ptr() as u64 - ); - log::debug!("allocated_pages: metadata[254] {:?}", self.metadata[254]); - self.metadata + fn used_pages(&self, f: F) { + let metadata = self.metadata.lock(); + + let metadata_start = (&metadata[0] as *const PhysicalPage) as usize; + let metadata_last = (&metadata[metadata.len() - 1] as *const PhysicalPage) as usize; + + let 
metadata_pages = (metadata_start..=metadata_last).step_by(PAGE_SIZE); + let allocated_pages = metadata .iter() .filter(|page| page.is_allocated()) - .map(|page| page.base) - } - fn used_pages(&self) -> impl Iterator + '_ { - self.metadata_pages().chain(self.allocated_pages()) + .map(|page| page.base); + + metadata_pages.chain(allocated_pages).for_each(f); } } diff --git a/kernel/src/tests.rs b/kernel/src/tests.rs index 8a144ab..9ee009d 100644 --- a/kernel/src/tests.rs +++ b/kernel/src/tests.rs @@ -3,8 +3,8 @@ use log::{debug, info, trace}; use core::slice; use core::sync::atomic::{AtomicUsize, Ordering}; -use crate::globals; use crate::executable::elf::Elf; +use crate::globals; use crate::hal::{self, mm::PAGE_SIZE}; use hal_core::mm::{PageAlloc, PageMap, Permissions}; @@ -97,7 +97,7 @@ fn test_timer_interrupt() -> TestResult { fn test_pagetable_remap() -> TestResult { info!("Testing the remapping capabilities of our pagetable..."); - let page_src = globals::PHYSICAL_MEMORY_MANAGER.get().alloc(1).unwrap(); + let page_src = globals::PHYSICAL_MEMORY_MANAGER.alloc(1).unwrap(); let page_src = unsafe { slice::from_raw_parts_mut(page_src as *mut u8, PAGE_SIZE) }; let dst_addr = 0x0450_0000; let page_dst = unsafe { slice::from_raw_parts(dst_addr as *const u8, hal::mm::PAGE_SIZE) }; @@ -113,7 +113,7 @@ fn test_pagetable_remap() -> TestResult { hal_core::mm::VAddr::new(dst_addr), hal_core::mm::PAddr::new(page_src.as_ptr() as usize), Permissions::READ | Permissions::WRITE, - globals::PHYSICAL_MEMORY_MANAGER.get() + &globals::PHYSICAL_MEMORY_MANAGER, ) .unwrap(); From 7e73956860185e8267219cbbd59b3c1a73ceef30 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Wed, 29 Nov 2023 20:32:47 +0000 Subject: [PATCH 6/7] refactor(PageAlloc trait): add riscv64 support --- hal_aarch64/src/mm/mod.rs | 2 -- hal_riscv64/src/irq.rs | 6 +++--- hal_riscv64/src/mm/mod.rs | 36 +++++++++++++++++++++--------------- hal_riscv64/src/mm/sv39.rs | 20 +++++++++++--------- 4 files changed, 35 
insertions(+), 29 deletions(-) diff --git a/hal_aarch64/src/mm/mod.rs b/hal_aarch64/src/mm/mod.rs index edccd42..7b7ed3a 100644 --- a/hal_aarch64/src/mm/mod.rs +++ b/hal_aarch64/src/mm/mod.rs @@ -50,8 +50,6 @@ pub fn enable_paging() { unsafe { load_pagetable(current()); }; - - log::trace!("hal_core::mm::init_paging finished"); } unsafe fn load_pagetable(pt: &'static mut PageTable) { diff --git a/hal_riscv64/src/irq.rs b/hal_riscv64/src/irq.rs index c66055b..37a6dc9 100644 --- a/hal_riscv64/src/irq.rs +++ b/hal_riscv64/src/irq.rs @@ -1,5 +1,5 @@ use hal_core::{ - mm::{PageAllocFn, PageMap, Permissions, VAddr}, + mm::{PageAlloc, PageMap, Permissions, VAddr}, Error, TimerCallbackFn, }; @@ -20,7 +20,7 @@ pub fn init_exception_handlers() { static mut IRQ_CHIP: Option = None; -pub fn init_irq_chip(_dt_node: (), alloc: PageAllocFn) -> Result<(), Error> { +pub fn init_irq_chip(_dt_node: (), allocator: &impl PageAlloc) -> Result<(), Error> { // TODO map the dt_node let base = 0xc000000; let max_offset = 0x3FFFFFC; @@ -29,7 +29,7 @@ pub fn init_irq_chip(_dt_node: (), alloc: PageAllocFn) -> Result<(), Error> { VAddr::new(base), max_offset / mm::PAGE_SIZE + 1, Permissions::READ | Permissions::WRITE, - alloc, + allocator, )?; unsafe { IRQ_CHIP = Some(Plic::new(base)); diff --git a/hal_riscv64/src/mm/mod.rs b/hal_riscv64/src/mm/mod.rs index 9e570d7..dee6d33 100644 --- a/hal_riscv64/src/mm/mod.rs +++ b/hal_riscv64/src/mm/mod.rs @@ -1,8 +1,7 @@ use core::arch::asm; use core::cell::OnceCell; - use hal_core::{ - mm::{self, PageAllocFn, PageMap}, + mm::{self, PageAlloc, PageMap}, AddressRange, Error, }; @@ -17,28 +16,31 @@ pub fn current() -> &'static mut PageTable { unsafe { *GPT.get_mut().unwrap() } } -pub fn init_paging( +pub fn prefill_pagetable( r: impl Iterator, rw: impl Iterator, rwx: impl Iterator, pre_allocated: impl Iterator, - alloc: PageAllocFn, + allocator: &impl PageAlloc, ) -> Result<(), Error> { - hal_core::mm::init_paging::(r, rw, rwx, pre_allocated, alloc, |pt| { - 
// TODO: put into into the hal_core::Error - unsafe { - if GPT.set(pt).is_err() { - panic!("GPT is already set ?"); - } - }; - unsafe { - load_pagetable(current()); - }; - })?; + let pt = hal_core::mm::prefill_pagetable::(r, rw, rwx, pre_allocated, allocator)?; + + // TODO: put into into the hal_core::Error + unsafe { + if GPT.set(pt).is_err() { + panic!("GPT is already set ?"); + } + }; Ok(()) } +pub fn enable_paging() { + unsafe { + load_pagetable(current()); + } +} + unsafe fn load_pagetable(pt: &'static mut PageTable) { let pt_addr = pt as *mut PageTable as usize; let ppn = pt_addr >> 12; @@ -51,6 +53,10 @@ unsafe fn load_pagetable(pt: &'static mut PageTable) { } } +pub fn align_down(addr: usize) -> usize { + mm::align_down(addr, PageTable::PAGE_SIZE) +} + pub fn align_up(addr: usize) -> usize { mm::align_up(addr, PageTable::PAGE_SIZE) } diff --git a/hal_riscv64/src/mm/sv39.rs b/hal_riscv64/src/mm/sv39.rs index 89251ae..f6cb9a2 100644 --- a/hal_riscv64/src/mm/sv39.rs +++ b/hal_riscv64/src/mm/sv39.rs @@ -1,6 +1,6 @@ use modular_bitfield::{bitfield, prelude::*}; -use hal_core::mm::{self, PageAllocFn, PageEntry, PageMap}; +use hal_core::mm::{self, PageAlloc, PageEntry, PageMap}; use hal_core::Error; #[repr(C)] @@ -167,14 +167,16 @@ impl PageMap for PageTable { const PAGE_SIZE: usize = 4096; type Entry = PageTableEntry; - fn new(alloc: PageAllocFn) -> Result<&'static mut Self, Error> { - let page = alloc(1); - - let page_table = page.ptr_cast::(); + fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error> { + let page = allocator.alloc(1)?; + let page_table = unsafe { page as *mut PageTable }; // Safety: the PMM gave us the memory, it should be a valid pointer. 
- let page_table = unsafe { page_table.as_mut().unwrap() }; + let page_table: &mut PageTable = unsafe { page_table.as_mut().unwrap() }; - page_table.entries.iter_mut().for_each(|pte| pte.clear()); + page_table + .entries + .iter_mut() + .for_each(|pte| pte.set_invalid()); Ok(page_table) } @@ -184,7 +186,7 @@ impl PageMap for PageTable { va: mm::VAddr, pa: mm::PAddr, perms: mm::Permissions, - alloc: PageAllocFn, + allocator: &impl PageAlloc, ) -> Result<&mut Self::Entry, Error> { let paddr: PAddr = pa.into(); let vaddr: VAddr = va.into(); @@ -207,7 +209,7 @@ impl PageMap for PageTable { // If the entry is not valid we will need to allocate a new PageTable if !pte.is_valid() { - let new_page_table = PageTable::new(alloc); + let new_page_table = PageTable::new(allocator); // Set new PageTable as target of this entry pte.set_target(new_page_table? as *mut PageTable); From 72db5f424de9c3a2722ded05fb9e3d7ba836ded2 Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Fri, 1 Dec 2023 18:37:34 +0100 Subject: [PATCH 7/7] cleanup: apply clippy fixes --- hal_aarch64/src/irq.rs | 9 ++++---- hal_aarch64/src/mm/pgt48.rs | 2 +- hal_core/src/mm.rs | 1 - hal_riscv64/src/irq.rs | 7 +++--- hal_riscv64/src/lib.rs | 4 +--- hal_riscv64/src/mm/mod.rs | 2 +- hal_riscv64/src/mm/sv39.rs | 6 +---- kernel/src/device_tree.rs | 4 +--- kernel/src/drivers/mod.rs | 5 +---- kernel/src/error.rs | 2 -- kernel/src/executable/elf.rs | 9 +++----- kernel/src/generic_main.rs | 2 +- kernel/src/globals.rs | 2 -- kernel/src/lib.rs | 1 - kernel/src/lock.rs | 27 ----------------------- kernel/src/mm/binary_buddy_allocator.rs | 2 +- kernel/src/mm/mod.rs | 21 +++++++++--------- kernel/src/mm/physical_memory_manager.rs | 28 ++++++++---------------- 18 files changed, 38 insertions(+), 96 deletions(-) delete mode 100644 kernel/src/lock.rs diff --git a/hal_aarch64/src/irq.rs b/hal_aarch64/src/irq.rs index 412024f..10a99a1 100644 --- a/hal_aarch64/src/irq.rs +++ b/hal_aarch64/src/irq.rs @@ -103,11 +103,12 @@ extern "C" 
fn irq_current_el_sp0() { // Clear the timer in order to EOI it. cpu::clear_physical_timer(); - let timer_ptr = TIMER_CALLBACK.load(Ordering::Relaxed); - if !timer_ptr.is_null() { + let timer_cb = TIMER_CALLBACK.load(Ordering::Relaxed); + if !timer_cb.is_null() { unsafe { - let timer: fn() = core::mem::transmute::<_, fn()>(timer_ptr); - timer(); + // Cannot simply dereference TIMER_CALLBACK here. + // We are using an AtomicPtr and TIMER_CALLBACK already holds the fn(). + core::mem::transmute::<_, fn()>(timer_cb)(); } } diff --git a/hal_aarch64/src/mm/pgt48.rs b/hal_aarch64/src/mm/pgt48.rs index ae94d89..eb43729 100644 --- a/hal_aarch64/src/mm/pgt48.rs +++ b/hal_aarch64/src/mm/pgt48.rs @@ -226,7 +226,7 @@ impl PageMap for PageTable { fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error> { let page = allocator.alloc(1)?; - let page_table = unsafe { page as *mut PageTable }; + let page_table = page as *mut PageTable; // Safety: the PMM gave us the memory, it should be a valid pointer. 
let page_table: &mut PageTable = unsafe { page_table.as_mut().unwrap() }; diff --git a/hal_core/src/mm.rs b/hal_core/src/mm.rs index 3d9d876..f8cb6d8 100644 --- a/hal_core/src/mm.rs +++ b/hal_core/src/mm.rs @@ -179,7 +179,6 @@ pub fn prefill_pagetable( ) -> Result<&'static mut P, Error> { trace!("hal_core::mm::prefill_pagetable"); let pt: &'static mut P = P::new(allocator)?; - let page_size = P::PAGE_SIZE; for range in pre_allocated { pt.add_invalid_entries(range, allocator)?; diff --git a/hal_riscv64/src/irq.rs b/hal_riscv64/src/irq.rs index 37a6dc9..9e2a4e6 100644 --- a/hal_riscv64/src/irq.rs +++ b/hal_riscv64/src/irq.rs @@ -202,11 +202,10 @@ extern "C" fn undefined_handler() { } extern "C" fn timer_handler() { - let timer_ptr = TIMER_CALLBACK.load(Ordering::Relaxed); - if !timer_ptr.is_null() { + let timer_cb = TIMER_CALLBACK.load(Ordering::Relaxed); + if !timer_cb.is_null() { unsafe { - let timer: fn() = core::mem::transmute::<_, fn()>(timer_ptr); - timer(); + core::mem::transmute::<_, fn()>(timer_cb)(); } } } diff --git a/hal_riscv64/src/lib.rs b/hal_riscv64/src/lib.rs index 84dfa30..b83db9c 100644 --- a/hal_riscv64/src/lib.rs +++ b/hal_riscv64/src/lib.rs @@ -10,9 +10,7 @@ mod registers; use core::arch::asm; -pub fn panic_info() -> () { - () -} +pub fn panic_info() {} #[naked] #[no_mangle] diff --git a/hal_riscv64/src/mm/mod.rs b/hal_riscv64/src/mm/mod.rs index dee6d33..0da7729 100644 --- a/hal_riscv64/src/mm/mod.rs +++ b/hal_riscv64/src/mm/mod.rs @@ -13,7 +13,7 @@ pub const PAGE_SIZE: usize = PageTable::PAGE_SIZE; static mut GPT: OnceCell<&'static mut PageTable> = OnceCell::new(); pub fn current() -> &'static mut PageTable { - unsafe { *GPT.get_mut().unwrap() } + unsafe { GPT.get_mut().unwrap() } } pub fn prefill_pagetable( diff --git a/hal_riscv64/src/mm/sv39.rs b/hal_riscv64/src/mm/sv39.rs index f6cb9a2..65aaeb5 100644 --- a/hal_riscv64/src/mm/sv39.rs +++ b/hal_riscv64/src/mm/sv39.rs @@ -115,10 +115,6 @@ pub struct PageTableEntry { } impl PageTableEntry { - 
fn clear(&mut self) { - *self = PageTableEntry::from_bytes([0u8; 8]) - } - fn is_valid(&self) -> bool { self.v() == 1 } @@ -169,7 +165,7 @@ impl PageMap for PageTable { fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error> { let page = allocator.alloc(1)?; - let page_table = unsafe { page as *mut PageTable }; + let page_table = page as *mut PageTable; // Safety: the PMM gave us the memory, it should be a valid pointer. let page_table: &mut PageTable = unsafe { page_table.as_mut().unwrap() }; diff --git a/kernel/src/device_tree.rs b/kernel/src/device_tree.rs index df0515e..994999a 100644 --- a/kernel/src/device_tree.rs +++ b/kernel/src/device_tree.rs @@ -76,8 +76,6 @@ impl DeviceTree { // The heuristic, the root irq chip doesn't have a reg property. // Works on aarch64 and riscv64. - let interrupt_controller = interrupt_controllers.find(|intc| intc.reg().is_some()); - - interrupt_controller + interrupt_controllers.find(|intc| intc.reg().is_some()) } } diff --git a/kernel/src/drivers/mod.rs b/kernel/src/drivers/mod.rs index 4bd1945..4de8f6e 100644 --- a/kernel/src/drivers/mod.rs +++ b/kernel/src/drivers/mod.rs @@ -26,10 +26,7 @@ pub struct Matcher { impl Matcher { pub fn matches(&self, compatible: &str) -> bool { - self.compatibles - .iter() - .find(|&s| s == &compatible) - .is_some() + self.compatibles.iter().any(|s| s == &compatible) } } type ConsoleMatcher = Matcher; diff --git a/kernel/src/error.rs b/kernel/src/error.rs index 4037b08..5efa7e9 100644 --- a/kernel/src/error.rs +++ b/kernel/src/error.rs @@ -1,5 +1,3 @@ -use super::mm; - #[derive(Debug)] pub enum Error { DeviceNotFound(&'static str), diff --git a/kernel/src/executable/elf.rs b/kernel/src/executable/elf.rs index e93eb92..890a949 100644 --- a/kernel/src/executable/elf.rs +++ b/kernel/src/executable/elf.rs @@ -1,7 +1,6 @@ use core::iter::Iterator; use crate::globals; -use crate::mm; use crate::Error; use goblin; @@ -86,7 +85,7 @@ impl<'a> Elf<'a> { (virtual_pages as usize) - 
align_down(virtual_pages as usize, page_size); let segment_data_src_addr = ((self.data.as_ptr() as usize) + p_offset) as *const u8; - let segment_data_dst_addr = (usize::from(physical_pages) + offset_in_page) as *mut u8; + let segment_data_dst_addr = (physical_pages + offset_in_page) as *mut u8; let segment_data_src: &[u8] = unsafe { core::slice::from_raw_parts(segment_data_src_addr, p_filesz) }; @@ -95,9 +94,7 @@ impl<'a> Elf<'a> { unsafe { core::slice::from_raw_parts_mut(segment_data_dst_addr, p_memsz) }; // Zeroing uninitialized data - for i in p_filesz..p_memsz { - dst[i as usize] = 0u8; - } + dst[p_filesz..p_memsz].iter_mut().for_each(|e| *e = 0u8); dst }; @@ -112,7 +109,7 @@ impl<'a> Elf<'a> { hal::mm::current() .map( VAddr::new(align_down(virtual_pages as usize, page_size) + page_offset), - PAddr::new(usize::from(physical_pages) + page_offset), + PAddr::new(physical_pages + page_offset), perms, &globals::PHYSICAL_MEMORY_MANAGER, ) diff --git a/kernel/src/generic_main.rs b/kernel/src/generic_main.rs index d410e6f..807445c 100644 --- a/kernel/src/generic_main.rs +++ b/kernel/src/generic_main.rs @@ -15,7 +15,7 @@ pub fn generic_main(dt: DeviceTree, hacky_devices: &[& let qemu_exit = QemuExit::new(); let qemu_exit_slice = [&qemu_exit as &dyn Driver]; - let devices = hacky_devices.into_iter().chain(&qemu_exit_slice); + let devices = hacky_devices.iter().chain(&qemu_exit_slice); // Memory init globals::PHYSICAL_MEMORY_MANAGER diff --git a/kernel/src/globals.rs b/kernel/src/globals.rs index f2363e2..89d8e21 100644 --- a/kernel/src/globals.rs +++ b/kernel/src/globals.rs @@ -1,5 +1,3 @@ -use crate::lock::Lock; - use crate::mm; pub static PHYSICAL_MEMORY_MANAGER: mm::PhysicalMemoryManager = mm::PhysicalMemoryManager::new(); diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 8a04c99..6b217d6 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -24,7 +24,6 @@ pub mod executable; pub mod generic_main; pub mod globals; pub mod kernel_console; -mod lock; pub mod 
mm; mod panic; mod tests; diff --git a/kernel/src/lock.rs b/kernel/src/lock.rs deleted file mode 100644 index 92e3d15..0000000 --- a/kernel/src/lock.rs +++ /dev/null @@ -1,27 +0,0 @@ -use core::cell::UnsafeCell; - -pub struct Lock { - data: UnsafeCell, -} - -impl Lock { - pub const fn new(data: T) -> Self { - Self { - data: UnsafeCell::new(data), - } - } - - pub fn lock<'a, R>(&'a self, f: impl FnOnce(&'a mut T) -> R) -> R { - // TODO: actually lock something... - let data = unsafe { &mut *self.data.get() }; - - f(data) - } - - pub fn get(&self) -> &'static mut T { - unsafe { &mut *self.data.get() } - } -} - -unsafe impl Send for Lock where T: Sized + Send {} -unsafe impl Sync for Lock where T: Sized + Send {} diff --git a/kernel/src/mm/binary_buddy_allocator.rs b/kernel/src/mm/binary_buddy_allocator.rs index c31da79..bb17cad 100644 --- a/kernel/src/mm/binary_buddy_allocator.rs +++ b/kernel/src/mm/binary_buddy_allocator.rs @@ -31,7 +31,7 @@ unsafe impl GlobalAlloc for BinaryBuddyAllocator { }; globals::PHYSICAL_MEMORY_MANAGER .alloc(page_count) - .unwrap_or(0usize.into()) as *mut u8 + .unwrap_or(0usize) as *mut u8 } unsafe fn dealloc(&self, _: *mut u8, _: Layout) { diff --git a/kernel/src/mm/mod.rs b/kernel/src/mm/mod.rs index 0553667..581fe94 100644 --- a/kernel/src/mm/mod.rs +++ b/kernel/src/mm/mod.rs @@ -6,16 +6,16 @@ mod binary_buddy_allocator; use crate::device_tree::DeviceTree; use crate::globals; -use crate::hal::{self, mm::PAGE_SIZE}; +use crate::hal; use crate::Error; -use hal_core::mm::{align_up, NullPageAllocator, PageAlloc, PageMap, Permissions, VAddr}; +use hal_core::mm::{NullPageAllocator, PageAlloc, PageMap, Permissions, VAddr}; use hal_core::AddressRange; use crate::drivers; use drivers::Driver; use arrayvec::ArrayVec; -use core::{iter, slice}; +use core::iter; use log::debug; @@ -142,14 +142,13 @@ pub fn map_address_space<'a, I: Iterator>( // the pre_allocated_entries). // Therefore no allocations will be made, pass the NullPageAllocator. 
globals::PHYSICAL_MEMORY_MANAGER.used_pages(|page| { - log::debug!("pushing rw allocated page 0x{:X}", page); - // let range = AddressRange::new(page..page + PAGE_SIZE); - // rw_entries.try_push(range); - hal::mm::current().identity_map( - VAddr::new(page), - Permissions::READ | Permissions::WRITE, - &mut NullPageAllocator, - ); + hal::mm::current() + .identity_map( + VAddr::new(page), + Permissions::READ | Permissions::WRITE, + &NullPageAllocator, + ) + .unwrap(); }); hal::mm::enable_paging(); diff --git a/kernel/src/mm/physical_memory_manager.rs b/kernel/src/mm/physical_memory_manager.rs index 0329de8..f7a7e08 100644 --- a/kernel/src/mm/physical_memory_manager.rs +++ b/kernel/src/mm/physical_memory_manager.rs @@ -2,7 +2,6 @@ use crate::device_tree::DeviceTree; use crate::globals; use crate::hal; use crate::mm; -use crate::Error; use core::mem; use hal_core::{ mm::{AllocatorError, NullPageAllocator, PageAlloc, PageMap, Permissions, VAddr}, @@ -253,14 +252,6 @@ impl PhysicalMemoryManager { Ok(()) } - fn metadata_pages(&self) -> impl core::iter::Iterator { - let metadata = self.metadata.lock(); - let metadata_start = (&metadata[0] as *const PhysicalPage) as usize; - let metadata_last = (&metadata[metadata.len() - 1] as *const PhysicalPage) as usize; - - (metadata_start..=metadata_last).step_by(PAGE_SIZE) - } - pub fn alloc_pages(&self, page_count: usize) -> Result { let mut consecutive_pages: usize = 0; let mut first_page_index: usize = 0; @@ -293,8 +284,6 @@ impl PhysicalMemoryManager { .for_each(|page| page.set_allocated()); metadata[i].set_last(); - let addr = metadata[first_page_index].base; - return Ok(metadata[first_page_index].base); } } @@ -307,21 +296,22 @@ impl PageAlloc for PhysicalMemoryManager { fn alloc(&self, page_count: usize) -> Result { // If there is a kernel pagetable, identity map the pages. 
let first_page = self.alloc_pages(page_count)?; - let addr: usize = first_page.into(); if unsafe { globals::STATE.is_mmu_enabled() } { // The mmu is enabled, therefore we already mapped all DRAM into the kernel's pagetable // as invalid entries. // Pagetable must only modify existing entries and not allocate. - hal::mm::current().identity_map_range( - VAddr::new(addr), - page_count, - Permissions::READ | Permissions::WRITE, - &mut NullPageAllocator, - ); + hal::mm::current() + .identity_map_range( + VAddr::new(first_page), + page_count, + Permissions::READ | Permissions::WRITE, + &NullPageAllocator, + ) + .unwrap(); } - Ok(addr) + Ok(first_page) } fn dealloc(&self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> {