Skip to content

Commit

Permalink
pmm: make it thread-safe
Browse files Browse the repository at this point in the history
Thanks to the previous patch, it was fairly easy to just make the PMM
thread-safe.
It also makes the interfaces a bit nicer, in my opinion.
Lastly I did it because it was easy and I felt like it.
  • Loading branch information
n1tram1 committed Oct 14, 2023
1 parent 1e7a5b6 commit 5632e1a
Show file tree
Hide file tree
Showing 11 changed files with 70 additions and 67 deletions.
2 changes: 1 addition & 1 deletion hal_aarch64/src/irq.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ impl IrqChip {

static mut IRQ_CHIP: IrqChip = IrqChip::NoChip;

pub fn init_irq_chip(_dt_node: (), allocator: &mut impl PageAlloc) -> Result<(), Error> {
pub fn init_irq_chip(_dt_node: (), allocator: &impl PageAlloc) -> Result<(), Error> {
let (gicd_base, gicc_base) = (0x800_0000, 0x801_0000);
mm::current().identity_map_range(
VAddr::new(gicd_base),
Expand Down
2 changes: 1 addition & 1 deletion hal_aarch64/src/mm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ pub fn prefill_pagetable(
rw: impl Iterator<Item = AddressRange>,
rwx: impl Iterator<Item = AddressRange>,
pre_allocated: impl Iterator<Item = AddressRange>,
allocator: &mut impl PageAlloc,
allocator: &impl PageAlloc,
) -> Result<(), Error> {
let pt = hal_core::mm::prefill_pagetable::<PageTable>(r, rw, rwx, pre_allocated, allocator)?;

Expand Down
6 changes: 3 additions & 3 deletions hal_aarch64/src/mm/pgt48.rs
Original file line number Diff line number Diff line change
Expand Up @@ -224,7 +224,7 @@ impl PageMap for PageTable {
const PAGE_SIZE: usize = 4096;
type Entry = TableEntry;

fn new(allocator: &mut impl PageAlloc) -> Result<&'static mut Self, Error> {
fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error> {
let page = allocator.alloc(1)?;
let page_table = unsafe { page as *mut PageTable };
// Safety: the PMM gave us the memory, it should be a valid pointer.
Expand All @@ -243,7 +243,7 @@ impl PageMap for PageTable {
va: mm::VAddr,
pa: mm::PAddr,
perms: Permissions,
allocator: &mut impl PageAlloc,
allocator: &impl PageAlloc,
) -> Result<&mut TableEntry, Error> {
let va = VAddr::from(va);
let pa = PAddr::from(pa);
Expand Down Expand Up @@ -276,7 +276,7 @@ impl PageMap for PageTable {
unreachable!("We should have reached lvl 3 and returned by now...");
}

fn add_invalid_entry(&mut self, va: mm::VAddr, allocator: &mut impl PageAlloc) -> Result<(), Error> {
fn add_invalid_entry(&mut self, va: mm::VAddr, allocator: &impl PageAlloc) -> Result<(), Error> {
let entry = self.map(
va,
mm::PAddr {
Expand Down
31 changes: 15 additions & 16 deletions hal_core/src/mm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -57,26 +57,25 @@ pub enum AllocatorError {
OutOfMemory,
}

pub trait PageAlloc {
fn alloc(&mut self, page_count: usize) -> Result<usize, AllocatorError>;
fn dealloc(&mut self, base: usize, page_count: usize) -> Result<(), AllocatorError>;
fn used_pages(&self) -> impl Iterator<Item = usize> + '_;
pub trait PageAlloc: Sync {
fn alloc(&self, page_count: usize) -> Result<usize, AllocatorError>;
fn dealloc(&self, base: usize, page_count: usize) -> Result<(), AllocatorError>;
fn used_pages<F: FnMut(usize)>(&self, f: F);
}

pub struct NullPageAllocator;

impl PageAlloc for NullPageAllocator {
fn alloc(&mut self, page_count: usize) -> Result<usize, AllocatorError> {
fn alloc(&self, _page_count: usize) -> Result<usize, AllocatorError> {
panic!("the null page allocator mustn't allocate");
}

fn dealloc(&mut self, base: usize, page_count: usize) -> Result<(), AllocatorError> {
fn dealloc(&self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> {
panic!("the null page allocator cannot deallocate");
}

fn used_pages(&self) -> impl Iterator<Item = usize> + '_ {
fn used_pages<F: FnMut(usize)>(&self, f: F) {
panic!("obviously the null allocator has no pages that are in use");
core::iter::empty()
}
}

Expand All @@ -88,17 +87,17 @@ pub trait PageMap {
const PAGE_SIZE: usize;
type Entry: PageEntry;

fn new(allocator: &mut impl PageAlloc) -> Result<&'static mut Self, Error>;
fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error>;

fn map(
&mut self,
va: VAddr,
pa: PAddr,
perms: Permissions,
allocator: &mut impl PageAlloc,
allocator: &impl PageAlloc,
) -> Result<&mut Self::Entry, Error>;

fn add_invalid_entry(&mut self, va: VAddr, allocator: &mut impl PageAlloc) -> Result<(), Error> {
fn add_invalid_entry(&mut self, va: VAddr, allocator: &impl PageAlloc) -> Result<(), Error> {
self.map(
va,
PAddr::new(0x0A0A_0A0A_0A0A_0A0A),
Expand All @@ -114,7 +113,7 @@ pub trait PageMap {
&mut self,
addr: VAddr,
perms: Permissions,
allocator: &mut impl PageAlloc,
allocator: &impl PageAlloc,
) -> Result<(), Error> {
self.map(addr, PAddr::new(addr.val), perms, allocator)
.map(|_| ())
Expand All @@ -125,7 +124,7 @@ pub trait PageMap {
addr: VAddr,
page_count: usize,
perms: Permissions,
allocator: &mut impl PageAlloc,
allocator: &impl PageAlloc,
) -> Result<(), Error> {
let start = addr.val;
for i in 0..page_count {
Expand All @@ -138,7 +137,7 @@ pub trait PageMap {
fn add_invalid_entries(
&mut self,
range: AddressRange,
allocator: &mut impl PageAlloc,
allocator: &impl PageAlloc,
) -> Result<(), Error> {
for page in range.iter_pages(Self::PAGE_SIZE) {
self.add_invalid_entry(VAddr::new(page), allocator)?;
Expand All @@ -151,7 +150,7 @@ pub trait PageMap {
&mut self,
range: AddressRange,
perms: Permissions,
allocator: &mut impl PageAlloc,
allocator: &impl PageAlloc,
) -> Result<(), Error> {
for page in range.iter_pages(Self::PAGE_SIZE) {
self.identity_map(VAddr::new(page), perms, allocator)?;
Expand All @@ -176,7 +175,7 @@ pub fn prefill_pagetable<P: PageMap + 'static>(
rw: impl Iterator<Item = AddressRange>,
rwx: impl Iterator<Item = AddressRange>,
pre_allocated: impl Iterator<Item = AddressRange>,
allocator: &mut impl PageAlloc,
allocator: &impl PageAlloc,
) -> Result<&'static mut P, Error> {
trace!("hal_core::mm::init_paging");
let pt: &'static mut P = P::new(allocator)?;
Expand Down
5 changes: 2 additions & 3 deletions kernel/src/executable/elf.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@ impl<'a> Elf<'a> {
}

pub fn load(&self) -> Result<(), Error> {
let pmm = globals::PHYSICAL_MEMORY_MANAGER.get();
let page_size = hal::mm::PAGE_SIZE;

for segment in self.segments() {
Expand All @@ -79,7 +78,7 @@ impl<'a> Elf<'a> {
let p_memsz = segment.p_memsz as usize;

let pages_needed = Self::pages_needed(segment, page_size);
let physical_pages = pmm.alloc(pages_needed).unwrap();
let physical_pages = globals::PHYSICAL_MEMORY_MANAGER.alloc(pages_needed).unwrap();
let virtual_pages = segment.p_paddr as *mut u8;
let offset_in_page =
(virtual_pages as usize) - align_down(virtual_pages as usize, page_size);
Expand Down Expand Up @@ -113,7 +112,7 @@ impl<'a> Elf<'a> {
VAddr::new(align_down(virtual_pages as usize, page_size) + page_offset),
PAddr::new(usize::from(physical_pages) + page_offset),
perms,
pmm
&globals::PHYSICAL_MEMORY_MANAGER
)
.unwrap();
}
Expand Down
4 changes: 2 additions & 2 deletions kernel/src/generic_main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,14 @@ pub fn generic_main<const LAUNCH_TESTS: bool>(dt: DeviceTree, hacky_devices: &[&

// Memory init
globals::PHYSICAL_MEMORY_MANAGER
.lock(|pmm| pmm.init_from_device_tree(&dt))
.init_from_device_tree(&dt)
.unwrap();
mm::map_address_space(&dt, devices).expect("failed to map the addres space");

// Driver stuff
// let _drvmgr = DriverManager::with_devices(&dt).unwrap();

hal::irq::init_irq_chip((), globals::PHYSICAL_MEMORY_MANAGER.get()).expect("initialization of irq chip failed");
hal::irq::init_irq_chip((), &globals::PHYSICAL_MEMORY_MANAGER).expect("initialization of irq chip failed");

hal::cpu::unmask_interrupts();

Expand Down
3 changes: 1 addition & 2 deletions kernel/src/globals.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,7 @@ use crate::lock::Lock;

use crate::mm;

pub static PHYSICAL_MEMORY_MANAGER: Lock<mm::PhysicalMemoryManager> =
Lock::new(mm::PhysicalMemoryManager::new());
pub static PHYSICAL_MEMORY_MANAGER: mm::PhysicalMemoryManager = mm::PhysicalMemoryManager::new();

pub enum KernelState {
EarlyInit,
Expand Down
14 changes: 6 additions & 8 deletions kernel/src/mm/binary_buddy_allocator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,12 @@ unsafe impl GlobalAlloc for BinaryBuddyAllocator {
// - be thread-safe
// - disable interrupts when entering, then re-enable

globals::PHYSICAL_MEMORY_MANAGER.lock(|pmm| {
let page_count = if layout.size() <= PAGE_SIZE {
1
} else {
layout.size() / PAGE_SIZE + 1
};
pmm.alloc(page_count).unwrap_or(0usize.into()) as *mut u8
})
let page_count = if layout.size() <= PAGE_SIZE {
1
} else {
layout.size() / PAGE_SIZE + 1
};
globals::PHYSICAL_MEMORY_MANAGER.alloc(page_count).unwrap_or(0usize.into()) as *mut u8
}

unsafe fn dealloc(&self, _: *mut u8, _: Layout) {
Expand Down
6 changes: 3 additions & 3 deletions kernel/src/mm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -135,13 +135,13 @@ pub fn map_address_space<'a, I: Iterator<Item = &'a &'a dyn Driver>>(
rw_entries.into_iter(),
rwx_entries.into_iter(),
pre_allocated_entries.into_iter(),
globals::PHYSICAL_MEMORY_MANAGER.get(),
&globals::PHYSICAL_MEMORY_MANAGER,
)?;

// All pmm pages are located in DRAM so they are already in the pagetable (they are part of
// the pre_allocated_entries).
// Therefore no allocations will be made, pass the NullPageAllocator.
for page in globals::PHYSICAL_MEMORY_MANAGER.get().used_pages() {
globals::PHYSICAL_MEMORY_MANAGER.used_pages(|page| {
log::debug!("pushing rw allocated page 0x{:X}", page);
// let range = AddressRange::new(page..page + PAGE_SIZE);
// rw_entries.try_push(range);
Expand All @@ -150,7 +150,7 @@ pub fn map_address_space<'a, I: Iterator<Item = &'a &'a dyn Driver>>(
Permissions::READ | Permissions::WRITE,
&mut NullPageAllocator
);
}
});

hal::mm::enable_paging();

Expand Down
60 changes: 34 additions & 26 deletions kernel/src/mm/physical_memory_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ use hal_core::{
use hal::mm::PAGE_SIZE;

use log::debug;
use spin::mutex::Mutex;

#[derive(Debug, PartialEq, Eq)]
pub enum PageKind {
Expand Down Expand Up @@ -54,7 +55,7 @@ impl PhysicalPage {

#[derive(Debug)]
pub struct PhysicalMemoryManager {
metadata: &'static mut [PhysicalPage],
metadata: Mutex<&'static mut [PhysicalPage]>,
}

impl PhysicalMemoryManager {
Expand Down Expand Up @@ -195,13 +196,13 @@ impl PhysicalMemoryManager {
};

Self {
metadata,
metadata: Mutex::new(metadata),
}
}

/// Initialize a [`PageAllocator`] from the device tree.
pub fn init_from_device_tree(
&mut self,
&self,
device_tree: &DeviceTree,
) -> Result<(), AllocatorError> {
let available_regions = Self::available_memory_regions::<10>(device_tree);
Expand Down Expand Up @@ -250,35 +251,28 @@ impl PhysicalMemoryManager {
}
assert!(count == page_count);

self.metadata = metadata;
log::debug!("PMM metadata addr is 0x{:X}", self.metadata.as_ptr() as u64);
*self.metadata.lock() = metadata;

Ok(())
}

fn metadata_pages(&self) -> impl core::iter::Iterator<Item = usize> {
let metadata_start = (&self.metadata[0] as *const PhysicalPage) as usize;
let metadata = self.metadata.lock();
let metadata_start = (&metadata[0] as *const PhysicalPage) as usize;
let metadata_last =
(&self.metadata[self.metadata.len() - 1] as *const PhysicalPage) as usize;
(&metadata[metadata.len() - 1] as *const PhysicalPage) as usize;

(metadata_start..=metadata_last).step_by(PAGE_SIZE)
}

    // NOTE(review): diff residue from the scraped commit page — these appear
    // to be the REMOVED lines of this commit. In the post-commit code
    // `metadata` is wrapped in a Mutex, so this borrowed-iterator form cannot
    // compile; the commit inlines this logic into `used_pages` instead.
    // The `metadata[254]` index below is leftover ad-hoc debugging.
    fn allocated_pages(&self) -> impl core::iter::Iterator<Item = usize> + '_ {
    log::debug!("allocated_pages: PMM metadata addr is 0x{:X}", self.metadata.as_ptr() as u64);
    log::debug!("allocated_pages: metadata[254] {:?}", self.metadata[254]);
    self.metadata
    .iter()
    .filter(|page| page.is_allocated())
    .map(|page| page.base)
    }

pub fn alloc_pages(&mut self, page_count: usize) -> Result<usize, AllocatorError> {
pub fn alloc_pages(&self, page_count: usize) -> Result<usize, AllocatorError> {
let mut consecutive_pages: usize = 0;
let mut first_page_index: usize = 0;
let mut last_page_base: usize = 0;

for (i, page) in self.metadata.iter().enumerate() {
let mut metadata = self.metadata.lock();

for (i, page) in metadata.iter().enumerate() {
if consecutive_pages == 0 {
first_page_index = i;
last_page_base = page.base;
Expand All @@ -298,14 +292,14 @@ impl PhysicalMemoryManager {
last_page_base = page.base;

if consecutive_pages == page_count {
self.metadata[first_page_index..=i]
metadata[first_page_index..=i]
.iter_mut()
.for_each(|page| page.set_allocated());
self.metadata[i].set_last();
metadata[i].set_last();

let addr = self.metadata[first_page_index].base;
let addr = metadata[first_page_index].base;

return Ok(self.metadata[first_page_index].base);
return Ok(metadata[first_page_index].base);
}
}

Expand All @@ -315,7 +309,7 @@ impl PhysicalMemoryManager {


impl PageAlloc for PhysicalMemoryManager {
fn alloc(&mut self, page_count: usize) -> Result<usize, AllocatorError> {
fn alloc(&self, page_count: usize) -> Result<usize, AllocatorError> {
// If there is a kernel pagetable, identity map the pages.
let first_page = self.alloc_pages(page_count)?;
let addr: usize = first_page.into();
Expand All @@ -336,16 +330,30 @@ impl PageAlloc for PhysicalMemoryManager {
Ok(addr)
}

fn dealloc(&mut self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> {
fn dealloc(&self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> {
// TODO:
// - if MMU is on, unmap the page
// - set as free
log::warn!("PMM dealloc not yet implemented...");
Ok(())
}

fn used_pages(&self) -> impl Iterator<Item = usize> + '_ {
self.metadata_pages().chain(self.allocated_pages())
fn used_pages<F: FnMut(usize)>(&self, f: F) {
let metadata = self.metadata.lock();

let metadata_start = (&metadata[0] as *const PhysicalPage) as usize;
let metadata_last =
(&metadata[metadata.len() - 1] as *const PhysicalPage) as usize;

let metadata_pages = (metadata_start..=metadata_last).step_by(PAGE_SIZE);
let allocated_pages = metadata
.iter()
.filter(|page| page.is_allocated())
.map(|page| page.base);

metadata_pages
.chain(allocated_pages)
.for_each(f);
}
}

Expand Down
Loading

0 comments on commit 5632e1a

Please sign in to comment.