Cache per VirtualMemory
frankdavid committed Dec 4, 2024
1 parent efdefe1 commit eb138e8
Showing 2 changed files with 63 additions and 90 deletions.
14 changes: 7 additions & 7 deletions canbench_results.yml
@@ -73,7 +73,7 @@ benches:
     scopes: {}
   btreemap_get_blob_512_1024_v2_mem_manager:
     total:
-      instructions: 2529776211
+      instructions: 2517895062
       heap_increase: 0
       stable_memory_increase: 0
     scopes: {}
@@ -139,7 +139,7 @@ benches:
     scopes: {}
   btreemap_get_u64_u64_v2_mem_manager:
     total:
-      instructions: 346385206
+      instructions: 337472533
       heap_increase: 0
       stable_memory_increase: 0
     scopes: {}
@@ -223,7 +223,7 @@ benches:
     scopes: {}
   btreemap_insert_blob_1024_512_v2_mem_manager:
     total:
-      instructions: 5258158583
+      instructions: 5243334303
       heap_increase: 0
       stable_memory_increase: 256
     scopes: {}
@@ -379,7 +379,7 @@ benches:
     scopes: {}
   btreemap_insert_u64_u64_mem_manager:
     total:
-      instructions: 565125199
+      instructions: 553691634
       heap_increase: 0
       stable_memory_increase: 0
     scopes: {}
@@ -631,7 +631,7 @@ benches:
     scopes: {}
   memory_manager_overhead:
     total:
-      instructions: 1181970633
+      instructions: 1181967369
       heap_increase: 0
       stable_memory_increase: 8320
     scopes: {}
@@ -661,7 +661,7 @@ benches:
     scopes: {}
   vec_get_blob_4_mem_manager:
     total:
-      instructions: 7351373
+      instructions: 7238723
       heap_increase: 0
       stable_memory_increase: 0
     scopes: {}
@@ -673,7 +673,7 @@ benches:
     scopes: {}
   vec_get_blob_64_mem_manager:
     total:
-      instructions: 15459258
+      instructions: 15339702
       heap_increase: 0
       stable_memory_increase: 0
     scopes: {}
139 changes: 56 additions & 83 deletions src/memory_manager.rs
@@ -151,6 +151,7 @@ impl<M: Memory> MemoryManager<M> {
         VirtualMemory {
             id,
             memory_manager: self.inner.clone(),
+            cache: BucketCache::new(),
         }
     }

@@ -193,6 +194,7 @@ impl Header {
 pub struct VirtualMemory<M: Memory> {
     id: MemoryId,
     memory_manager: Rc<RefCell<MemoryManagerInner<M>>>,
+    cache: BucketCache,
 }

 impl<M: Memory> Memory for VirtualMemory<M> {
@@ -205,17 +207,21 @@ impl<M: Memory> Memory for VirtualMemory<M> {
     }

     fn read(&self, offset: u64, dst: &mut [u8]) {
-        self.memory_manager.borrow().read(self.id, offset, dst)
+        self.memory_manager
+            .borrow()
+            .read(self.id, offset, dst, &self.cache)
     }

     unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
         self.memory_manager
             .borrow()
-            .read_unsafe(self.id, offset, dst, count)
+            .read_unsafe(self.id, offset, dst, count, &self.cache)
     }

     fn write(&self, offset: u64, src: &[u8]) {
-        self.memory_manager.borrow().write(self.id, offset, src)
+        self.memory_manager
+            .borrow()
+            .write(self.id, offset, src, &self.cache)
     }
 }
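
The three methods above now hand the per-VirtualMemory cache down to the inner memory manager on every access. As an illustration of what this buys (a hedged sketch, not part of the diff; it assumes the crate's public MemoryManager/DefaultMemoryImpl API), interleaving accesses to two virtual memories no longer evicts a single shared cache entry on each switch, because every VirtualMemory keeps its own most recently used bucket:

use ic_stable_structures::memory_manager::{MemoryId, MemoryManager};
use ic_stable_structures::{DefaultMemoryImpl, Memory};

fn main() {
    let mem_mgr = MemoryManager::init(DefaultMemoryImpl::default());
    let mem_a = mem_mgr.get(MemoryId::new(0));
    let mem_b = mem_mgr.get(MemoryId::new(1));
    let _ = mem_a.grow(1);
    let _ = mem_b.grow(1);

    let mut buf = [0u8; 8];
    for i in 0..1_000u64 {
        // Each VirtualMemory consults the BucketCache it owns, so these
        // interleaved reads keep hitting their own cached bucket instead of
        // thrashing one shared entry on every switch between memories.
        mem_a.read(i % 64, &mut buf);
        mem_b.read(i % 64, &mut buf);
    }
}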

@@ -233,8 +239,6 @@ struct MemoryManagerInner<M: Memory> {

     /// A map mapping each managed memory to the bucket ids that are allocated to it.
     memory_buckets: Vec<Vec<BucketId>>,
-
-    bucket_cache: BucketCache,
 }

 impl<M: Memory> MemoryManagerInner<M> {
@@ -263,7 +267,6 @@ impl<M: Memory> MemoryManagerInner<M> {
             memory_sizes_in_pages: [0; MAX_NUM_MEMORIES as usize],
             memory_buckets: vec![vec![]; MAX_NUM_MEMORIES as usize],
             bucket_size_in_pages,
-            bucket_cache: BucketCache::new(),
         };

         mem_mgr.save_header();
@@ -305,7 +308,6 @@ impl<M: Memory> MemoryManagerInner<M> {
             bucket_size_in_pages: header.bucket_size_in_pages,
             memory_sizes_in_pages: header.memory_sizes_in_pages,
             memory_buckets,
-            bucket_cache: BucketCache::new(),
         }
     }

@@ -377,14 +379,11 @@ impl<M: Memory> MemoryManagerInner<M> {
         old_size as i64
     }

-    fn write(&self, id: MemoryId, offset: u64, src: &[u8]) {
-        if let Some(real_address) = self.bucket_cache.get(
-            id,
-            VirtualSegment {
-                address: offset.into(),
-                length: src.len().into(),
-            },
-        ) {
+    fn write(&self, id: MemoryId, offset: u64, src: &[u8], bucket_cache: &BucketCache) {
+        if let Some(real_address) = bucket_cache.get(VirtualSegment {
+            address: offset.into(),
+            length: src.len().into(),
+        }) {
             self.memory.write(real_address.get(), src);
             return;
         }
@@ -400,6 +399,7 @@ impl<M: Memory> MemoryManagerInner<M> {
                 address: offset.into(),
                 length: src.len().into(),
             },
+            bucket_cache,
             |RealSegment { address, length }| {
                 self.memory.write(
                     address.get(),
@@ -412,25 +412,29 @@ impl<M: Memory> MemoryManagerInner<M> {
     }

     #[inline]
-    fn read(&self, id: MemoryId, offset: u64, dst: &mut [u8]) {
+    fn read(&self, id: MemoryId, offset: u64, dst: &mut [u8], bucket_cache: &BucketCache) {
         // SAFETY: this is trivially safe because dst has dst.len() space.
-        unsafe { self.read_unsafe(id, offset, dst.as_mut_ptr(), dst.len()) }
+        unsafe { self.read_unsafe(id, offset, dst.as_mut_ptr(), dst.len(), bucket_cache) }
     }

     /// # Safety
     ///
     /// Callers must guarantee that
     /// * it is valid to write `count` number of bytes starting from `dst`,
     /// * `dst..dst + count` does not overlap with `self`.
-    unsafe fn read_unsafe(&self, id: MemoryId, offset: u64, dst: *mut u8, count: usize) {
+    unsafe fn read_unsafe(
+        &self,
+        id: MemoryId,
+        offset: u64,
+        dst: *mut u8,
+        count: usize,
+        bucket_cache: &BucketCache,
+    ) {
         // First try to find the virtual segment in the cache.
-        if let Some(real_address) = self.bucket_cache.get(
-            id,
-            VirtualSegment {
-                address: offset.into(),
-                length: count.into(),
-            },
-        ) {
+        if let Some(real_address) = bucket_cache.get(VirtualSegment {
+            address: offset.into(),
+            length: count.into(),
+        }) {
             self.memory.read_unsafe(real_address.get(), dst, count);
             return;
         }
@@ -446,6 +450,7 @@ impl<M: Memory> MemoryManagerInner<M> {
                 address: offset.into(),
                 length: count.into(),
             },
+            bucket_cache,
             |RealSegment { address, length }| {
                 self.memory.read_unsafe(
                     address.get(),
@@ -492,6 +497,7 @@ impl<M: Memory> MemoryManagerInner<M> {
         &self,
         MemoryId(id): MemoryId,
         virtual_segment: VirtualSegment,
+        bucket_cache: &BucketCache,
         mut func: impl FnMut(RealSegment),
     ) {
         // Get the buckets allocated to the given memory id.
@@ -514,8 +520,7 @@ impl<M: Memory> MemoryManagerInner<M> {
             let segment_len = (bucket_size_in_bytes - start_offset_in_bucket).min(length);

             // Cache this bucket.
-            self.bucket_cache.store(
-                MemoryId(id),
+            bucket_cache.store(
                 VirtualSegment {
                     address: bucket_start.into(),
                     length: self.bucket_size_in_bytes(),
@@ -598,7 +603,6 @@ fn bucket_allocations_address(id: BucketId) -> Address {
 /// If a segment from this bucket is accessed, we can return the real address faster.
 #[derive(Clone)]
 struct BucketCache {
-    memory_id: Cell<MemoryId>,
     bucket: Cell<VirtualSegment>,
     /// The real address that corresponds to bucket.address
     real_address: Cell<Address>,
@@ -608,7 +612,6 @@ impl BucketCache {
     #[inline]
     fn new() -> Self {
         BucketCache {
-            memory_id: Cell::new(MemoryId(0)),
             bucket: Cell::new(VirtualSegment {
                 address: Address::from(0),
                 length: Bytes::new(0),
@@ -622,19 +625,17 @@ impl BucketCache {
     /// Returns the real address corresponding to `virtual_segment.address` if `virtual_segment`
     /// is fully contained within the cached bucket, otherwise `None`.
     #[inline]
-    fn get(&self, memory_id: MemoryId, virtual_segment: VirtualSegment) -> Option<Address> {
+    fn get(&self, virtual_segment: VirtualSegment) -> Option<Address> {
         let cached_bucket = self.bucket.get();
-        let cache_hit =
-            self.memory_id.get() == memory_id && cached_bucket.contains_segment(&virtual_segment);
-
-        cache_hit
+        cached_bucket
+            .contains_segment(&virtual_segment)
             .then(|| self.real_address.get() + (virtual_segment.address - cached_bucket.address))
     }

     /// Stores the mapping of a bucket to a real address.
     #[inline]
-    fn store(&self, memory_id: MemoryId, bucket: VirtualSegment, real_address: Address) {
-        self.memory_id.set(memory_id);
+    fn store(&self, bucket: VirtualSegment, real_address: Address) {
         self.bucket.set(bucket);
         self.real_address.set(real_address);
     }
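
For intuition, the hit path in `get` above is plain offset arithmetic: when the requested segment lies entirely inside the cached bucket, the real address is the cached bucket's real start plus the request's offset into that bucket. A standalone sketch of that computation (plain u64 values standing in for the crate's Address/Bytes types), using the same numbers as the test below:

fn cached_lookup(
    cached_virtual_start: u64, // VirtualSegment.address of the cached bucket
    cached_len: u64,           // VirtualSegment.length of the cached bucket
    cached_real_start: u64,    // real address stored alongside the bucket
    addr: u64,                 // requested virtual address
    len: u64,                  // requested length
) -> Option<u64> {
    // Hit only if the requested segment is fully contained in the cached bucket.
    if addr >= cached_virtual_start && addr + len <= cached_virtual_start + cached_len {
        Some(cached_real_start + (addr - cached_virtual_start))
    } else {
        None
    }
}

fn main() {
    // Bucket spanning virtual addresses 0..335 cached at real address 983.
    assert_eq!(cached_lookup(0, 335, 983, 1, 2), Some(984)); // match at the beginning
    assert_eq!(cached_lookup(0, 335, 983, 334, 1), Some(1317)); // match at the end
    assert_eq!(cached_lookup(0, 335, 983, 1, 335), None); // spills past the cached bucket
}
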
Expand Down Expand Up @@ -1050,18 +1051,14 @@ mod test {

// No match, nothing has been stored.
assert_eq!(
bucket_cache.get(
MemoryId::new(0),
VirtualSegment {
address: Address::from(0),
length: Bytes::from(1u64)
}
),
bucket_cache.get(VirtualSegment {
address: Address::from(0),
length: Bytes::from(1u64)
}),
None
);

bucket_cache.store(
MemoryId::new(22),
VirtualSegment {
address: Address::from(0),
length: Bytes::from(335u64),
@@ -1071,61 +1068,37 @@ mod test {

         // Match at the beginning
         assert_eq!(
-            bucket_cache.get(
-                MemoryId::new(22),
-                VirtualSegment {
-                    address: Address::from(1),
-                    length: Bytes::from(2u64)
-                }
-            ),
+            bucket_cache.get(VirtualSegment {
+                address: Address::from(1),
+                length: Bytes::from(2u64)
+            }),
             Some(Address::from(984))
         );

         // Match at the end
         assert_eq!(
-            bucket_cache.get(
-                MemoryId::new(22),
-                VirtualSegment {
-                    address: Address::from(334),
-                    length: Bytes::from(1u64)
-                }
-            ),
+            bucket_cache.get(VirtualSegment {
+                address: Address::from(334),
+                length: Bytes::from(1u64)
+            }),
             Some(Address::from(1317))
         );

         // Match entire segment
         assert_eq!(
-            bucket_cache.get(
-                MemoryId::new(22),
-                VirtualSegment {
-                    address: Address::from(0),
-                    length: Bytes::from(335u64),
-                }
-            ),
+            bucket_cache.get(VirtualSegment {
+                address: Address::from(0),
+                length: Bytes::from(335u64),
+            }),
             Some(Address::from(983))
         );

-        // No match (memory id is different)
-        assert_eq!(
-            bucket_cache.get(
-                MemoryId::new(23),
-                VirtualSegment {
-                    address: Address::from(1),
-                    length: Bytes::from(2u64)
-                }
-            ),
-            None
-        );
-
         // No match - outside cached segment
         assert_eq!(
-            bucket_cache.get(
-                MemoryId::new(22),
-                VirtualSegment {
-                    address: Address::from(1),
-                    length: Bytes::from(335u64)
-                }
-            ),
+            bucket_cache.get(VirtualSegment {
+                address: Address::from(1),
+                length: Bytes::from(335u64)
+            }),
             None
         );
     }
