diff --git a/Cargo.lock b/Cargo.lock
index 01820b68..6f30d01a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -241,7 +241,7 @@ version = "0.1.0"
 dependencies = [
  "cfg-if",
  "der 0.5.1",
- "ring",
+ "ring 0.16.20",
  "rust_std_stub",
  "rustls",
  "zeroize",
@@ -654,11 +654,25 @@ dependencies = [
  "libc",
  "once_cell",
  "spin 0.5.2",
- "untrusted",
+ "untrusted 0.7.1",
  "web-sys",
  "winapi",
 ]
 
+[[package]]
+name = "ring"
+version = "0.17.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b"
+dependencies = [
+ "cc",
+ "getrandom",
+ "libc",
+ "spin 0.9.8",
+ "untrusted 0.9.0",
+ "windows-sys",
+]
+
 [[package]]
 name = "rust_std_stub"
 version = "0.1.0"
@@ -685,7 +699,7 @@ dependencies = [
 name = "rustls"
 version = "0.22.0-alpha.0"
 dependencies = [
- "ring",
+ "ring 0.16.20",
  "rust_std_stub",
  "rustls-webpki",
  "subtle",
@@ -697,8 +711,8 @@ version = "0.102.0-alpha.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "984a9510c24d64bc5c53a6b0d329f883b0f95048fa14ee30223ed35c7e1ba38b"
 dependencies = [
- "ring",
- "untrusted",
+ "ring 0.16.20",
+ "untrusted 0.7.1",
 ]
 
 [[package]]
@@ -971,7 +985,7 @@ dependencies = [
  "der 0.4.5",
  "lazy_static",
  "r-efi",
- "ring",
+ "ring 0.17.5",
  "scroll",
  "td-layout",
  "td-uefi-pi",
@@ -1069,6 +1083,12 @@ version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
 
+[[package]]
+name = "untrusted"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
+
 [[package]]
 name = "utf8parse"
 version = "0.2.1"
diff --git a/deps/td-shim b/deps/td-shim
index 800030b2..0001b349 160000
--- a/deps/td-shim
+++ b/deps/td-shim
@@ -1 +1 @@
-Subproject commit 800030b2cf3ac155ea29047feb748eb7b2063e51
+Subproject commit 0001b34978e15bb11365a9e9bd0364ad178046a3
diff --git a/src/attestation/src/ghci.rs b/src/attestation/src/ghci.rs
index c8e54d36..2c5a3bda 100644
--- a/src/attestation/src/ghci.rs
+++ b/src/attestation/src/ghci.rs
@@ -6,7 +6,7 @@ use core::sync::atomic::{AtomicU8, Ordering};
 use core::{ffi::c_void, ptr::null_mut, slice::from_raw_parts_mut};
 use td_payload::arch::apic::{disable, enable_and_hlt};
 use td_payload::arch::idt::register;
-use td_payload::{interrupt_handler_template, mm::dma::DmaMemory};
+use td_payload::{interrupt_handler_template, mm::shared::SharedMemory};
 use tdx_tdcall::{td_vmcall, tdx, TdVmcallArgs, TdVmcallError};
 
 use crate::binding::AttestLibError;
@@ -26,7 +26,7 @@ pub extern "C" fn migtd_get_quote(tdquote_req_buf: *mut c_void, len: u64) -> i32
 
     let input = unsafe { from_raw_parts_mut(tdquote_req_buf as *mut u8, len as usize) };
 
-    let mut shared = if let Some(shared) = DmaMemory::new(len as usize / 0x1000) {
+    let mut shared = if let Some(shared) = SharedMemory::new(len as usize / 0x1000) {
         shared
     } else {
         return AttestLibError::MigtdAttestErrorOutOfMemory as i32;
diff --git a/src/devices/virtio/fuzz/fuzz_targets/afl-virtio.rs b/src/devices/virtio/fuzz/fuzz_targets/afl-virtio.rs
index e02855b9..8997059a 100644
--- a/src/devices/virtio/fuzz/fuzz_targets/afl-virtio.rs
+++ b/src/devices/virtio/fuzz/fuzz_targets/afl-virtio.rs
@@ -15,7 +15,7 @@ const BARU64_2_OFFSET: u64 = 0x18;
 const BARU64_3_OFFSET: u64 = 0x20;
 
 const VEC_CAPACITY: usize = 0x10000_0000;
-const TD_PAYLOAD_DMA_SIZE: usize = 0x100_0000;
+const TD_PAYLOAD_SHARED_MEMORY_SIZE: usize = 0x100_0000;
 const PTR_ALIGN_VAR: u64 = 0xffff_ffff_ffff_0000;
 
 const DATA_LEN: usize = 0x100_0000;
@@ -28,7 +28,7 @@ fn main() {
     let common_addr = 0;
 
     let paddr = ptr + PAGE_SIZE as u64;
-    init(paddr as usize, TD_PAYLOAD_DMA_SIZE);
+    init(paddr as usize, TD_PAYLOAD_SHARED_MEMORY_SIZE);
 
     COMMON_HEADER.try_init_once(|| ptr).expect("init error");
 
     #[cfg(not(feature = "fuzz"))]
diff --git a/src/devices/virtio/fuzz/fuzz_targets/fuzz-virtio.rs b/src/devices/virtio/fuzz/fuzz_targets/fuzz-virtio.rs
index 3a022202..a6abccb6 100644
--- a/src/devices/virtio/fuzz/fuzz_targets/fuzz-virtio.rs
+++ b/src/devices/virtio/fuzz/fuzz_targets/fuzz-virtio.rs
@@ -17,7 +17,7 @@ fuzz_target!(|data: &[u8]| {
     let common_addr = ptr + 0x10c;
     let paddr = ptr + PAGE_SIZE as u64;
 
-    init(paddr as usize, TD_PAYLOAD_DMA_SIZE);
+    init(paddr as usize, TD_PAYLOAD_SHARED_MEMORY_SIZE);
     // COMMON_HEADER.try_init_once(|| ptr).expect("init error");
     if !COMMON_HEADER.is_initialized() {
         COMMON_HEADER.init_once(|| ptr);
diff --git a/src/devices/virtio/fuzz/fuzz_targets/fuzzlib.rs b/src/devices/virtio/fuzz/fuzz_targets/fuzzlib.rs
index e6f0f030..aa07dce4 100644
--- a/src/devices/virtio/fuzz/fuzz_targets/fuzzlib.rs
+++ b/src/devices/virtio/fuzz/fuzz_targets/fuzzlib.rs
@@ -2,9 +2,9 @@
 //
 // SPDX-License-Identifier: BSD-2-Clause-Patent
 
-use crate::fuzzlib::dma_alloc::virtio_dma_alloc;
+use crate::fuzzlib::shared_alloc::virtio_shared_alloc;
 use conquer_once::spin::OnceCell;
-pub use dma_alloc::init;
+pub use shared_alloc::init;
 pub use pci::{get_fuzz_seed_address, PciDevice, COMMON_HEADER};
 
 pub use virtio::{
@@ -21,7 +21,7 @@ pub const BARU64_2_OFFSET: u64 = 0x18;
 pub const BARU64_3_OFFSET: u64 = 0x20;
 
 pub const VEC_CAPACITY: usize = 0x10000_0000;
-pub const TD_PAYLOAD_DMA_SIZE: usize = 0x100_0000;
+pub const TD_PAYLOAD_SHARED_MEMORY_SIZE: usize = 0x100_0000;
 pub const PTR_ALIGN_VAR: u64 = 0xffff_ffff_ffff_0000;
 
 pub const DATA_LEN: usize = 0x100_0000;
@@ -66,12 +66,12 @@ pub fn fuzz_virtio(pddr: u64) {
     }
 }
 
-mod dma_alloc {
+mod shared_alloc {
     use bitmap_allocator::{BitAlloc, BitAlloc4K};
     use spin::Mutex;
 
-    static DMA_ALLOCATOR: Mutex<DmaAlloc> = Mutex::new(DmaAlloc::empty());
+    static SHARED_MEMORY_ALLOCATOR: Mutex<SharedAlloc> = Mutex::new(SharedAlloc::empty());
 
     pub fn init(dma_base: usize, dma_size: usize) {
         println!("init dma - {:#x} - {:#x}\n", dma_base, dma_base + dma_size);
@@ -80,18 +80,18 @@
     fn init_dma(dma_base: usize, dma_size: usize) {
         // set page table flags TBD:
-        *DMA_ALLOCATOR.lock() = DmaAlloc::new(dma_base as usize, dma_size);
+        *SHARED_MEMORY_ALLOCATOR.lock() = SharedAlloc::new(dma_base as usize, dma_size);
     }
 
     #[no_mangle]
-    pub extern "C" fn virtio_dma_alloc(blocks: usize) -> PhysAddr {
-        let paddr = unsafe { DMA_ALLOCATOR.lock().alloc_contiguous(blocks, 0) }.unwrap_or(0);
+    pub extern "C" fn virtio_shared_alloc(blocks: usize) -> PhysAddr {
+        let paddr = unsafe { SHARED_MEMORY_ALLOCATOR.lock().alloc_contiguous(blocks, 0) }.unwrap_or(0);
         paddr
     }
 
     #[no_mangle]
-    pub extern "C" fn virtio_dma_dealloc(paddr: PhysAddr, blocks: usize) -> i32 {
-        let _ = unsafe { DMA_ALLOCATOR.lock().dealloc_contiguous(paddr, blocks) };
+    pub extern "C" fn virtio_shared_dealloc(paddr: PhysAddr, blocks: usize) -> i32 {
+        let _ = unsafe { SHARED_MEMORY_ALLOCATOR.lock().dealloc_contiguous(paddr, blocks) };
         0
     }
 
@@ -108,14 +108,14 @@ mod dma_alloc {
     type VirtAddr = usize;
     type PhysAddr = usize;
 
-    struct DmaAlloc {
+    struct SharedAlloc {
         base: usize,
         inner: BitAlloc4K,
     }
 
     const BLOCK_SIZE: usize = 4096;
 
-    impl Default for DmaAlloc {
+    impl Default for SharedAlloc {
         fn default() -> Self {
             Self {
                 base: 0,
@@ -124,13 +124,13 @@ mod dma_alloc {
         }
     }
 
-    impl DmaAlloc {
+    impl SharedAlloc {
         pub fn new(base: usize, length: usize) -> Self {
             let mut inner = BitAlloc4K::DEFAULT;
             let blocks = length / BLOCK_SIZE;
             assert!(blocks <= BitAlloc4K::CAP);
             inner.insert(0..blocks);
-            DmaAlloc { base, inner }
+            SharedAlloc { base, inner }
         }
 
         const fn empty() -> Self {
diff --git a/src/devices/virtio_serial/src/lib.rs b/src/devices/virtio_serial/src/lib.rs
index 5f966719..4e5d7cdc 100644
--- a/src/devices/virtio_serial/src/lib.rs
+++ b/src/devices/virtio_serial/src/lib.rs
@@ -123,7 +123,7 @@ impl DmaMemoryRegion {
 }
 
 /// Trait to allow separation of transport from block driver
-pub trait DmaPageAllocator {
+pub trait SharedPageAllocator {
     fn alloc_pages(&self, page_num: usize) -> Option<u64>;
     fn free_pages(&self, addr: u64, page_num: usize);
 }
@@ -186,7 +186,7 @@ impl From for ControlEvent {
 
 pub struct VirtioSerial {
     virtio: Box,
-    dma_allocator: Box,
+    shared_allocator: Box,
     timer: Box,
 
     /// DMA allocation table
@@ -211,12 +211,12 @@ unsafe impl Sync for VirtioSerial {}
 impl VirtioSerial {
     pub fn new(
         virtio: Box,
-        dma_allocator: Box,
+        shared_allocator: Box,
         timer: Box,
     ) -> Result {
         Ok(Self {
             virtio,
-            dma_allocator,
+            shared_allocator,
             timer,
             queues: Vec::new(),
             receive_queues_prefill: Vec::new(),
@@ -792,7 +792,7 @@ impl VirtioSerial {
     fn allocate_dma_memory(&mut self, size: usize) -> Option {
         let dma_size = align_up(size);
 
-        let dma_addr = self.dma_allocator.alloc_pages(dma_size / PAGE_SIZE)?;
+        let dma_addr = self.shared_allocator.alloc_pages(dma_size / PAGE_SIZE)?;
 
         let record = DmaMemoryRegion::new(dma_addr, dma_size);
         self.dma_allocation.insert(dma_addr, record);
@@ -803,7 +803,7 @@ impl VirtioSerial {
     fn free_dma_memory(&mut self, dma_addr: u64) -> Option {
         let record = self.dma_allocation.get(&dma_addr)?;
 
-        self.dma_allocator
+        self.shared_allocator
            .free_pages(record.dma_addr, record.dma_size / PAGE_SIZE);
 
         self.dma_allocation.remove(&dma_addr);
diff --git a/src/devices/vsock/fuzz/fuzz_targets/afl-vsock.rs b/src/devices/vsock/fuzz/fuzz_targets/afl-vsock.rs
index fda6adb3..8be2a4e7 100644
--- a/src/devices/vsock/fuzz/fuzz_targets/afl-vsock.rs
+++ b/src/devices/vsock/fuzz/fuzz_targets/afl-vsock.rs
@@ -4,7 +4,7 @@ mod fuzzlib;
 
 use conquer_once::spin::OnceCell;
-use fuzzlib::{init, virtio_dma_alloc, virtio_dma_dealloc, COMMON_HEADER};
+use fuzzlib::{init, virtio_shared_alloc, virtio_shared_dealloc, COMMON_HEADER};
 use spin::{once::Once, Mutex};
 use std::thread::spawn;
 use virtio::{virtio_pci::VirtioPciTransport, Result};
@@ -21,7 +21,7 @@ const BARU64_2_OFFSET: u64 = 0x18;
 const BARU64_3_OFFSET: u64 = 0x20;
 
 const VEC_CAPACITY: usize = 0x10000_0000;
-const TD_PAYLOAD_DMA_SIZE: usize = 0x100_0000;
+const TD_PAYLOAD_SHARED_MEMORY_SIZE: usize = 0x100_0000;
 const PTR_ALIGN_VAR: u64 = 0xffff_ffff_ffff_0000;
 
 const DATA_LEN: usize = 0x100_0000;
@@ -62,7 +62,7 @@ struct Allocator;
 
 impl VsockDmaPageAllocator for Allocator {
     fn alloc_pages(&self, page_num: usize) -> Option<u64> {
-        let addr = virtio_dma_alloc(page_num);
+        let addr = virtio_shared_alloc(page_num);
         if addr == 0 {
             None
         } else {
@@ -71,7 +71,7 @@
     }
 
     fn free_pages(&self, addr: u64, page_num: usize) {
-        virtio_dma_dealloc(addr as usize, page_num);
+        virtio_shared_dealloc(addr as usize, page_num);
     }
 }
@@ -238,7 +238,7 @@ fn main() {
     data[..DEVICE_HEADER.len()].copy_from_slice(&DEVICE_HEADER);
     COMMON_HEADER.try_init_once(|| ptr).expect("init error");
     let paddr = ptr + PAGE_SIZE as u64;
-    init(paddr as usize, TD_PAYLOAD_DMA_SIZE);
+    init(paddr as usize, TD_PAYLOAD_SHARED_MEMORY_SIZE);
 
     #[cfg(not(feature = "fuzz"))]
     {
diff --git a/src/devices/vsock/fuzz/fuzz_targets/fuzz-vsock.rs b/src/devices/vsock/fuzz/fuzz_targets/fuzz-vsock.rs
index d4da03e6..a0bf58d5 100644
--- a/src/devices/vsock/fuzz/fuzz_targets/fuzz-vsock.rs
+++ b/src/devices/vsock/fuzz/fuzz_targets/fuzz-vsock.rs
@@ -24,7 +24,7 @@ const BARU64_2_OFFSET: u64 = 0x18;
 const BARU64_3_OFFSET: u64 = 0x20;
 
 const VEC_CAPACITY: usize = 0x10000_0000;
-const TD_PAYLOAD_DMA_SIZE: usize = 0x100_0000;
+const TD_PAYLOAD_SHARED_MEMORY_SIZE: usize = 0x100_0000;
 const PTR_ALIGN_VAR: u64 = 0xffff_ffff_ffff_0000;
 
 const DATA_LEN: usize = 0x100_0000;
@@ -231,7 +231,7 @@ fuzz_target!(|data: &[u8]| {
     data[..tmp.len()].copy_from_slice(&tmp);
     let common_addr = ptr + 0x10c;
     let paddr = ptr + PAGE_SIZE as u64;
-    init(paddr as usize, TD_PAYLOAD_DMA_SIZE);
+    init(paddr as usize, TD_PAYLOAD_SHARED_MEMORY_SIZE);
     COMMON_HEADER.try_init_once(|| ptr).expect("init error");
 
     unsafe {
diff --git a/src/devices/vsock/fuzz/fuzz_targets/fuzzlib.rs b/src/devices/vsock/fuzz/fuzz_targets/fuzzlib.rs
index 3964174e..22ea0231 100644
--- a/src/devices/vsock/fuzz/fuzz_targets/fuzzlib.rs
+++ b/src/devices/vsock/fuzz/fuzz_targets/fuzzlib.rs
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: BSD-2-Clause-Patent
 
 use conquer_once::spin::OnceCell;
-pub use dma_alloc::{init, virtio_dma_alloc, virtio_dma_dealloc};
+pub use shared_alloc::{init, virtio_shared_alloc, virtio_shared_dealloc};
 pub use pci::{get_fuzz_seed_address, PciDevice, COMMON_HEADER};
 pub use virtio::{virtio_pci::VirtioPciTransport, virtqueue::VirtQueue, VirtioTransport};
 
@@ -16,17 +16,17 @@ pub const BARU64_2_OFFSET: u64 = 0x18;
 pub const BARU64_3_OFFSET: u64 = 0x20;
 
 pub const VEC_CAPACITY: usize = 0x10000_0000;
-pub const TD_PAYLOAD_DMA_SIZE: usize = 0x100_0000;
+pub const TD_PAYLOAD_SHARED_MEMORY_SIZE: usize = 0x100_0000;
 pub const PTR_ALIGN_VAR: u64 = 0xffff_ffff_ffff_0000;
 
 pub const DATA_LEN: usize = 0x100_0000;
 
-mod dma_alloc {
+mod shared_alloc {
     use bitmap_allocator::{BitAlloc, BitAlloc4K};
     use spin::Mutex;
 
-    static DMA_ALLOCATOR: Mutex<DmaAlloc> = Mutex::new(DmaAlloc::empty());
+    static SHARED_MEMORY_ALLOCATOR: Mutex<SharedAlloc> = Mutex::new(SharedAlloc::empty());
 
     pub fn init(dma_base: usize, dma_size: usize) {
         println!("init dma - {:#x} - {:#x}\n", dma_base, dma_base + dma_size);
@@ -35,18 +35,18 @@
     fn init_dma(dma_base: usize, dma_size: usize) {
         // set page table flags TBD:
-        *DMA_ALLOCATOR.lock() = DmaAlloc::new(dma_base as usize, dma_size);
+        *SHARED_MEMORY_ALLOCATOR.lock() = SharedAlloc::new(dma_base as usize, dma_size);
     }
 
     #[no_mangle]
-    pub extern "C" fn virtio_dma_alloc(blocks: usize) -> PhysAddr {
-        let paddr = unsafe { DMA_ALLOCATOR.lock().alloc_contiguous(blocks, 0) }.unwrap_or(0);
+    pub extern "C" fn virtio_shared_alloc(blocks: usize) -> PhysAddr {
+        let paddr = unsafe { SHARED_MEMORY_ALLOCATOR.lock().alloc_contiguous(blocks, 0) }.unwrap_or(0);
         paddr
     }
 
     #[no_mangle]
-    pub extern "C" fn virtio_dma_dealloc(paddr: PhysAddr, blocks: usize) -> i32 {
-        let _ = unsafe { DMA_ALLOCATOR.lock().dealloc_contiguous(paddr, blocks) };
+    pub extern "C" fn virtio_shared_dealloc(paddr: PhysAddr, blocks: usize) -> i32 {
+        let _ = unsafe { SHARED_MEMORY_ALLOCATOR.lock().dealloc_contiguous(paddr, blocks) };
         0
     }
 
@@ -63,14 +63,14 @@
     type VirtAddr = usize;
     type PhysAddr = usize;
 
-    struct DmaAlloc {
+    struct SharedAlloc {
        base: usize,
        inner: BitAlloc4K,
     }
 
     const BLOCK_SIZE: usize = 4096;
 
-    impl Default for DmaAlloc {
+    impl Default for SharedAlloc {
         fn default() -> Self {
             Self {
                 base: 0,
@@ -79,13 +79,13 @@
         }
     }
 
-    impl DmaAlloc {
+    impl SharedAlloc {
         pub fn new(base: usize, length: usize) -> Self {
             let mut inner = BitAlloc4K::DEFAULT;
             let blocks = length / BLOCK_SIZE;
             assert!(blocks <= BitAlloc4K::CAP);
             inner.insert(0..blocks);
-            DmaAlloc { base, inner }
+            SharedAlloc { base, inner }
         }
 
         const fn empty() -> Self {
diff --git a/src/devices/vsock/src/transport/virtio_pci.rs b/src/devices/vsock/src/transport/virtio_pci.rs
index cbaf2524..a2cb0262 100644
--- a/src/devices/vsock/src/transport/virtio_pci.rs
+++ b/src/devices/vsock/src/transport/virtio_pci.rs
@@ -49,7 +49,7 @@ impl DmaRecord {
 
 pub struct VirtioVsock {
     pub virtio_transport: Box,
-    dma_allocator: Box,
+    shared_allocator: Box,
     timer: Box,
     rx: RefCell,
     tx: RefCell,
@@ -65,7 +65,7 @@ impl VirtioVsock {
     pub fn new(
         mut virtio_transport: Box,
-        dma_allocator: Box,
+        shared_allocator: Box,
         timer: Box,
     ) -> Result {
         let transport = virtio_transport.as_mut();
@@ -106,7 +106,7 @@ impl VirtioVsock {
             VirtQueueLayout::new(QUEUE_SIZE as u16).ok_or(VirtioError::CreateVirtioQueue)?;
         // We have three queue for vsock (rx, tx and event)
         let queue_size = queue_layout.size() << 2;
-        let queue_dma_pages = dma_allocator
+        let queue_dma_pages = shared_allocator
             .alloc_pages(queue_size / PAGE_SIZE)
             .ok_or(VsockTransportError::DmaAllocation)?;
         dma_record.insert(queue_dma_pages, DmaRecord::new(queue_dma_pages, queue_size));
@@ -135,7 +135,7 @@ impl VirtioVsock {
 
         Ok(Self {
             virtio_transport,
-            dma_allocator,
+            shared_allocator,
             timer,
             rx: RefCell::new(queue_rx),
             tx: RefCell::new(queue_tx),
@@ -292,7 +292,7 @@ impl VirtioVsock {
     fn allocate_dma_memory(&mut self, size: usize) -> Option {
         let dma_size = align_up(size);
 
-        let dma_addr = self.dma_allocator.alloc_pages(dma_size / PAGE_SIZE)?;
+        let dma_addr = self.shared_allocator.alloc_pages(dma_size / PAGE_SIZE)?;
 
         let record = DmaRecord::new(dma_addr, dma_size);
         self.dma_record.insert(dma_addr, record);
@@ -303,7 +303,7 @@ impl VirtioVsock {
     fn free_dma_memory(&mut self, dma_addr: u64) -> Option {
         let record = self.dma_record.get(&dma_addr)?;
 
-        self.dma_allocator
+        self.shared_allocator
            .free_pages(record.dma_addr, record.dma_size / PAGE_SIZE);
 
         self.dma_record.remove(&dma_addr);
@@ -438,7 +438,7 @@ impl VsockTransport for VirtioVsock {
 impl Drop for VirtioVsock {
     fn drop(&mut self) {
         for record in &self.dma_record {
-            self.dma_allocator
+            self.shared_allocator
                .free_pages(record.1.dma_addr, record.1.dma_size / PAGE_SIZE)
         }
     }
diff --git a/src/devices/vsock/src/transport/vmcall.rs b/src/devices/vsock/src/transport/vmcall.rs
index ca31c6d1..7ff0824a 100644
--- a/src/devices/vsock/src/transport/vmcall.rs
+++ b/src/devices/vsock/src/transport/vmcall.rs
@@ -41,7 +41,7 @@ const VMCALL_SERVICE_MIGTD_GUID: guid::Guid = guid::Guid::from_fields(
 pub struct VmcallVsock {
     mid: u64,
     cid: u64,
-    dma_allocator: Box,
+    shared_allocator: Box,
     timer: Box,
     // DMA record table
     dma_record: BTreeMap,
@@ -51,7 +51,7 @@ impl VmcallVsock {
     pub fn new(
         mid: u64,
         cid: u64,
-        dma_allocator: Box,
+        shared_allocator: Box,
         timer: Box,
     ) -> Result {
         register_callback(VMCALL_VECTOR, vmcall_notification);
@@ -59,7 +59,7 @@
         Ok(Self {
             mid,
             cid,
-            dma_allocator,
+            shared_allocator,
             timer,
             dma_record: BTreeMap::new(),
         })
@@ -229,7 +229,7 @@ impl VmcallVsock {
     fn allocate_dma(&mut self, size: usize) -> Result<&'static mut [u8]> {
         let dma_size = align_up(size);
         let dma_addr = self
-            .dma_allocator
+            .shared_allocator
            .alloc_pages(dma_size / PAGE_SIZE)
            .ok_or(VsockTransportError::DmaAllocation)?;
 
@@ -237,7 +237,7 @@
     }
 
     fn free_dma(&mut self, dma: &[u8]) {
-        self.dma_allocator
+        self.shared_allocator
            .free_pages(dma.as_ptr() as u64, dma.len() / PAGE_SIZE);
     }
 }
@@ -313,7 +313,7 @@ impl VsockTransport for VmcallVsock {
 impl Drop for VmcallVsock {
     fn drop(&mut self) {
         for record in &self.dma_record {
-            self.dma_allocator
+            self.shared_allocator
                .free_pages(*record.0, *record.1 / PAGE_SIZE)
         }
     }
diff --git a/src/migtd/src/bin/migtd/main.rs b/src/migtd/src/bin/migtd/main.rs
index 682b492e..c510bbbe 100644
--- a/src/migtd/src/bin/migtd/main.rs
+++ b/src/migtd/src/bin/migtd/main.rs
@@ -96,8 +96,9 @@ fn handle_pre_mig() {
     #[cfg(all(feature = "coverage", feature = "tdx"))]
     {
         const MAX_COVERAGE_DATA_PAGE_COUNT: usize = 0x200;
-        let mut dma = td_payload::mm::dma::DmaMemory::new(MAX_COVERAGE_DATA_PAGE_COUNT)
-            .expect("New dma fail.");
-        let buffer = dma.as_mut_bytes();
+        let mut shared =
+            td_payload::mm::shared::SharedMemory::new(MAX_COVERAGE_DATA_PAGE_COUNT)
+                .expect("New shared memory fail.");
+        let buffer = shared.as_mut_bytes();
         let coverage_len = minicov::get_coverage_data_size();
         assert!(coverage_len < MAX_COVERAGE_DATA_PAGE_COUNT * td_paging::PAGE_SIZE);
diff --git a/src/migtd/src/driver/serial.rs b/src/migtd/src/driver/serial.rs
index e803a587..17b4f60c 100644
--- a/src/migtd/src/driver/serial.rs
+++ b/src/migtd/src/driver/serial.rs
@@ -4,7 +4,7 @@
 
 use alloc::boxed::Box;
 use core::sync::atomic::AtomicBool;
-use td_payload::mm::dma::{alloc_dma_pages, free_dma_pages};
+use td_payload::mm::shared::{alloc_shared_pages, free_shared_pages};
 use virtio_serial::*;
 
 use crate::driver::timer;
@@ -17,13 +17,13 @@ pub static TIMEOUT: AtomicBool = AtomicBool::new(false);
 // Implement a DMA allocator for vsock device
 struct Allocator;
 
-impl DmaPageAllocator for Allocator {
+impl SharedPageAllocator for Allocator {
     fn alloc_pages(&self, page_num: usize) -> Option<u64> {
-        unsafe { alloc_dma_pages(page_num).map(|addr| addr as u64) }
+        unsafe { alloc_shared_pages(page_num).map(|addr| addr as u64) }
     }
 
     fn free_pages(&self, addr: u64, page_num: usize) {
-        unsafe { free_dma_pages(addr as usize, page_num) }
+        unsafe { free_shared_pages(addr as usize, page_num) }
     }
 }
diff --git a/src/migtd/src/driver/vsock.rs b/src/migtd/src/driver/vsock.rs
index 00be3073..97d767fc 100644
--- a/src/migtd/src/driver/vsock.rs
+++ b/src/migtd/src/driver/vsock.rs
@@ -5,7 +5,7 @@
 use core::sync::atomic::AtomicBool;
 
 use alloc::boxed::Box;
-use td_payload::mm::dma::{alloc_dma_pages, free_dma_pages};
+use td_payload::mm::shared::{alloc_shared_pages, free_shared_pages};
 use vsock::{stream::VsockDevice, VsockDmaPageAllocator, VsockTimeout};
 
 use crate::driver::timer;
@@ -20,11 +20,11 @@ struct Allocator;
 
 impl VsockDmaPageAllocator for Allocator {
     fn alloc_pages(&self, page_num: usize) -> Option<u64> {
-        unsafe { alloc_dma_pages(page_num).map(|addr| addr as u64) }
+        unsafe { alloc_shared_pages(page_num).map(|addr| addr as u64) }
     }
 
     fn free_pages(&self, addr: u64, page_num: usize) {
-        unsafe { free_dma_pages(addr as usize, page_num) }
+        unsafe { free_shared_pages(addr as usize, page_num) }
     }
 }
diff --git a/src/migtd/src/lib.rs b/src/migtd/src/lib.rs
index f41996b2..ecfb470d 100644
--- a/src/migtd/src/lib.rs
+++ b/src/migtd/src/lib.rs
@@ -41,7 +41,7 @@ pub extern "C" fn _start(hob: u64, payload: u64) -> ! {
         heap_size: HEAP_SIZE,
         stack_size: STACK_SIZE,
         page_table_size: PT_SIZE,
-        dma_size: DEFAULT_DMA_SIZE,
+        shared_memory_size: DEFAULT_SHARED_MEMORY_SIZE,
         #[cfg(feature = "cet-shstk")]
         shadow_stack_size: DEFAULT_SHADOW_STACK_SIZE,
     };
diff --git a/src/migtd/src/migration/session.rs b/src/migtd/src/migration/session.rs
index ac75dde8..31cf2809 100644
--- a/src/migtd/src/migration/session.rs
+++ b/src/migtd/src/migration/session.rs
@@ -5,7 +5,7 @@
 use alloc::vec::Vec;
 use core::mem::size_of;
 use scroll::Pread;
-use td_payload::mm::dma::DmaMemory;
+use td_payload::mm::shared::SharedMemory;
 use td_uefi_pi::hob as hob_lib;
 use tdx_tdcall::tdx;
 use zerocopy::AsBytes;
@@ -60,8 +60,8 @@ impl MigrationSession {
 
     pub fn query() -> Result<()> {
         // Allocate one shared page for command and response buffer
-        let mut cmd_mem = DmaMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
-        let mut rsp_mem = DmaMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
+        let mut cmd_mem = SharedMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
+        let mut rsp_mem = SharedMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
 
         // Set Migration query command buffer
         let mut cmd = VmcallServiceCommand::new(cmd_mem.as_mut_bytes(), VMCALL_SERVICE_COMMON_GUID)
@@ -117,8 +117,8 @@ impl MigrationSession {
         match self.state {
             MigrationState::WaitForRequest => {
                 // Allocate shared page for command and response buffer
-                let mut cmd_mem = DmaMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
-                let mut rsp_mem = DmaMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
+                let mut cmd_mem = SharedMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
+                let mut rsp_mem = SharedMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
 
                 // Set Migration wait for request command buffer
                 let mut cmd =
@@ -209,8 +209,8 @@ impl MigrationSession {
 
     pub fn shutdown() -> Result<()> {
         // Allocate shared page for command and response buffer
-        let mut cmd_mem = DmaMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
-        let mut rsp_mem = DmaMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
+        let mut cmd_mem = SharedMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
+        let mut rsp_mem = SharedMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
 
         // Set Command
         let mut cmd = VmcallServiceCommand::new(cmd_mem.as_mut_bytes(), VMCALL_SERVICE_MIGTD_GUID)
@@ -233,8 +233,8 @@ impl MigrationSession {
         };
 
         // Allocate shared page for command and response buffer
-        let mut cmd_mem = DmaMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
-        let mut rsp_mem = DmaMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
+        let mut cmd_mem = SharedMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
+        let mut rsp_mem = SharedMemory::new(1).ok_or(MigrationResult::OutOfResource)?;
 
         // Set Command
         let mut cmd = VmcallServiceCommand::new(cmd_mem.as_mut_bytes(), VMCALL_SERVICE_MIGTD_GUID)
diff --git a/tests/test-td-payload/src/main.rs b/tests/test-td-payload/src/main.rs
index b9fed1d6..9ce48127 100644
--- a/tests/test-td-payload/src/main.rs
+++ b/tests/test-td-payload/src/main.rs
@@ -126,9 +126,9 @@ pub extern "C" fn main() {
     #[cfg(all(feature = "coverage", feature = "tdx"))]
     {
         const MAX_COVERAGE_DATA_PAGE_COUNT: usize = 0x200;
-        let mut dma = td_payload::mm::dma::DmaMemory::new(MAX_COVERAGE_DATA_PAGE_COUNT)
-            .expect("New dma fail.");
-        let buffer = dma.as_mut_bytes();
+        let mut shared = td_payload::mm::shared::SharedMemory::new(MAX_COVERAGE_DATA_PAGE_COUNT)
+            .expect("New shared memory fail.");
+        let buffer = shared.as_mut_bytes();
         let coverage_len = minicov::get_coverage_data_size();
         assert!(coverage_len < MAX_COVERAGE_DATA_PAGE_COUNT * td_paging::PAGE_SIZE);
         minicov::capture_coverage_to_buffer(&mut buffer[0..coverage_len]);