From 6785a9b41341abeeb2b949efe91a3924d916dc6d Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 13 Feb 2023 00:39:13 +0900 Subject: [PATCH] bpf: Support atomic CAS --- src/imp/core_atomic.rs | 11 + src/imp/fallback/bpf_spin_lock.rs | 702 ++++++++++++++++++++++++++++++ src/imp/float.rs | 2 + src/imp/mod.rs | 36 +- src/lib.rs | 119 ++++- 5 files changed, 868 insertions(+), 2 deletions(-) create mode 100644 src/imp/fallback/bpf_spin_lock.rs diff --git a/src/imp/core_atomic.rs b/src/imp/core_atomic.rs index d53e79acd..109c3d412 100644 --- a/src/imp/core_atomic.rs +++ b/src/imp/core_atomic.rs @@ -5,10 +5,12 @@ use core::sync::atomic::Ordering; +#[cfg(not(target_arch = "bpf"))] #[repr(transparent)] pub(crate) struct AtomicBool { inner: core::sync::atomic::AtomicBool, } +#[cfg(not(target_arch = "bpf"))] impl AtomicBool { #[inline] pub(crate) const fn new(v: bool) -> Self { @@ -43,9 +45,11 @@ impl AtomicBool { self.inner.store(val, order); } } +#[cfg(not(target_arch = "bpf"))] #[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))] #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))] no_fetch_ops_impl!(AtomicBool, bool); +#[cfg(not(target_arch = "bpf"))] #[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))] #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))] impl AtomicBool { @@ -78,6 +82,7 @@ impl AtomicBool { self.inner.compare_exchange_weak(current, new, success, failure) } } +#[cfg(not(target_arch = "bpf"))] impl core::ops::Deref for AtomicBool { type Target = core::sync::atomic::AtomicBool; #[inline] @@ -411,12 +416,18 @@ macro_rules! atomic_int { atomic_int!(int, AtomicIsize, isize); atomic_int!(uint, AtomicUsize, usize); +#[cfg(not(target_arch = "bpf"))] atomic_int!(int, AtomicI8, i8); +#[cfg(not(target_arch = "bpf"))] atomic_int!(uint, AtomicU8, u8); +#[cfg(not(target_arch = "bpf"))] atomic_int!(int, AtomicI16, i16); +#[cfg(not(target_arch = "bpf"))] atomic_int!(uint, AtomicU16, u16); +#[cfg(not(target_arch = "bpf"))] #[cfg(not(target_pointer_width = "16"))] // cfg(target_has_atomic_load_store = "32") atomic_int!(int, AtomicI32, i32); +#[cfg(not(target_arch = "bpf"))] #[cfg(not(target_pointer_width = "16"))] // cfg(target_has_atomic_load_store = "32") atomic_int!(uint, AtomicU32, u32); #[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))] diff --git a/src/imp/fallback/bpf_spin_lock.rs b/src/imp/fallback/bpf_spin_lock.rs new file mode 100644 index 000000000..b45c9dcc1 --- /dev/null +++ b/src/imp/fallback/bpf_spin_lock.rs @@ -0,0 +1,702 @@ +// Atomic implementation on BPF. + +#[cfg(not(feature = "fallback"))] +pub(crate) use super::core_atomic::{AtomicI64, AtomicIsize, AtomicPtr, AtomicU64, AtomicUsize}; +pub(crate) use fallback::{ + AtomicBool, AtomicI16, AtomicI32, AtomicI8, AtomicU16, AtomicU32, AtomicU8, +}; +#[cfg(feature = "fallback")] +pub(crate) use fallback::{ + AtomicI128, AtomicI64, AtomicIsize, AtomicPtr, AtomicU128, AtomicU64, AtomicUsize, +}; + +mod fallback { + // Fallback implementation using global locks on BPF. + // + // This implementation uses spinlock for global locks. + // + // Note that we cannot use a lock per atomic type, since the in-memory representation of the atomic + // type and the value type must be the same. 
+
+    use core::{
+        cell::UnsafeCell,
+        intrinsics,
+        sync::atomic::{self, Ordering},
+    };
+
+    use crate::utils::{Backoff, CachePadded};
+
+    const IS_ALWAYS_LOCK_FREE: bool = false;
+
+    #[inline]
+    unsafe fn atomic_swap(dst: *mut u64, val: u64, order: Ordering) -> u64 {
+        // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
+        unsafe {
+            match order {
+                Ordering::Acquire => intrinsics::atomic_xchg_acquire(dst, val),
+                Ordering::Release => intrinsics::atomic_xchg_release(dst, val),
+                Ordering::AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
+                Ordering::Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
+                Ordering::SeqCst => intrinsics::atomic_xchg_seqcst(dst, val),
+                _ => unreachable!("{:?}", order),
+            }
+        }
+    }
+    #[inline]
+    fn swap(a: &atomic::AtomicU64, val: u64, order: Ordering) -> u64 {
+        // SAFETY: atomic::AtomicU64 is #[repr(C)] and internally UnsafeCell<u64>.
+        // See also https://github.com/rust-lang/rust/pull/66705 and
+        // https://github.com/rust-lang/rust/issues/66136#issuecomment-557867116.
+        let dst = unsafe {
+            (*(a as *const atomic::AtomicU64 as *const core::cell::UnsafeCell<u64>)).get()
+        };
+        // SAFETY: any data races are prevented by atomic intrinsics and the raw
+        // pointer passed in is valid because we got it from a reference.
+        unsafe { atomic_swap(dst, val, order) }
+    }
+
+    struct Spinlock {
+        state: atomic::AtomicU64,
+    }
+
+    impl Spinlock {
+        #[inline]
+        const fn new() -> Self {
+            Self { state: atomic::AtomicU64::new(0) }
+        }
+
+        #[inline]
+        fn lock(&self) -> SpinlockGuard<'_> {
+            let mut backoff = Backoff::new();
+            loop {
+                if swap(&self.state, 1, Ordering::Acquire) == 0 {
+                    return SpinlockGuard { lock: self };
+                }
+
+                while self.state.load(Ordering::Relaxed) == 1 {
+                    backoff.snooze();
+                }
+            }
+        }
+    }
+
+    #[must_use]
+    struct SpinlockGuard<'a> {
+        /// The parent lock.
+        lock: &'a Spinlock,
+    }
+
+    impl Drop for SpinlockGuard<'_> {
+        #[inline]
+        fn drop(&mut self) {
+            self.lock.state.store(0, Ordering::Release);
+        }
+    }
+
+    // Adapted from https://github.com/crossbeam-rs/crossbeam/blob/crossbeam-utils-0.8.7/crossbeam-utils/src/atomic/atomic_cell.rs#L969-L1016.
+    #[inline]
+    fn lock(addr: usize) -> SpinlockGuard<'static> {
+        // The number of locks is a prime number because we want to make sure `addr % LEN` gets
+        // dispersed across all locks.
+        const LEN: usize = 67;
+        #[allow(clippy::declare_interior_mutable_const)]
+        const L: CachePadded<Spinlock> = CachePadded::new(Spinlock::new());
+        static LOCKS: [CachePadded<Spinlock>; LEN] = [
+            L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
+            L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
+            L, L, L, L, L, L, L, L, L,
+        ];
+
+        // If the modulus is a constant number, the compiler will use crazy math to transform this into
+        // a sequence of cheap arithmetic operations rather than using the slow modulo instruction.
+        let lock = &LOCKS[addr % LEN];
+        lock.lock()
+    }
+
+    #[repr(C, align(1))]
+    pub(crate) struct AtomicBool {
+        v: UnsafeCell<u8>,
+    }
+
+    // Send is implicitly implemented.
+    // SAFETY: any data races are prevented by the lock.
+    unsafe impl Sync for AtomicBool {}
+
+    #[cfg(feature = "fallback")]
+    no_fetch_ops_impl!(AtomicBool, bool);
+    impl AtomicBool {
+        #[inline]
+        pub(crate) const fn new(v: bool) -> Self {
+            Self { v: UnsafeCell::new(v as u8) }
+        }
+
+        #[inline]
+        pub(crate) fn is_lock_free() -> bool {
+            Self::is_always_lock_free()
+        }
+        #[inline]
+        pub(crate) const fn is_always_lock_free() -> bool {
+            IS_ALWAYS_LOCK_FREE
+        }
+
+        #[inline]
+        pub(crate) fn get_mut(&mut self) -> &mut bool {
+            // SAFETY: the mutable reference guarantees unique ownership.
+            unsafe { &mut *(self.v.get() as *mut bool) }
+        }
+
+        #[inline]
+        pub(crate) fn into_inner(self) -> bool {
+            self.v.into_inner() != 0
+        }
+
+        #[inline]
+        #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+        pub(crate) fn load(&self, order: Ordering) -> bool {
+            crate::utils::assert_load_ordering(order);
+            // SAFETY: any data races are prevented by the lock and the raw
+            // pointer passed in is valid because we got it from a reference.
+            unsafe {
+                let _guard = lock(self.v.get() as usize);
+                self.v.get().read() != 0
+            }
+        }
+
+        #[inline]
+        #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+        pub(crate) fn store(&self, val: bool, order: Ordering) {
+            crate::utils::assert_store_ordering(order);
+            // SAFETY: any data races are prevented by the lock and the raw
+            // pointer passed in is valid because we got it from a reference.
+            unsafe {
+                let _guard = lock(self.v.get() as usize);
+                self.v.get().write(val as u8);
+            }
+        }
+
+        #[cfg(feature = "fallback")]
+        #[inline]
+        pub(crate) fn swap(&self, val: bool, _order: Ordering) -> bool {
+            // SAFETY: any data races are prevented by the lock and the raw
+            // pointer passed in is valid because we got it from a reference.
+            unsafe {
+                let _guard = lock(self.v.get() as usize);
+                self.v.get().replace(val as u8) != 0
+            }
+        }
+
+        #[cfg(feature = "fallback")]
+        #[inline]
+        #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+        pub(crate) fn compare_exchange(
+            &self,
+            current: bool,
+            new: bool,
+            success: Ordering,
+            failure: Ordering,
+        ) -> Result<bool, bool> {
+            crate::utils::assert_compare_exchange_ordering(success, failure);
+            // SAFETY: any data races are prevented by the lock and the raw
+            // pointer passed in is valid because we got it from a reference.
+            unsafe {
+                let _guard = lock(self.v.get() as usize);
+                let result = self.v.get().read();
+                if result == current as u8 {
+                    self.v.get().write(new as u8);
+                    Ok(result != 0)
+                } else {
+                    Err(result != 0)
+                }
+            }
+        }
+
+        #[cfg(feature = "fallback")]
+        #[inline]
+        #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+        pub(crate) fn compare_exchange_weak(
+            &self,
+            current: bool,
+            new: bool,
+            success: Ordering,
+            failure: Ordering,
+        ) -> Result<bool, bool> {
+            self.compare_exchange(current, new, success, failure)
+        }
+
+        #[cfg(feature = "fallback")]
+        #[inline]
+        pub(crate) fn fetch_and(&self, val: bool, _order: Ordering) -> bool {
+            // SAFETY: any data races are prevented by the lock and the raw
+            // pointer passed in is valid because we got it from a reference.
+            unsafe {
+                let _guard = lock(self.v.get() as usize);
+                let result = self.v.get().read();
+                self.v.get().write(result & val as u8);
+                result != 0
+            }
+        }
+
+        #[cfg(feature = "fallback")]
+        #[inline]
+        pub(crate) fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
+            if val {
+                // !(x & true) == !x
+                // We must invert the bool.
+                self.fetch_xor(true, order)
+            } else {
+                // !(x & false) == true
+                // We must set the bool to true.
+                self.swap(true, order)
+            }
+        }
+
+        #[cfg(feature = "fallback")]
+        #[inline]
+        pub(crate) fn fetch_or(&self, val: bool, _order: Ordering) -> bool {
+            // SAFETY: any data races are prevented by the lock and the raw
+            // pointer passed in is valid because we got it from a reference.
+            unsafe {
+                let _guard = lock(self.v.get() as usize);
+                let result = self.v.get().read();
+                self.v.get().write(result | val as u8);
+                result != 0
+            }
+        }
+
+        #[cfg(feature = "fallback")]
+        #[inline]
+        pub(crate) fn fetch_xor(&self, val: bool, _order: Ordering) -> bool {
+            // SAFETY: any data races are prevented by the lock and the raw
+            // pointer passed in is valid because we got it from a reference.
+            unsafe {
+                let _guard = lock(self.v.get() as usize);
+                let result = self.v.get().read();
+                self.v.get().write(result ^ val as u8);
+                result != 0
+            }
+        }
+    }
+
+    #[cfg(feature = "fallback")]
+    #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
+    #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
+    #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
+    #[cfg_attr(target_pointer_width = "128", repr(C, align(16)))]
+    pub(crate) struct AtomicPtr<T> {
+        p: UnsafeCell<*mut T>,
+    }
+
+    #[cfg(feature = "fallback")]
+    // SAFETY: any data races are prevented by the lock.
+    unsafe impl<T> Send for AtomicPtr<T> {}
+    #[cfg(feature = "fallback")]
+    // SAFETY: any data races are prevented by the lock.
+    unsafe impl<T> Sync for AtomicPtr<T> {}
+
+    #[cfg(feature = "fallback")]
+    impl<T> AtomicPtr<T> {
+        #[inline]
+        pub(crate) const fn new(p: *mut T) -> Self {
+            Self { p: UnsafeCell::new(p) }
+        }
+
+        #[inline]
+        pub(crate) fn is_lock_free() -> bool {
+            Self::is_always_lock_free()
+        }
+        #[inline]
+        pub(crate) const fn is_always_lock_free() -> bool {
+            IS_ALWAYS_LOCK_FREE
+        }
+
+        #[inline]
+        pub(crate) fn get_mut(&mut self) -> &mut *mut T {
+            // SAFETY: the mutable reference guarantees unique ownership.
+            // (UnsafeCell::get_mut requires Rust 1.50)
+            unsafe { &mut *self.p.get() }
+        }
+
+        #[inline]
+        pub(crate) fn into_inner(self) -> *mut T {
+            self.p.into_inner()
+        }
+
+        #[inline]
+        #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+        pub(crate) fn load(&self, order: Ordering) -> *mut T {
+            crate::utils::assert_load_ordering(order);
+            // SAFETY: any data races are prevented by the lock and the raw
+            // pointer passed in is valid because we got it from a reference.
+            unsafe {
+                let _guard = lock(self.p.get() as usize);
+                self.p.get().read()
+            }
+        }
+
+        #[inline]
+        #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+        pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
+            crate::utils::assert_store_ordering(order);
+            // SAFETY: any data races are prevented by the lock and the raw
+            // pointer passed in is valid because we got it from a reference.
+            unsafe {
+                let _guard = lock(self.p.get() as usize);
+                self.p.get().write(ptr);
+            }
+        }
+
+        #[inline]
+        pub(crate) fn swap(&self, ptr: *mut T, _order: Ordering) -> *mut T {
+            // SAFETY: any data races are prevented by the lock and the raw
+            // pointer passed in is valid because we got it from a reference.
+ unsafe { + let _guard = lock(self.p.get() as usize); + self.p.get().replace(ptr) + } + } + + #[inline] + #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)] + pub(crate) fn compare_exchange( + &self, + current: *mut T, + new: *mut T, + success: Ordering, + failure: Ordering, + ) -> Result<*mut T, *mut T> { + crate::utils::assert_compare_exchange_ordering(success, failure); + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.p.get() as usize); + let result = self.p.get().read(); + if result == current { + self.p.get().write(new); + Ok(result) + } else { + Err(result) + } + } + } + + #[inline] + #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)] + pub(crate) fn compare_exchange_weak( + &self, + current: *mut T, + new: *mut T, + success: Ordering, + failure: Ordering, + ) -> Result<*mut T, *mut T> { + self.compare_exchange(current, new, success, failure) + } + } + + macro_rules! atomic_int { + (uint, $atomic_type:ident, $int_type:ident, $align:expr) => { + #[repr(C, align($align))] + pub(crate) struct $atomic_type { + v: UnsafeCell<$int_type>, + } + + // Send is implicitly implemented. + // SAFETY: any data races are prevented by the lock. + unsafe impl Sync for $atomic_type {} + + #[cfg(feature = "fallback")] + no_fetch_ops_impl!($atomic_type, $int_type); + impl $atomic_type { + #[inline] + pub(crate) const fn new(v: $int_type) -> Self { + Self { v: UnsafeCell::new(v) } + } + + #[inline] + pub(crate) fn is_lock_free() -> bool { + Self::is_always_lock_free() + } + #[inline] + pub(crate) const fn is_always_lock_free() -> bool { + IS_ALWAYS_LOCK_FREE + } + + #[inline] + pub(crate) fn get_mut(&mut self) -> &mut $int_type { + // SAFETY: the mutable reference guarantees unique ownership. + // (UnsafeCell::get_mut requires Rust 1.50) + unsafe { &mut *self.v.get() } + } + + #[inline] + pub(crate) fn into_inner(self) -> $int_type { + self.v.into_inner() + } + + #[inline] + #[cfg_attr( + all(debug_assertions, not(portable_atomic_no_track_caller)), + track_caller + )] + pub(crate) fn load(&self, order: Ordering) -> $int_type { + crate::utils::assert_load_ordering(order); + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + self.v.get().read() + } + } + + #[inline] + #[cfg_attr( + all(debug_assertions, not(portable_atomic_no_track_caller)), + track_caller + )] + pub(crate) fn store(&self, val: $int_type, order: Ordering) { + crate::utils::assert_store_ordering(order); + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + self.v.get().write(val) + } + } + + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn swap(&self, val: $int_type, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. 
+ unsafe { + let _guard = lock(self.v.get() as usize); + self.v.get().replace(val) + } + } + + #[cfg(feature = "fallback")] + #[inline] + #[cfg_attr( + all(debug_assertions, not(portable_atomic_no_track_caller)), + track_caller + )] + pub(crate) fn compare_exchange( + &self, + current: $int_type, + new: $int_type, + success: Ordering, + failure: Ordering, + ) -> Result<$int_type, $int_type> { + crate::utils::assert_compare_exchange_ordering(success, failure); + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + if result == current { + self.v.get().write(new); + Ok(result) + } else { + Err(result) + } + } + } + + #[cfg(feature = "fallback")] + #[inline] + #[cfg_attr( + all(debug_assertions, not(portable_atomic_no_track_caller)), + track_caller + )] + pub(crate) fn compare_exchange_weak( + &self, + current: $int_type, + new: $int_type, + success: Ordering, + failure: Ordering, + ) -> Result<$int_type, $int_type> { + self.compare_exchange(current, new, success, failure) + } + + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn fetch_add(&self, val: $int_type, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + self.v.get().write(result.wrapping_add(val)); + result + } + } + + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn fetch_sub(&self, val: $int_type, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + self.v.get().write(result.wrapping_sub(val)); + result + } + } + + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn fetch_and(&self, val: $int_type, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + self.v.get().write(result & val); + result + } + } + + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + self.v.get().write(!(result & val)); + result + } + } + + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn fetch_or(&self, val: $int_type, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + self.v.get().write(result | val); + result + } + } + + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn fetch_xor(&self, val: $int_type, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. 
+ unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + self.v.get().write(result ^ val); + result + } + } + + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn fetch_max(&self, val: $int_type, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + self.v.get().write(core::cmp::max(result, val)); + result + } + } + + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn fetch_min(&self, val: $int_type, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + self.v.get().write(core::cmp::min(result, val)); + result + } + } + + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn fetch_not(&self, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + self.v.get().write(!result); + result + } + } + #[cfg(feature = "fallback")] + #[inline] + pub(crate) fn not(&self, order: Ordering) { + self.fetch_not(order); + } + } + }; + (int, $atomic_type:ident, $int_type:ident, $align:expr) => { + atomic_int!(uint, $atomic_type, $int_type, $align); + #[cfg(feature = "fallback")] + impl $atomic_type { + #[inline] + pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type { + // SAFETY: any data races are prevented by the lock and the raw + // pointer passed in is valid because we got it from a reference. + unsafe { + let _guard = lock(self.v.get() as usize); + let result = self.v.get().read(); + self.v.get().write(result.wrapping_neg()); + result + } + } + #[inline] + pub(crate) fn neg(&self, order: Ordering) { + self.fetch_neg(order); + } + } + }; + } + + #[cfg(feature = "fallback")] + #[cfg(target_pointer_width = "64")] + atomic_int!(int, AtomicIsize, isize, 8); + #[cfg(feature = "fallback")] + #[cfg(target_pointer_width = "64")] + atomic_int!(uint, AtomicUsize, usize, 8); + atomic_int!(int, AtomicI8, i8, 1); + atomic_int!(uint, AtomicU8, u8, 1); + atomic_int!(int, AtomicI16, i16, 2); + atomic_int!(uint, AtomicU16, u16, 2); + atomic_int!(int, AtomicI32, i32, 4); + atomic_int!(uint, AtomicU32, u32, 4); + #[cfg(feature = "fallback")] + atomic_int!(int, AtomicI64, i64, 8); + #[cfg(feature = "fallback")] + atomic_int!(uint, AtomicU64, u64, 8); + #[cfg(feature = "fallback")] + atomic_int!(int, AtomicI128, i128, 16); + #[cfg(feature = "fallback")] + atomic_int!(uint, AtomicU128, u128, 16); +} + +#[cfg(test)] +mod tests { + use super::*; + + test_atomic_int!(i8); + test_atomic_int!(u8); + test_atomic_int!(i16); + test_atomic_int!(u16); + test_atomic_int!(i32); + test_atomic_int!(u32); + test_atomic_int!(i64); + test_atomic_int!(u64); + test_atomic_int!(i128); + test_atomic_int!(u128); +} diff --git a/src/imp/float.rs b/src/imp/float.rs index 446131656..bd02e016a 100644 --- a/src/imp/float.rs +++ b/src/imp/float.rs @@ -76,6 +76,7 @@ macro_rules! atomic_float { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -86,6 +87,7 @@ macro_rules! 
atomic_float { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] impl $atomic_type { diff --git a/src/imp/mod.rs b/src/imp/mod.rs index 480ef31f8..44730020a 100644 --- a/src/imp/mod.rs +++ b/src/imp/mod.rs @@ -3,7 +3,13 @@ // cfg(target_has_atomic_load_store = "ptr") #[cfg(not(any( - portable_atomic_no_atomic_load_store, + all( + portable_atomic_no_atomic_load_store, + not(all( + target_arch = "bpf", + not(any(feature = "fallback", feature = "critical-section")), + )) + ), portable_atomic_unsafe_assume_single_core, target_arch = "avr", target_arch = "msp430", @@ -94,6 +100,7 @@ mod x86; // Lock-based fallback implementations #[cfg(feature = "fallback")] +#[cfg(not(target_arch = "bpf"))] #[cfg(any( test, not(any( @@ -121,6 +128,11 @@ mod x86; #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))] mod fallback; +#[cfg(target_arch = "bpf")] +#[cfg(not(feature = "critical-section"))] +#[path = "fallback/bpf_spin_lock.rs"] +mod bpf; + // ----------------------------------------------------------------------------- // Critical section based fallback implementations @@ -164,6 +176,7 @@ pub(crate) mod float; portable_atomic_unsafe_assume_single_core, target_arch = "avr", target_arch = "msp430", + target_arch = "bpf", )))] #[cfg_attr( portable_atomic_no_cfg_target_has_atomic, @@ -199,6 +212,12 @@ pub(crate) use self::riscv::{ pub(crate) use self::interrupt::{ AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize, }; +// bpf +#[cfg(target_arch = "bpf")] +#[cfg(not(feature = "critical-section"))] +pub(crate) use self::bpf::{ + AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize, +}; // Atomic{I,U}32 #[cfg(not(any( @@ -206,6 +225,7 @@ pub(crate) use self::interrupt::{ portable_atomic_unsafe_assume_single_core, target_arch = "avr", target_arch = "msp430", + target_arch = "bpf", )))] #[cfg_attr( portable_atomic_no_cfg_target_has_atomic, @@ -236,11 +256,16 @@ pub(crate) use self::riscv::{AtomicI32, AtomicU32}; #[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(portable_atomic_no_atomic_cas))] #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(not(target_has_atomic = "ptr")))] pub(crate) use self::interrupt::{AtomicI32, AtomicU32}; +// bpf +#[cfg(target_arch = "bpf")] +#[cfg(not(feature = "critical-section"))] +pub(crate) use self::bpf::{AtomicI32, AtomicU32}; // Atomic{I,U}64 #[cfg(not(any( portable_atomic_no_atomic_load_store, portable_atomic_unsafe_assume_single_core, + target_arch = "bpf", )))] #[cfg_attr( portable_atomic_no_cfg_target_has_atomic, @@ -301,6 +326,10 @@ pub(crate) use self::fallback::{AtomicI64, AtomicU64}; #[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(portable_atomic_no_atomic_cas))] #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(not(target_has_atomic = "ptr")))] pub(crate) use self::interrupt::{AtomicI64, AtomicU64}; +// bpf +#[cfg(target_arch = "bpf")] +#[cfg(not(feature = "critical-section"))] +pub(crate) use self::bpf::{AtomicI64, AtomicU64}; // Atomic{I,U}128 // aarch64 stable @@ -369,3 +398,8 @@ pub(crate) use self::fallback::{AtomicI128, AtomicU128}; #[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(portable_atomic_no_atomic_cas))] #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(not(target_has_atomic = "ptr")))] pub(crate) use self::interrupt::{AtomicI128, AtomicU128}; +// bpf +#[cfg(feature = "fallback")] +#[cfg(target_arch = 
"bpf")] +#[cfg(not(feature = "critical-section"))] +pub(crate) use self::bpf::{AtomicI128, AtomicU128}; diff --git a/src/lib.rs b/src/lib.rs index ccd2221b7..542af501c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -226,6 +226,7 @@ See also [the `atomic128` module's readme](https://github.com/taiki-e/portable-a ), feature(asm_experimental_arch) )] +#![cfg_attr(target_arch = "bpf", feature(core_intrinsics))] // Old nightly only // These features are already stabilized or have already been removed from compilers, // and can safely be enabled for old nightly as long as version detection works. @@ -665,6 +666,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -675,6 +677,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -725,7 +728,8 @@ impl AtomicBool { portable_atomic_unsafe_assume_single_core, feature = "critical-section", target_arch = "avr", - target_arch = "msp430", + target_arch = "msp430" + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -736,6 +740,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -794,6 +799,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -804,6 +810,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -856,6 +863,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -866,6 +874,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -919,6 +928,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -929,6 +939,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -974,6 +985,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -984,6 +996,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1028,6 +1041,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1038,6 +1052,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1091,6 +1106,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1101,6 +1117,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] 
@@ -1145,6 +1162,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1155,6 +1173,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1208,6 +1227,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1218,6 +1238,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1258,6 +1279,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1268,6 +1290,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1317,6 +1340,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1327,6 +1351,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1389,6 +1414,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1399,6 +1425,7 @@ impl AtomicBool { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1662,6 +1689,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1672,6 +1700,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1716,6 +1745,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1726,6 +1756,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1784,6 +1815,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1794,6 +1826,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1870,6 +1903,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1880,6 +1914,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -1960,6 +1995,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -1970,6 +2006,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = 
"msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T { @@ -2015,6 +2052,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2025,6 +2063,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T { @@ -2066,6 +2105,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2076,6 +2116,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T { @@ -2129,6 +2170,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2139,6 +2181,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T { @@ -2207,6 +2250,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2217,6 +2261,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T { @@ -2283,6 +2328,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2293,6 +2339,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T { @@ -2358,6 +2405,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2368,6 +2416,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T { @@ -2400,6 +2449,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2410,6 +2460,7 @@ impl AtomicPtr { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] fn as_atomic_usize(&self) -> &AtomicUsize { @@ -2702,6 +2753,7 @@ assert_eq!(some_var.swap(10, Ordering::Relaxed), 5); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2712,6 +2764,7 @@ assert_eq!(some_var.swap(10, Ordering::Relaxed), 5); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -2767,6 +2820,7 @@ assert_eq!(some_var.load(Ordering::Relaxed), 10); 
feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2777,6 +2831,7 @@ assert_eq!(some_var.load(Ordering::Relaxed), 10); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -2841,6 +2896,7 @@ loop { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2851,6 +2907,7 @@ loop { feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -2897,6 +2954,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2907,6 +2965,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -2948,6 +3007,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -2958,6 +3018,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -2993,6 +3054,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3003,6 +3065,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3044,6 +3107,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3054,6 +3118,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3092,6 +3157,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b100001); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3102,6 +3168,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b100001); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3149,6 +3216,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b100001); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3159,6 +3227,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b100001); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3197,6 +3266,7 @@ assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31)); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3207,6 +3277,7 @@ assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31)); feature = "critical-section", 
target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3245,6 +3316,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b111111); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3255,6 +3327,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b111111); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3302,6 +3375,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b111111); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3312,6 +3386,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b111111); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3350,6 +3425,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b011110); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3360,6 +3436,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b011110); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3407,6 +3484,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b011110); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3417,6 +3495,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b011110); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3475,6 +3554,7 @@ assert_eq!(x.load(Ordering::SeqCst), 9); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3485,6 +3565,7 @@ assert_eq!(x.load(Ordering::SeqCst), 9); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3553,6 +3634,7 @@ assert!(max_foo == 42); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3563,6 +3645,7 @@ assert!(max_foo == 42); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3614,6 +3697,7 @@ assert_eq!(min_foo, 12); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3624,6 +3708,7 @@ assert_eq!(min_foo, 12); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3659,6 +3744,7 @@ assert_eq!(foo.load(Ordering::Relaxed), !0); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3669,6 +3755,7 @@ assert_eq!(foo.load(Ordering::Relaxed), !0); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3708,6 +3795,7 @@ assert_eq!(foo.load(Ordering::Relaxed), !0); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + 
all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3718,6 +3806,7 @@ assert_eq!(foo.load(Ordering::Relaxed), !0); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3766,6 +3855,7 @@ assert_eq!(foo.load(Ordering::Relaxed), 5); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3776,6 +3866,7 @@ assert_eq!(foo.load(Ordering::Relaxed), 5); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3816,6 +3907,7 @@ assert_eq!(foo.load(Ordering::Relaxed), 5); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -3826,6 +3918,7 @@ assert_eq!(foo.load(Ordering::Relaxed), 5); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -3992,6 +4085,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -4002,6 +4096,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -4035,6 +4130,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -4045,6 +4141,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -4087,6 +4184,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -4097,6 +4195,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -4128,6 +4227,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -4138,6 +4238,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -4161,6 +4262,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -4171,6 +4273,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] 
#[inline] @@ -4215,6 +4318,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -4225,6 +4329,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -4267,6 +4372,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -4277,6 +4383,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -4303,6 +4410,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -4313,6 +4421,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -4336,6 +4445,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -4346,6 +4456,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -4370,6 +4481,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[cfg_attr( @@ -4380,6 +4492,7 @@ This type has the same in-memory representation as the underlying floating point feature = "critical-section", target_arch = "avr", target_arch = "msp430", + all(target_arch = "bpf", feature = "fallback"), )) )] #[inline] @@ -4538,6 +4651,7 @@ atomic_int!(AtomicU64, u64, 8); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + target_arch = "bpf", )) )] #[cfg_attr( @@ -4548,6 +4662,7 @@ atomic_int!(AtomicU64, u64, 8); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + target_arch = "bpf", )) )] atomic_int!(AtomicI128, i128, 16); @@ -4586,6 +4701,7 @@ atomic_int!(AtomicI128, i128, 16); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + target_arch = "bpf", )) )] #[cfg_attr( @@ -4596,6 +4712,7 @@ atomic_int!(AtomicI128, i128, 16); feature = "critical-section", target_arch = "avr", target_arch = "msp430", + target_arch = "bpf", )) )] atomic_int!(AtomicU128, u128, 16);
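
For reference, the locking scheme that bpf_spin_lock.rs builds on reduces to the
following self-contained sketch. The names here (Spinlock, FallbackAtomicU32, LEN)
are illustrative only: the patch's real lock state is a u64 driven by the BPF xchg
intrinsic, wrapped in CachePadded, with a Backoff-based wait loop, while this sketch
substitutes a plain AtomicUsize so it runs anywhere:

    use core::cell::UnsafeCell;
    use core::sync::atomic::{AtomicUsize, Ordering};

    // Test-and-test-and-set spinlock; the Acquire on the swap pairs with the
    // Release store in unlock().
    struct Spinlock(AtomicUsize);

    impl Spinlock {
        const fn new() -> Self {
            Spinlock(AtomicUsize::new(0))
        }
        fn lock(&self) {
            while self.0.swap(1, Ordering::Acquire) != 0 {
                // Spin read-only until the lock looks free to limit cache traffic.
                while self.0.load(Ordering::Relaxed) != 0 {
                    core::hint::spin_loop();
                }
            }
        }
        fn unlock(&self) {
            self.0.store(0, Ordering::Release);
        }
    }

    // A prime number of stripes so `addr % LEN` disperses across all locks.
    const LEN: usize = 67;
    static LOCKS: [Spinlock; LEN] = {
        const L: Spinlock = Spinlock::new();
        [L; LEN]
    };

    // Hypothetical lock-backed atomic: same layout as a bare u32, with the
    // stripe chosen by the cell's address.
    struct FallbackAtomicU32 {
        v: UnsafeCell<u32>,
    }
    // SAFETY: all access to `v` is serialized by the stripe lock.
    unsafe impl Sync for FallbackAtomicU32 {}

    impl FallbackAtomicU32 {
        fn compare_exchange(&self, current: u32, new: u32) -> Result<u32, u32> {
            let lock = &LOCKS[self.v.get() as usize % LEN];
            lock.lock();
            // SAFETY: the stripe lock serializes all access to this cell.
            let old = unsafe { *self.v.get() };
            if old == current {
                // SAFETY: as above.
                unsafe { *self.v.get() = new }
            }
            lock.unlock();
            if old == current { Ok(old) } else { Err(old) }
        }
    }

    fn main() {
        static X: FallbackAtomicU32 = FallbackAtomicU32 { v: UnsafeCell::new(5) };
        assert_eq!(X.compare_exchange(5, 10), Ok(5));
        assert_eq!(X.compare_exchange(5, 12), Err(10));
    }

Because the lock lives in a global table rather than next to the value, the atomic
cell keeps the same in-memory representation as the plain value type, which is the
constraint noted in the comment at the top of the new file.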