diff --git a/dasp_ring_buffer/Cargo.toml b/dasp_ring_buffer/Cargo.toml
index 18d80e7a..68086bd5 100644
--- a/dasp_ring_buffer/Cargo.toml
+++ b/dasp_ring_buffer/Cargo.toml
@@ -14,5 +14,8 @@ edition = "2018"
 default = ["std"]
 std = []
 
+[dev-dependencies]
+itertools = "0.9.0"
+
 [package.metadata.docs.rs]
 all-features = true
diff --git a/dasp_ring_buffer/src/lib.rs b/dasp_ring_buffer/src/lib.rs
index 59fae5d8..4987d145 100644
--- a/dasp_ring_buffer/src/lib.rs
+++ b/dasp_ring_buffer/src/lib.rs
@@ -511,6 +511,12 @@ where
         self.len
     }
 
+    /// The number of elements that may still be pushed before the buffer is full.
+    #[inline]
+    pub fn remaining(&self) -> usize {
+        self.max_len() - self.len
+    }
+
     /// Whether or not the ring buffer's length is equal to `0`.
     ///
     /// Equivalent to `self.len() == 0`.
@@ -680,6 +686,7 @@ where
     ///     assert_eq!(ring_buffer.len(), 3);
     /// }
     /// ```
+    #[inline]
     pub fn push(&mut self, elem: S::Element) -> Option<S::Element>
     where
         S: SliceMut,
@@ -726,6 +733,7 @@ where
     ///     assert_eq!(rb.pop(), None);
     /// }
     /// ```
+    #[inline]
    pub fn pop(&mut self) -> Option<S::Element>
    where
        S: SliceMut,
@@ -748,6 +756,226 @@ where
         Some(old_elem)
     }
 
+    /// Copy data from `other` into `self` efficiently.
+    ///
+    /// The function will return an error if there is not enough space to copy `other` into `self`.
+    ///
+    /// See `Bounded::extend` for examples.
+    #[inline]
+    pub fn try_extend<O>(&mut self, other: O) -> Result<(), ()>
+    where
+        S: SliceMut,
+        O: Slice<Element = S::Element>,
+    {
+        let other = other.slice();
+        if other.len() > self.remaining() {
+            return Err(());
+        }
+        let start = self.start_free();
+
+        if self.is_free_space_contiguous()
+            || self.max_len() - (self.start + self.len) >= other.len()
+        {
+            // new data will fit into end of self.data
+            (&mut self.data.slice_mut()[start..start + other.len()]).copy_from_slice(other);
+        } else {
+            // new data will need to wrap
+            let max_len = self.max_len();
+            let end_amt = max_len - self.start_free();
+            (&mut self.data.slice_mut()[start..max_len]).copy_from_slice(&other[..end_amt]);
+            (&mut self.data.slice_mut()[..other.len() - end_amt])
+                .copy_from_slice(&other[end_amt..]);
+        }
+        self.len += other.len();
+        Ok(())
+    }
+
+    /// Copy data from `other` into `self` efficiently.
+    ///
+    /// # Panics
+    ///
+    /// The function will panic if there is not enough space to copy `other` into `self`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use dasp_ring_buffer::Bounded;
+    /// let from = &[2u8, 3];
+    /// let mut to = Bounded::from([0u8; 4]);
+    /// to.push(0);
+    /// to.push(1);
+    /// to.extend(&from[..]);
+    /// assert_eq!(to.iter().copied().collect::<Vec<_>>(), vec![0, 1, 2, 3]);
+    /// ```
+    #[inline]
+    pub fn extend<O>(&mut self, other: O)
+    where
+        S: SliceMut,
+        O: Slice<Element = S::Element>,
+    {
+        self.try_extend(other).ok().expect("not enough space")
+    }
+
+    /// Copy data from `self` into `other` efficiently.
+    ///
+    /// The function will return an error if there is not enough data in `self` to fill `other`.
+    ///
+    /// See `Bounded::read` for examples.
+    #[inline]
+    pub fn try_read<O>(&mut self, mut other: O) -> Result<(), ()>
+    where
+        O: SliceMut<Element = S::Element>,
+    {
+        let other = other.slice_mut();
+        if other.len() > self.len() {
+            return Err(());
+        }
+        let (first, second) = self.slices();
+        if first.len() > other.len() {
+            other.copy_from_slice(&first[..other.len()]);
+        } else {
+            // ensure our code turns into 2 `memcpy`s; only other.len() elements in total.
+            other[..first.len()].copy_from_slice(first);
+            other[first.len()..].copy_from_slice(&second[..other.len() - first.len()]);
+        }
+        self.start = (self.start + other.len()) % self.max_len();
+        self.len -= other.len();
+        Ok(())
+    }
+
+    /// Copy data from `self` into `other` efficiently.
+    ///
+    /// # Panics
+    ///
+    /// The function will panic if there is not enough data in `self` to fill `other`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use dasp_ring_buffer::Bounded;
+    /// let mut from = Bounded::from([0u8; 4]);
+    /// from.extend(&[0, 1, 2][..]);
+    /// let mut to = [0u8; 2];
+    /// from.read(&mut to[..]);
+    /// assert_eq!(from.pop(), Some(2));
+    /// assert!(from.pop().is_none());
+    /// assert_eq!(to, [0, 1]);
+    /// ```
+    #[inline]
+    pub fn read<O>(&mut self, other: O)
+    where
+        O: SliceMut<Element = S::Element>,
+    {
+        self.try_read(other).ok().expect("not enough data")
+    }
+
+    /// Copy all data in `self` to `other` efficiently.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use dasp_ring_buffer::Bounded;
+    /// let mut from = Bounded::from_full([0u8, 1]);
+    /// let mut to = Bounded::from([0u8, 0]);
+    /// from.copy(&mut to);
+    /// assert_eq!(to.iter().copied().collect::<Vec<_>>(), vec![0, 1]);
+    /// ```
+    #[inline]
+    pub fn try_copy<O>(&mut self, other: &mut Bounded<O>) -> Result<(), ()>
+    where
+        O: SliceMut<Element = S::Element>,
+    {
+        if self.len() > other.remaining() {
+            return Err(());
+        }
+        let other_start = other.start_free();
+        // 2 x 2 = 4 cases: self's data and other's free space can each be contiguous or split.
+        match (self.is_data_contiguous(), other.is_free_space_contiguous()) {
+            // single memcpy
+            (true, true) => {
+                other.data.slice_mut()[other_start..other_start + self.len]
+                    .copy_from_slice(&self.data.slice()[self.start..self.start + self.len]);
+            }
+            // 1 or 2 memcpys
+            (true, false) => {
+                let other_remaining_at_end = other.max_len() - other.start_free();
+                if self.len <= other_remaining_at_end {
+                    // our data will fit at the end of `other`.
+                    other.data.slice_mut()[other_start..other_start + self.len]
+                        .copy_from_slice(&self.data.slice()[self.start..self.start + self.len]);
+                } else {
+                    other.data.slice_mut()[other_start..].copy_from_slice(
+                        &self.data.slice()[self.start..self.start + other_remaining_at_end],
+                    );
+                    other.data.slice_mut()[..self.len - other_remaining_at_end].copy_from_slice(
+                        &self.data.slice()
+                            [self.start + other_remaining_at_end..self.start + self.len],
+                    );
+                }
+            }
+            // 2 memcpys
+            (false, true) => {
+                // copy to the end of our buffer.
+                let first_len = self.max_len() - self.start;
+                other.data.slice_mut()[other_start..other_start + first_len]
+                    .copy_from_slice(&self.data.slice()[self.start..]);
+                other.data.slice_mut()[other_start + first_len..other_start + self.len]
+                    .copy_from_slice(&self.data.slice()[..self.len - first_len]);
+            }
+            // 2 or 3 memcpys
+            (false, false) => {
+                // see which split comes first
+                let self_first_len = self.max_len() - self.start;
+                let other_first_len = other.max_len() - other.start_free();
+                if self_first_len <= other_first_len {
+                    // We can copy all our first slice into other in one go.
+                    other.data.slice_mut()[other_start..other_start + self_first_len]
+                        .copy_from_slice(&self.data.slice()[self.start..]);
+                    if self.len <= other_first_len {
+                        // we can fit the whole thing in the first slice of other
+                        other.data.slice_mut()
+                            [other_start + self_first_len..other_start + self.len]
+                            .copy_from_slice(&self.data.slice()[..self.len - self_first_len]);
+                    } else {
+                        other.data.slice_mut()[other_start + self_first_len..].copy_from_slice(
+                            &self.data.slice()[..other_first_len - self_first_len],
+                        );
+                        other.data.slice_mut()[..self.len - other_first_len].copy_from_slice(
+                            &self.data.slice()[other_first_len - self_first_len..self.start_free()],
+                        );
+                    }
+                } else {
+                    // We must split our first slice up.
+                    other.data.slice_mut()[other_start..].copy_from_slice(
+                        &self.data.slice()[self.start..self.start + other_first_len],
+                    );
+                    let remaining_first = self_first_len - other_first_len;
+                    other.data.slice_mut()[..remaining_first]
+                        .copy_from_slice(&self.data.slice()[self.start + other_first_len..]);
+                    other.data.slice_mut()
+                        [remaining_first..remaining_first + self.len - self_first_len]
+                        .copy_from_slice(&self.data.slice()[..self.len - self_first_len]);
+                }
+            }
+        }
+        other.len += self.len;
+        self.len = 0;
+        Ok(())
+    }
+
+    /// Copy all data in `self` to `other` efficiently.
+    ///
+    /// # Panics
+    ///
+    /// The function will panic if there is not enough space to copy `self` into `other`.
+    #[inline]
+    pub fn copy<O>(&mut self, other: &mut Bounded<O>)
+    where
+        O: SliceMut<Element = S::Element>,
+    {
+        self.try_copy(other).expect("not enough space")
+    }
+
     /// Produce an iterator that drains the ring buffer by `pop`ping each element one at a time.
     ///
     /// Note that only elements yielded by `DrainBounded::next` will be popped from the ring buffer.
@@ -762,6 +990,7 @@ where
     ///     assert_eq!(rb.pop(), None);
     /// }
     /// ```
+    #[inline]
     pub fn drain(&mut self) -> DrainBounded<S> {
         DrainBounded { bounded: self }
     }
@@ -812,6 +1041,25 @@ where
         let Bounded { start, len, data } = self;
         (start, len, data)
     }
+
+    /// True if the data in the backing store is contiguous.
+    #[inline]
+    fn is_data_contiguous(&self) -> bool {
+        self.start + self.len <= self.max_len()
+    }
+
+    /// True if the unused space is contiguous (incl. when data ends exactly at the store's end).
+    #[inline]
+    fn is_free_space_contiguous(&self) -> bool {
+        self.start == 0 || self.start + self.len >= self.max_len()
+    }
+
+    /// Returns the offset of the element after the last element in the buffer (which might be 0 if
+    /// we wrapped).
+    #[inline]
+    fn start_free(&self) -> usize {
+        (self.start + self.len) % self.max_len()
+    }
 }
 
 impl<S> From<S> for Bounded<S>
@@ -891,3 +1139,76 @@ where
         self.bounded.len()
     }
 }
+
+#[cfg(all(test, feature = "std"))]
+mod test {
+    use super::Bounded;
+    use itertools::iproduct;
+
+    #[test]
+    fn copy() {
+        const LIM: usize = 4;
+        let data = [0u8, 1, 2, 3];
+        // To make sure we cover edge cases, test on ALL permutations of length 4 bounded ringbufs.
+        for (from_start, from_len, to_start, to_len) in iproduct!(0..LIM, 0..LIM, 0..LIM, 0..LIM) {
+            let mut from = Bounded {
+                start: from_start,
+                len: from_len,
+                data,
+            };
+            let old_from = from.clone();
+            let mut to = Bounded {
+                start: to_start,
+                len: to_len,
+                data,
+            };
+            let old_to = to.clone();
+            let res = from.try_copy(&mut to);
+            if to_len > (LIM - from_len) {
+                assert!(res.is_err());
+            } else {
+                assert!(res.is_ok());
+                assert_eq!(from.len, 0);
+                assert_eq!(to.len, old_to.len + old_from.len);
+                // check contents (this is harder)
+                let first = from_start as u8;
+                let first_end = ((from_start + from_len) % LIM) as u8;
+                let second = to_start as u8;
+                let second_end = ((to_start + to_len) % LIM) as u8;
+                let expected = sequence_mod(second, second_end, LIM as u8)
+                    .chain(sequence_mod(first, first_end, LIM as u8))
+                    .collect::<Vec<_>>();
+                let actual = to.iter().copied().collect::<Vec<_>>();
+                assert_eq!(
+                    expected, actual,
+                    "from({}-{}) to({}-{})",
+                    first, first_end, second, second_end
+                );
+            }
+        }
+    }
+
+    fn sequence_mod(start: u8, end: u8, modulo: u8) -> impl Iterator<Item = u8> {
+        struct ModIter {
+            pos: u8,
+            end: u8,
+            modulo: u8,
+        }
+        impl Iterator for ModIter {
+            type Item = u8;
+            fn next(&mut self) -> Option<Self::Item> {
+                if self.pos == self.end {
+                    return None;
+                }
+                let pos = self.pos;
+                self.pos = (self.pos + 1) % self.modulo;
+                Some(pos)
+            }
+        }
+        ModIter {
+            pos: start,
+            end,
+            modulo,
+        }
+    }
+}
diff --git a/dasp_ring_buffer/tests/ring_buffer.rs b/dasp_ring_buffer/tests/ring_buffer.rs
index 938ec085..64b33471 100644
--- a/dasp_ring_buffer/tests/ring_buffer.rs
+++ b/dasp_ring_buffer/tests/ring_buffer.rs
@@ -39,3 +39,82 @@ fn test_bounded_get_out_of_range() {
     let rb = ring_buffer::Bounded::from([0i32; 3]);
     let _ = rb[0];
 }
+
+#[test]
+fn test_extend() {
+    // Test each branch to look for UB.
+    // space at end
+    let mut rb = ring_buffer::Bounded::from([0i32; 4]);
+    rb.push(1);
+    rb.extend(&[2, 3][..]);
+    assert_eq!(rb.slices(), (&[1, 2, 3][..], &[][..]));
+
+    // old data wraps
+    let mut rb = ring_buffer::Bounded::from([0i32; 4]);
+    rb.extend(&[1, 2, 3, 4][..]);
+    rb.pop();
+    rb.pop();
+    rb.pop();
+    rb.push(5);
+    rb.extend(&[6, 7][..]);
+    assert_eq!(rb.slices(), (&[4][..], &[5, 6, 7][..]));
+
+    // we wrap
+    let mut rb = ring_buffer::Bounded::from([0i32; 4]);
+    rb.extend(&[1, 2, 3][..]);
+    rb.pop();
+    rb.extend(&[4, 5][..]);
+    assert_eq!(rb.slices(), (&[2, 3, 4][..], &[5][..]));
+}
+
+#[test]
+#[should_panic]
+fn test_extend_too_big() {
+    let mut rb = ring_buffer::Bounded::from([0i32; 3]);
+    rb.extend(&[1, 2][..]);
+    let other = [0i32; 3];
+    rb.extend(&other[..])
+}
+
+#[test]
+fn test_read() {
+    // Test each branch to look for UB.
+    // contiguous data
+    let mut rb = ring_buffer::Bounded::from([0i32; 4]);
+    rb.extend(&[1, 2, 3][..]);
+    let mut other = [0i32; 2];
+    rb.read(&mut other[..]);
+    assert_eq!(rb.slices(), (&[3][..], &[][..]));
+    assert_eq!(other, [1, 2]);
+
+    // not contiguous data, we only draw from first half
+    let mut rb = ring_buffer::Bounded::from([0i32; 4]);
+    rb.extend(&[1, 2, 3, 4][..]);
+    rb.pop();
+    rb.pop();
+    rb.push(5);
+    let mut other = [0i32; 1];
+    rb.read(&mut other[..]);
+    assert_eq!(rb.slices(), (&[4][..], &[5][..]));
+    assert_eq!(other, [3]);
+
+    // not contiguous data, we only draw from both halves
+    let mut rb = ring_buffer::Bounded::from([0i32; 4]);
+    rb.extend(&[1, 2, 3, 4][..]);
+    rb.pop();
+    rb.pop();
+    rb.push(5);
+    let mut other = [0i32; 3];
+    rb.read(&mut other[..]);
+    assert_eq!(rb.slices(), (&[][..], &[][..]));
+    assert_eq!(other, [3, 4, 5]);
+}
+
+#[test]
+#[should_panic]
+fn test_read_too_small() {
+    let mut rb = ring_buffer::Bounded::from([0i32; 3]);
+    rb.extend(&[1, 2][..]);
+    let mut other = [0i32; 3];
+    rb.read(&mut other[..])
+}