diff --git a/futures-util/Cargo.toml b/futures-util/Cargo.toml index 68dbd86b7..454c55067 100644 --- a/futures-util/Cargo.toml +++ b/futures-util/Cargo.toml @@ -48,6 +48,7 @@ pin-project-lite = "0.2.6" futures = { path = "../futures", features = ["async-await", "thread-pool"] } futures-test = { path = "../futures-test" } tokio = "0.1.11" +futures-timer = "3.0.3" [package.metadata.docs.rs] all-features = true diff --git a/futures-util/src/lib.rs b/futures-util/src/lib.rs index abe35e68f..c5af69609 100644 --- a/futures-util/src/lib.rs +++ b/futures-util/src/lib.rs @@ -10,7 +10,6 @@ ) ))] #![warn(missing_docs, unsafe_op_in_unsafe_fn)] -#![cfg_attr(feature = "write-all-vectored", feature(io_slice_advance))] #![cfg_attr(docsrs, feature(doc_cfg))] #![allow(clippy::needless_borrow)] // https://github.com/rust-lang/futures-rs/pull/2558#issuecomment-1030745203 #![allow(clippy::arc_with_non_send_sync)] // false positive https://github.com/rust-lang/rust-clippy/issues/11076 diff --git a/futures-util/src/stream/futures_unordered/iter.rs b/futures-util/src/stream/futures_unordered/iter.rs index 20248c70f..34fc1010d 100644 --- a/futures-util/src/stream/futures_unordered/iter.rs +++ b/futures-util/src/stream/futures_unordered/iter.rs @@ -1,16 +1,12 @@ -use super::task::Task; -use super::FuturesUnordered; -use core::marker::PhantomData; +use crate::stream::futures_unordered_internal; use core::pin::Pin; -use core::ptr; -use core::sync::atomic::Ordering::Relaxed; + +use super::DummyStruct; /// Mutable iterator over all futures in the unordered set. #[derive(Debug)] pub struct IterPinMut<'a, Fut> { - pub(super) task: *const Task, - pub(super) len: usize, - pub(super) _marker: PhantomData<&'a mut FuturesUnordered>, + pub(super) inner: futures_unordered_internal::IterPinMut<'a, (), Fut, DummyStruct>, } /// Mutable iterator over all futures in the unordered set. @@ -20,10 +16,7 @@ pub struct IterMut<'a, Fut: Unpin>(pub(super) IterPinMut<'a, Fut>); /// Immutable iterator over all futures in the unordered set. #[derive(Debug)] pub struct IterPinRef<'a, Fut> { - pub(super) task: *const Task, - pub(super) len: usize, - pub(super) pending_next_all: *mut Task, - pub(super) _marker: PhantomData<&'a FuturesUnordered>, + pub(super) inner: futures_unordered_internal::IterPinRef<'a, (), Fut, DummyStruct>, } /// Immutable iterator over all the futures in the unordered set. @@ -33,42 +26,18 @@ pub struct Iter<'a, Fut: Unpin>(pub(super) IterPinRef<'a, Fut>); /// Owned iterator over all futures in the unordered set. #[derive(Debug)] pub struct IntoIter { - pub(super) len: usize, - pub(super) inner: FuturesUnordered, + pub(super) inner: futures_unordered_internal::IntoIter<(), Fut, DummyStruct>, } impl Iterator for IntoIter { type Item = Fut; fn next(&mut self) -> Option { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. - let task = self.inner.head_all.get_mut(); - - if (*task).is_null() { - return None; - } - - unsafe { - // Moving out of the future is safe because it is `Unpin` - let future = (*(**task).future.get()).take().unwrap(); - - // Mutable access to a previously shared `FuturesUnordered` implies - // that the other threads already released the object before the - // current thread acquired it, so relaxed ordering can be used and - // valid `next_all` checks can be skipped. 
- let next = (**task).next_all.load(Relaxed); - *task = next; - if !task.is_null() { - *(**task).prev_all.get() = ptr::null_mut(); - } - self.len -= 1; - Some(future) - } + self.inner.next().map(|opt| opt.1) } fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) + (self.inner.len, Some(self.inner.len)) } } @@ -78,26 +47,11 @@ impl<'a, Fut> Iterator for IterPinMut<'a, Fut> { type Item = Pin<&'a mut Fut>; fn next(&mut self) -> Option { - if self.task.is_null() { - return None; - } - - unsafe { - let future = (*(*self.task).future.get()).as_mut().unwrap(); - - // Mutable access to a previously shared `FuturesUnordered` implies - // that the other threads already released the object before the - // current thread acquired it, so relaxed ordering can be used and - // valid `next_all` checks can be skipped. - let next = (*self.task).next_all.load(Relaxed); - self.task = next; - self.len -= 1; - Some(Pin::new_unchecked(future)) - } + self.inner.next().map(|opt| opt.1) } fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) + (self.inner.len, Some(self.inner.len)) } } @@ -121,26 +75,11 @@ impl<'a, Fut> Iterator for IterPinRef<'a, Fut> { type Item = Pin<&'a Fut>; fn next(&mut self) -> Option { - if self.task.is_null() { - return None; - } - - unsafe { - let future = (*(*self.task).future.get()).as_ref().unwrap(); - - // Relaxed ordering can be used since acquire ordering when - // `head_all` was initially read for this iterator implies acquire - // ordering for all previously inserted nodes (and we don't need to - // read `len_all` again for any other nodes). - let next = (*self.task).spin_next_all(self.pending_next_all, Relaxed); - self.task = next; - self.len -= 1; - Some(Pin::new_unchecked(future)) - } + self.inner.next().map(|opt| opt.1) } fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) + (self.inner.len, Some(self.inner.len)) } } @@ -159,14 +98,3 @@ impl<'a, Fut: Unpin> Iterator for Iter<'a, Fut> { } impl ExactSizeIterator for Iter<'_, Fut> {} - -// SAFETY: we do nothing thread-local and there is no interior mutability, -// so the usual structural `Send`/`Sync` apply. -unsafe impl Send for IterPinRef<'_, Fut> {} -unsafe impl Sync for IterPinRef<'_, Fut> {} - -unsafe impl Send for IterPinMut<'_, Fut> {} -unsafe impl Sync for IterPinMut<'_, Fut> {} - -unsafe impl Send for IntoIter {} -unsafe impl Sync for IntoIter {} diff --git a/futures-util/src/stream/futures_unordered/mod.rs b/futures-util/src/stream/futures_unordered/mod.rs index 2d4f15158..91231344b 100644 --- a/futures-util/src/stream/futures_unordered/mod.rs +++ b/futures-util/src/stream/futures_unordered/mod.rs @@ -2,33 +2,22 @@ //! //! This module is only available when the `std` or `alloc` feature of this //! library is activated, and it is activated by default. +//! 
-use crate::task::AtomicWaker; -use alloc::sync::{Arc, Weak}; -use core::cell::UnsafeCell; use core::fmt::{self, Debug}; use core::iter::FromIterator; -use core::marker::PhantomData; -use core::mem; use core::pin::Pin; -use core::ptr; -use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst}; -use core::sync::atomic::{AtomicBool, AtomicPtr}; use futures_core::future::Future; use futures_core::stream::{FusedStream, Stream}; use futures_core::task::{Context, Poll}; use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError}; -mod abort; - mod iter; pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; -mod task; -use self::task::Task; +use crate::stream::futures_unordered_internal::FuturesUnorderedInternal; -mod ready_to_run_queue; -use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; +use super::futures_unordered_internal::ReleasesTask; /// A set of futures which may complete in any order. /// @@ -55,9 +44,23 @@ use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; /// library is activated, and it is activated by default. #[must_use = "streams do nothing unless polled"] pub struct FuturesUnordered { - ready_to_run_queue: Arc>, - head_all: AtomicPtr>, - is_terminated: AtomicBool, + inner: FuturesUnorderedInternal<(), Fut, DummyStruct>, +} + +struct DummyStruct {} + +impl fmt::Debug for DummyStruct { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "") + } +} + +impl ReleasesTask<()> for DummyStruct { + fn release_task(&mut self, _key: &()) {} + + fn new() -> Self { + DummyStruct {} + } } unsafe impl Send for FuturesUnordered {} @@ -66,14 +69,14 @@ impl Unpin for FuturesUnordered {} impl Spawn for FuturesUnordered> { fn spawn_obj(&self, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> { - self.push(future_obj); + self.inner.push((), future_obj); Ok(()) } } impl LocalSpawn for FuturesUnordered> { fn spawn_local_obj(&self, future_obj: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> { - self.push(future_obj); + self.inner.push((), future_obj); Ok(()) } } @@ -116,44 +119,21 @@ impl FuturesUnordered { /// In this state, [`FuturesUnordered::poll_next`](Stream::poll_next) will /// return [`Poll::Ready(None)`](Poll::Ready). pub fn new() -> Self { - let stub = Arc::new(Task { - future: UnsafeCell::new(None), - next_all: AtomicPtr::new(ptr::null_mut()), - prev_all: UnsafeCell::new(ptr::null()), - len_all: UnsafeCell::new(0), - next_ready_to_run: AtomicPtr::new(ptr::null_mut()), - queued: AtomicBool::new(true), - ready_to_run_queue: Weak::new(), - woken: AtomicBool::new(false), - }); - let stub_ptr = Arc::as_ptr(&stub); - let ready_to_run_queue = Arc::new(ReadyToRunQueue { - waker: AtomicWaker::new(), - head: AtomicPtr::new(stub_ptr as *mut _), - tail: UnsafeCell::new(stub_ptr), - stub, - }); - - Self { - head_all: AtomicPtr::new(ptr::null_mut()), - ready_to_run_queue, - is_terminated: AtomicBool::new(false), - } + Self { inner: FuturesUnorderedInternal::new() } } /// Returns the number of futures contained in the set. /// /// This represents the total number of in-flight futures. pub fn len(&self) -> usize { - let (_, len) = self.atomic_load_head_and_len_all(); - len + self.inner.len() } /// Returns `true` if the set contains no futures. pub fn is_empty(&self) -> bool { // Relaxed ordering can be used here since we don't need to read from // the head pointer, only check whether it is null. - self.head_all.load(Relaxed).is_null() + self.inner.is_empty() } /// Push a future into the set. 
@@ -163,31 +143,7 @@ impl FuturesUnordered { /// ensure that [`FuturesUnordered::poll_next`](Stream::poll_next) is called /// in order to receive wake-up notifications for the given future. pub fn push(&self, future: Fut) { - let task = Arc::new(Task { - future: UnsafeCell::new(Some(future)), - next_all: AtomicPtr::new(self.pending_next_all()), - prev_all: UnsafeCell::new(ptr::null_mut()), - len_all: UnsafeCell::new(0), - next_ready_to_run: AtomicPtr::new(ptr::null_mut()), - queued: AtomicBool::new(true), - ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue), - woken: AtomicBool::new(false), - }); - - // Reset the `is_terminated` flag if we've previously marked ourselves - // as terminated. - self.is_terminated.store(false, Relaxed); - - // Right now our task has a strong reference count of 1. We transfer - // ownership of this reference count to our internal linked list - // and we'll reclaim ownership through the `unlink` method below. - let ptr = self.link(task); - - // We'll need to get the future "into the system" to start tracking it, - // e.g. getting its wake-up notifications going to us tracking which - // futures are ready. To do that we unconditionally enqueue it for - // polling here. - self.ready_to_run_queue.enqueue(ptr); + self.inner.push((), future); } /// Returns an iterator that allows inspecting each future in the set. @@ -200,10 +156,10 @@ impl FuturesUnordered { /// Returns an iterator that allows inspecting each future in the set. pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, Fut> { - let (task, len) = self.atomic_load_head_and_len_all(); - let pending_next_all = self.pending_next_all(); - - IterPinRef { task, len, pending_next_all, _marker: PhantomData } + // IterPinRef { inner: Pin::new(&self.inner).iter_pin_ref() } + IterPinRef { + inner: unsafe { Pin::map_unchecked(self, |thing| &thing.inner) }.iter_pin_ref(), + } } /// Returns an iterator that allows modifying each future in the set. @@ -215,174 +171,11 @@ impl FuturesUnordered { } /// Returns an iterator that allows modifying each future in the set. - pub fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, Fut> { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. - let task = *self.head_all.get_mut(); - let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; - - IterPinMut { task, len, _marker: PhantomData } - } - - /// Returns the current head node and number of futures in the list of all - /// futures within a context where access is shared with other threads - /// (mostly for use with the `len` and `iter_pin_ref` methods). - fn atomic_load_head_and_len_all(&self) -> (*const Task, usize) { - let task = self.head_all.load(Acquire); - let len = if task.is_null() { - 0 - } else { - unsafe { - (*task).spin_next_all(self.pending_next_all(), Acquire); - *(*task).len_all.get() - } - }; - - (task, len) - } - - /// Releases the task. It destroys the future inside and either drops - /// the `Arc` or transfers ownership to the ready to run queue. - /// The task this method is called on must have been unlinked before. - fn release_task(&mut self, task: Arc>) { - // `release_task` must only be called on unlinked tasks - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - unsafe { - debug_assert!((*task.prev_all.get()).is_null()); - } - - // The future is done, try to reset the queued flag. 
This will prevent - // `wake` from doing any work in the future - let prev = task.queued.swap(true, SeqCst); - - // Drop the future, even if it hasn't finished yet. This is safe - // because we're dropping the future on the thread that owns - // `FuturesUnordered`, which correctly tracks `Fut`'s lifetimes and - // such. - unsafe { - // Set to `None` rather than `take()`ing to prevent moving the - // future. - *task.future.get() = None; - } - - // If the queued flag was previously set, then it means that this task - // is still in our internal ready to run queue. We then transfer - // ownership of our reference count to the ready to run queue, and it'll - // come along and free it later, noticing that the future is `None`. - // - // If, however, the queued flag was *not* set then we're safe to - // release our reference count on the task. The queued flag was set - // above so all future `enqueue` operations will not actually - // enqueue the task, so our task will never see the ready to run queue - // again. The task itself will be deallocated once all reference counts - // have been dropped elsewhere by the various wakers that contain it. - if prev { - mem::forget(task); - } - } - - /// Insert a new task into the internal linked list. - fn link(&self, task: Arc>) -> *const Task { - // `next_all` should already be reset to the pending state before this - // function is called. - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - let ptr = Arc::into_raw(task); - - // Atomically swap out the old head node to get the node that should be - // assigned to `next_all`. - let next = self.head_all.swap(ptr as *mut _, AcqRel); - - unsafe { - // Store the new list length in the new node. - let new_len = if next.is_null() { - 1 - } else { - // Make sure `next_all` has been written to signal that it is - // safe to read `len_all`. - (*next).spin_next_all(self.pending_next_all(), Acquire); - *(*next).len_all.get() + 1 - }; - *(*ptr).len_all.get() = new_len; - - // Write the old head as the next node pointer, signaling to other - // threads that `len_all` and `next_all` are ready to read. - (*ptr).next_all.store(next, Release); - - // `prev_all` updates don't need to be synchronized, as the field is - // only ever used after exclusive access has been acquired. - if !next.is_null() { - *(*next).prev_all.get() = ptr; - } + pub fn iter_pin_mut(self: Pin<&mut Self>) -> IterPinMut<'_, Fut> { + // IterPinMut { inner: Pin::new(&mut self.inner).iter_pin_mut() } + IterPinMut { + inner: unsafe { Pin::map_unchecked_mut(self, |thing| &mut thing.inner) }.iter_pin_mut(), } - - ptr - } - - /// Remove the task from the linked list tracking all tasks currently - /// managed by `FuturesUnordered`. - /// This method is unsafe because it has be guaranteed that `task` is a - /// valid pointer. - unsafe fn unlink(&mut self, task: *const Task) -> Arc> { - unsafe { - // Compute the new list length now in case we're removing the head node - // and won't be able to retrieve the correct length later. 
- let head = *self.head_all.get_mut(); - debug_assert!(!head.is_null()); - let new_len = *(*head).len_all.get() - 1; - - let task = Arc::from_raw(task); - let next = task.next_all.load(Relaxed); - let prev = *task.prev_all.get(); - task.next_all.store(self.pending_next_all(), Relaxed); - *task.prev_all.get() = ptr::null_mut(); - - if !next.is_null() { - *(*next).prev_all.get() = prev; - } - - if !prev.is_null() { - (*prev).next_all.store(next, Relaxed); - } else { - *self.head_all.get_mut() = next; - } - - // Store the new list length in the head node. - let head = *self.head_all.get_mut(); - if !head.is_null() { - *(*head).len_all.get() = new_len; - } - - task - } - } - - /// Returns the reserved value for `Task::next_all` to indicate a pending - /// assignment from the thread that inserted the task. - /// - /// `FuturesUnordered::link` needs to update `Task` pointers in an order - /// that ensures any iterators created on other threads can correctly - /// traverse the entire `Task` list using the chain of `next_all` pointers. - /// This could be solved with a compare-exchange loop that stores the - /// current `head_all` in `next_all` and swaps out `head_all` with the new - /// `Task` pointer if the head hasn't already changed. Under heavy thread - /// contention, this compare-exchange loop could become costly. - /// - /// An alternative is to initialize `next_all` to a reserved pending state - /// first, perform an atomic swap on `head_all`, and finally update - /// `next_all` with the old head node. Iterators will then either see the - /// pending state value or the correct next node pointer, and can reload - /// `next_all` as needed until the correct value is loaded. The number of - /// retries needed (if any) would be small and will always be finite, so - /// this should generally perform better than the compare-exchange loop. - /// - /// A valid `Task` pointer in the `head_all` list is guaranteed to never be - /// this value, so it is safe to use as a reserved value until the correct - /// value can be written. - fn pending_next_all(&self) -> *mut Task { - // The `ReadyToRunQueue` stub is never inserted into the `head_all` - // list, and its pointer value will remain valid for the lifetime of - // this `FuturesUnordered`, so we can make use of its value here. - Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _ } } @@ -390,158 +183,9 @@ impl Stream for FuturesUnordered { type Item = Fut::Output; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let len = self.len(); - - // Keep track of how many child futures we have polled, - // in case we want to forcibly yield. - let mut polled = 0; - let mut yielded = 0; - - // Ensure `parent` is correctly set. - self.ready_to_run_queue.waker.register(cx.waker()); - - loop { - // Safety: &mut self guarantees the mutual exclusion `dequeue` - // expects - let task = match unsafe { self.ready_to_run_queue.dequeue() } { - Dequeue::Empty => { - if self.is_empty() { - // We can only consider ourselves terminated once we - // have yielded a `None` - *self.is_terminated.get_mut() = true; - return Poll::Ready(None); - } else { - return Poll::Pending; - } - } - Dequeue::Inconsistent => { - // At this point, it may be worth yielding the thread & - // spinning a few times... but for now, just yield using the - // task system. - cx.waker().wake_by_ref(); - return Poll::Pending; - } - Dequeue::Data(task) => task, - }; - - debug_assert!(task != self.ready_to_run_queue.stub()); - - // Safety: - // - `task` is a valid pointer. 
- // - We are the only thread that accesses the `UnsafeCell` that - // contains the future - let future = match unsafe { &mut *(*task).future.get() } { - Some(future) => future, - - // If the future has already gone away then we're just - // cleaning out this task. See the comment in - // `release_task` for more information, but we're basically - // just taking ownership of our reference count here. - None => { - // This case only happens when `release_task` was called - // for this task before and couldn't drop the task - // because it was already enqueued in the ready to run - // queue. - - // Safety: `task` is a valid pointer - let task = unsafe { Arc::from_raw(task) }; - - // Double check that the call to `release_task` really - // happened. Calling it required the task to be unlinked. - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - unsafe { - debug_assert!((*task.prev_all.get()).is_null()); - } - continue; - } - }; - - // Safety: `task` is a valid pointer - let task = unsafe { self.unlink(task) }; - - // Unset queued flag: This must be done before polling to ensure - // that the future's task gets rescheduled if it sends a wake-up - // notification **during** the call to `poll`. - let prev = task.queued.swap(false, SeqCst); - assert!(prev); - - // We're going to need to be very careful if the `poll` - // method below panics. We need to (a) not leak memory and - // (b) ensure that we still don't have any use-after-frees. To - // manage this we do a few things: - // - // * A "bomb" is created which if dropped abnormally will call - // `release_task`. That way we'll be sure the memory management - // of the `task` is managed correctly. In particular - // `release_task` will drop the future. This ensures that it is - // dropped on this thread and not accidentally on a different - // thread (bad). - // * We unlink the task from our internal queue to preemptively - // assume it'll panic, in which case we'll want to discard it - // regardless. - struct Bomb<'a, Fut> { - queue: &'a mut FuturesUnordered, - task: Option>>, - } - - impl Drop for Bomb<'_, Fut> { - fn drop(&mut self) { - if let Some(task) = self.task.take() { - self.queue.release_task(task); - } - } - } - - let mut bomb = Bomb { task: Some(task), queue: &mut *self }; - - // Poll the underlying future with the appropriate waker - // implementation. This is where a large bit of the unsafety - // starts to stem from internally. The waker is basically just - // our `Arc>` and can schedule the future for polling by - // enqueuing itself in the ready to run queue. - // - // Critically though `Task` won't actually access `Fut`, the - // future, while it's floating around inside of wakers. - // These structs will basically just use `Fut` to size - // the internal allocation, appropriately accessing fields and - // deallocating the task if need be. - let res = { - let task = bomb.task.as_ref().unwrap(); - // We are only interested in whether the future is awoken before it - // finishes polling, so reset the flag here. - task.woken.store(false, Relaxed); - // SAFETY: see the comments of Bomb and this block. 
- let waker = unsafe { Task::waker_ref(task) }; - let mut cx = Context::from_waker(&waker); - - // Safety: We won't move the future ever again - let future = unsafe { Pin::new_unchecked(future) }; - - future.poll(&mut cx) - }; - polled += 1; - - match res { - Poll::Pending => { - let task = bomb.task.take().unwrap(); - // If the future was awoken during polling, we assume - // the future wanted to explicitly yield. - yielded += task.woken.load(Relaxed) as usize; - bomb.queue.link(task); - - // If a future yields, we respect it and yield here. - // If all futures have been polled, we also yield here to - // avoid starving other tasks waiting on the executor. - // (polling the same future twice per iteration may cause - // the problem: https://github.com/rust-lang/futures-rs/pull/2333) - if yielded >= 2 || polled == len { - cx.waker().wake_by_ref(); - return Poll::Pending; - } - continue; - } - Poll::Ready(output) => return Poll::Ready(Some(output)), - } + match std::task::ready!(Pin::new(&mut self.inner).poll_next(cx)) { + Some((_, output)) => Poll::Ready(Some(output)), + None => Poll::Ready(None), } } @@ -560,34 +204,7 @@ impl Debug for FuturesUnordered { impl FuturesUnordered { /// Clears the set, removing all futures. pub fn clear(&mut self) { - *self = Self::new(); - } -} - -impl Drop for FuturesUnordered { - fn drop(&mut self) { - // When a `FuturesUnordered` is dropped we want to drop all futures - // associated with it. At the same time though there may be tons of - // wakers flying around which contain `Task` references - // inside them. We'll let those naturally get deallocated. - while !self.head_all.get_mut().is_null() { - let head = *self.head_all.get_mut(); - let task = unsafe { self.unlink(head) }; - self.release_task(task); - } - - // Note that at this point we could still have a bunch of tasks in the - // ready to run queue. None of those tasks, however, have futures - // associated with them so they're safe to destroy on any thread. At - // this point the `FuturesUnordered` struct, the owner of the one strong - // reference to the ready to run queue will drop the strong reference. - // At that point whichever thread releases the strong refcount last (be - // it this thread or some other thread as part of an `upgrade`) will - // clear out the ready to run queue and free all remaining tasks. - // - // While that freeing operation isn't guaranteed to happen here, it's - // guaranteed to happen "promptly" as no more "blocking work" will - // happen while there's a strong refcount held. + self.inner.clear() } } @@ -613,13 +230,8 @@ impl IntoIterator for FuturesUnordered { type Item = Fut; type IntoIter = IntoIter; - fn into_iter(mut self) -> Self::IntoIter { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. 
- let task = *self.head_all.get_mut(); - let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; - - IntoIter { len, inner: self } + fn into_iter(self) -> Self::IntoIter { + IntoIter { inner: self.inner.into_iter() } } } @@ -638,7 +250,7 @@ impl FromIterator for FuturesUnordered { impl FusedStream for FuturesUnordered { fn is_terminated(&self) -> bool { - self.is_terminated.load(Relaxed) + self.inner.is_terminated() } } diff --git a/futures-util/src/stream/futures_unordered/abort.rs b/futures-util/src/stream/futures_unordered_internal/abort.rs similarity index 100% rename from futures-util/src/stream/futures_unordered/abort.rs rename to futures-util/src/stream/futures_unordered_internal/abort.rs diff --git a/futures-util/src/stream/futures_unordered_internal/iter.rs b/futures-util/src/stream/futures_unordered_internal/iter.rs new file mode 100644 index 000000000..30e31d76c --- /dev/null +++ b/futures-util/src/stream/futures_unordered_internal/iter.rs @@ -0,0 +1,178 @@ +use super::task::Task; +use super::{FuturesUnorderedInternal, ReleasesTask}; +use core::marker::PhantomData; +use core::pin::Pin; +use core::ptr; +use core::sync::atomic::Ordering::Relaxed; + +/// Mutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub(crate) struct IterPinMut<'a, K, Fut, S: ReleasesTask> { + pub(super) task: *const Task, + pub(crate) len: usize, + pub(super) _marker: PhantomData<&'a mut FuturesUnorderedInternal>, +} + +/// Mutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub(crate) struct IterMut<'a, K, Fut: Unpin, S: ReleasesTask>( + pub(super) IterPinMut<'a, K, Fut, S>, +); + +/// Immutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub(crate) struct IterPinRef<'a, K, Fut, S: ReleasesTask> { + pub(super) task: *const Task, + pub(crate) len: usize, + pub(super) pending_next_all: *mut Task, + pub(super) _marker: PhantomData<&'a FuturesUnorderedInternal>, +} + +/// Immutable iterator over all the futures in the unordered set. +#[derive(Debug)] +pub(crate) struct Iter<'a, K, Fut: Unpin, S: ReleasesTask>(pub(super) IterPinRef<'a, K, Fut, S>); + +/// Owned iterator over all futures in the unordered set. +#[derive(Debug)] +pub(crate) struct IntoIter> { + pub(crate) len: usize, + pub(super) inner: FuturesUnorderedInternal, +} + +impl> Iterator for IntoIter { + type Item = (K, Fut); + + fn next(&mut self) -> Option { + // `head_all` can be accessed directly and we don't need to spin on + // `Task::next_all` since we have exclusive access to the set. + let task = self.inner.head_all.get_mut(); + + if (*task).is_null() { + return None; + } + + unsafe { + // Moving out of the future is safe because it is `Unpin` + let future = (*(**task).future.get()).take().unwrap(); + let key = (**task).take_key(); + + // Mutable access to a previously shared `FuturesUnorderedInternal` implies + // that the other threads already released the object before the + // current thread acquired it, so relaxed ordering can be used and + // valid `next_all` checks can be skipped. 
+ let next = (**task).next_all.load(Relaxed); + *task = next; + if !task.is_null() { + *(**task).prev_all.get() = ptr::null_mut(); + } + self.len -= 1; + Some((key, future)) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl> ExactSizeIterator for IntoIter {} + +impl<'a, K, Fut, S: ReleasesTask> Iterator for IterPinMut<'a, K, Fut, S> { + type Item = (&'a K, Pin<&'a mut Fut>); + + fn next(&mut self) -> Option { + if self.task.is_null() { + return None; + } + + unsafe { + let future = (*(*self.task).future.get()).as_mut().unwrap(); + let key = (*(*self.task).key.get()).as_ref().unwrap(); + + // Mutable access to a previously shared `FuturesUnorderedInternal` implies + // that the other threads already released the object before the + // current thread acquired it, so relaxed ordering can be used and + // valid `next_all` checks can be skipped. + let next = (*self.task).next_all.load(Relaxed); + self.task = next; + self.len -= 1; + Some((key, Pin::new_unchecked(future))) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl> ExactSizeIterator for IterPinMut<'_, K, Fut, S> {} + +impl<'a, K, Fut: Unpin, S: ReleasesTask> Iterator for IterMut<'a, K, Fut, S> { + type Item = (&'a K, &'a mut Fut); + + fn next(&mut self) -> Option { + self.0.next().map(|opt| (opt.0, Pin::get_mut(opt.1))) + } + + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +impl> ExactSizeIterator for IterMut<'_, K, Fut, S> {} + +impl<'a, K, Fut, S: ReleasesTask> Iterator for IterPinRef<'a, K, Fut, S> { + type Item = (&'a K, Pin<&'a Fut>); + + fn next(&mut self) -> Option { + if self.task.is_null() { + return None; + } + + unsafe { + let future = (*(*self.task).future.get()).as_ref().unwrap(); + let key = (*(*self.task).key.get()).as_ref().unwrap(); + + // Relaxed ordering can be used since acquire ordering when + // `head_all` was initially read for this iterator implies acquire + // ordering for all previously inserted nodes (and we don't need to + // read `len_all` again for any other nodes). + let next = (*self.task).spin_next_all(self.pending_next_all, Relaxed); + self.task = next; + self.len -= 1; + Some((key, Pin::new_unchecked(future))) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl> ExactSizeIterator for IterPinRef<'_, K, Fut, S> {} + +impl<'a, K, Fut: Unpin, S: ReleasesTask> Iterator for Iter<'a, K, Fut, S> { + type Item = (&'a K, &'a Fut); + + fn next(&mut self) -> Option { + // self.0.next().map(Pin::get_ref) + self.0.next().map(|opt| (opt.0, Pin::get_ref(opt.1))) + } + + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +impl> ExactSizeIterator for Iter<'_, K, Fut, S> {} + +// SAFETY: we do nothing thread-local and there is no interior mutability, +// so the usual structural `Send`/`Sync` apply. +unsafe impl> Send for IterPinRef<'_, K, Fut, S> {} +unsafe impl> Sync for IterPinRef<'_, K, Fut, S> {} + +unsafe impl> Send for IterPinMut<'_, K, Fut, S> {} +unsafe impl> Sync for IterPinMut<'_, K, Fut, S> {} + +unsafe impl> Send for IntoIter {} +unsafe impl> Sync for IntoIter {} diff --git a/futures-util/src/stream/futures_unordered_internal/mod.rs b/futures-util/src/stream/futures_unordered_internal/mod.rs new file mode 100644 index 000000000..3b32e1dec --- /dev/null +++ b/futures-util/src/stream/futures_unordered_internal/mod.rs @@ -0,0 +1,672 @@ +//! An unbounded set of futures. +//! +//! 
This module is only available when the `std` or `alloc` feature of this +//! library is activated, and it is activated by default. + +/// This replaces futures_unordered as the main runner of unordered futures +/// The same futures_unordered api will now be a consumer of this module. +/// This module is changed from futures_unordered only in that Task structs contain a hashable key, +/// and that Task will be returned with its future's output when each future completes. +/// The whole Task must be returned, since we don't want the Key to need to be Copy or Clone, and +/// so the Key that is used to track the tasks in the consuming module must be the same instance of +/// the Key that is contained in the returned Task. +use crate::task::AtomicWaker; +use alloc::sync::{Arc, Weak}; +use core::cell::UnsafeCell; +use core::fmt::{self, Debug}; +use core::iter::FromIterator; +use core::marker::PhantomData; +use core::mem; +use core::pin::Pin; +use core::ptr; +use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst}; +use core::sync::atomic::{AtomicBool, AtomicPtr}; +use futures_core::future::Future; +use futures_core::stream::{FusedStream, Stream}; +use futures_core::task::{Context, Poll}; + +mod abort; + +mod iter; +#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352 +pub(super) use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; + +pub(crate) mod task; +use self::task::Task; + +mod ready_to_run_queue; +use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; + +/// A set of futures which may complete in any order. +/// +/// See [`FuturesOrdered`](crate::stream::FuturesOrdered) for a version of this +/// type that preserves a FIFO order. +/// +/// This structure is optimized to manage a large number of futures. +/// Futures managed by [`FuturesUnorderedInternal`] will only be polled when they +/// generate wake-up notifications. This reduces the required amount of work +/// needed to poll large numbers of futures. +/// +/// [`FuturesUnorderedInternal`] can be filled by [`collect`](Iterator::collect)ing an +/// iterator of futures into a [`FuturesUnorderedInternal`], or by +/// [`push`](FuturesUnorderedInternal::push)ing futures onto an existing +/// [`FuturesUnorderedInternal`]. When new futures are added, +/// [`poll_next`](Stream::poll_next) must be called in order to begin receiving +/// wake-ups for new futures. +/// +/// Note that you can create a ready-made [`FuturesUnorderedInternal`] via the +/// [`collect`](Iterator::collect) method, or you can start with an empty set +/// with the [`FuturesUnorderedInternal::new`] constructor. +/// +/// This type is only available when the `std` or `alloc` feature of this +/// library is activated, and it is activated by default. +#[must_use = "streams do nothing unless polled"] +pub(super) struct FuturesUnorderedInternal> { + ready_to_run_queue: Arc>, + head_all: AtomicPtr>, + is_terminated: AtomicBool, + pub(crate) inner: S, +} + +pub(crate) trait ReleasesTask { + fn release_task(&mut self, key: &K); + fn new() -> Self; +} + +unsafe impl> Send for FuturesUnorderedInternal {} +unsafe impl> Sync for FuturesUnorderedInternal {} +impl> Unpin for FuturesUnorderedInternal {} + +// FuturesUnorderedInternal is implemented using two linked lists. One which links all +// futures managed by a `FuturesUnorderedInternal` and one that tracks futures that have +// been scheduled for polling. 
The first linked list allows for thread safe +// insertion of nodes at the head as well as forward iteration, but is otherwise +// not thread safe and is only accessed by the thread that owns the +// `FuturesUnorderedInternal` value for any other operations. The second linked list is +// an implementation of the intrusive MPSC queue algorithm described by +// 1024cores.net. +// +// When a future is submitted to the set, a task is allocated and inserted in +// both linked lists. The next call to `poll_next` will (eventually) see this +// task and call `poll` on the future. +// +// Before a managed future is polled, the current context's waker is replaced +// with one that is aware of the specific future being run. This ensures that +// wake-up notifications generated by that specific future are visible to +// `FuturesUnorderedInternal`. When a wake-up notification is received, the task is +// inserted into the ready to run queue, so that its future can be polled later. +// +// Each task is wrapped in an `Arc` and thereby atomically reference counted. +// Also, each task contains an `AtomicBool` which acts as a flag that indicates +// whether the task is currently inserted in the atomic queue. When a wake-up +// notification is received, the task will only be inserted into the ready to +// run queue if it isn't inserted already. + +impl> Default for FuturesUnorderedInternal { + fn default() -> Self { + Self::new() + } +} + +impl> FuturesUnorderedInternal { + /// Constructs a new, empty [`FuturesUnorderedInternal`]. + /// + /// The returned [`FuturesUnorderedInternal`] does not contain any futures. + /// In this state, [`FuturesUnorderedInternal::poll_next`](Stream::poll_next) will + /// return [`Poll::Ready(None)`](Poll::Ready). + /// + /// Note that the key type over which this struct is generic does not have trait bounds + /// This is because, while I intend to use it as a key into hashmap structs, which would + /// require K: Hash + Eq, you could also use K for any other purpose. What is important is that + /// if you are storing the Task in another struct, if release_task is called, then you probably + /// want to remove the Task from anywhere else you're keeping it; use K for this. + pub(super) fn new() -> Self { + let stub = Arc::new(Task { + future: UnsafeCell::new(None), + key: UnsafeCell::new(None), + next_all: AtomicPtr::new(ptr::null_mut()), + prev_all: UnsafeCell::new(ptr::null()), + len_all: UnsafeCell::new(0), + next_ready_to_run: AtomicPtr::new(ptr::null_mut()), + queued: AtomicBool::new(true), + ready_to_run_queue: Weak::new(), + woken: AtomicBool::new(false), + }); + let stub_ptr = Arc::as_ptr(&stub); + let ready_to_run_queue = Arc::new(ReadyToRunQueue { + waker: AtomicWaker::new(), + head: AtomicPtr::new(stub_ptr as *mut _), + tail: UnsafeCell::new(stub_ptr), + stub, + }); + + Self { + head_all: AtomicPtr::new(ptr::null_mut()), + ready_to_run_queue, + is_terminated: AtomicBool::new(false), + inner: S::new(), + } + } + + /// Returns the number of futures contained in the set. + /// + /// This represents the total number of in-flight futures. + pub(super) fn len(&self) -> usize { + let (_, len) = self.atomic_load_head_and_len_all(); + len + } + + /// Returns `true` if the set contains no futures. + pub(super) fn is_empty(&self) -> bool { + // Relaxed ordering can be used here since we don't need to read from + // the head pointer, only check whether it is null. + self.head_all.load(Relaxed).is_null() + } + + /// Push a future into the set. 
+ /// + /// This method adds the given future to the set. This method will not + /// call [`poll`](core::future::Future::poll) on the submitted future. The caller must + /// ensure that [`FuturesUnorderedInternal::poll_next`](Stream::poll_next) is called + /// in order to receive wake-up notifications for the given future. + pub(super) fn push(&self, key: K, future: Fut) -> Arc> { + let task = Arc::new(Task { + future: UnsafeCell::new(Some(future)), + key: UnsafeCell::new(Some(key)), + next_all: AtomicPtr::new(self.pending_next_all()), + prev_all: UnsafeCell::new(ptr::null_mut()), + len_all: UnsafeCell::new(0), + next_ready_to_run: AtomicPtr::new(ptr::null_mut()), + queued: AtomicBool::new(true), + ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue), + woken: AtomicBool::new(false), + }); + + // Reset the `is_terminated` flag if we've previously marked ourselves + // as terminated. + self.is_terminated.store(false, Relaxed); + + // Right now our task has a strong reference count of 1. We transfer + // ownership of this reference count to our internal linked list + // and we'll reclaim ownership through the `unlink` method below. + let ptr = self.link(task.clone()); + + // We'll need to get the future "into the system" to start tracking it, + // e.g. getting its wake-up notifications going to us tracking which + // futures are ready. To do that we unconditionally enqueue it for + // polling here. + self.ready_to_run_queue.enqueue(ptr); + task + } + + /// Returns an iterator that allows inspecting each future in the set. + pub(super) fn iter(&self) -> Iter<'_, K, Fut, S> + where + Fut: Unpin, + { + Iter(Pin::new(self).iter_pin_ref()) + } + + /// Returns an iterator that allows inspecting each future in the set. + pub(super) fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, K, Fut, S> { + let (task, len) = self.atomic_load_head_and_len_all(); + let pending_next_all = self.pending_next_all(); + + IterPinRef { task, len, pending_next_all, _marker: PhantomData } + } + + /// Returns an iterator that allows modifying each future in the set. + pub(super) fn iter_mut(&mut self) -> IterMut<'_, K, Fut, S> + where + Fut: Unpin, + { + IterMut(Pin::new(self).iter_pin_mut()) + } + + /// Returns an iterator that allows modifying each future in the set. + pub(super) fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, K, Fut, S> { + // `head_all` can be accessed directly and we don't need to spin on + // `Task::next_all` since we have exclusive access to the set. + let task = *self.head_all.get_mut(); + let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; + + IterPinMut { task, len, _marker: PhantomData } + } + + /// Returns the current head node and number of futures in the list of all + /// futures within a context where access is shared with other threads + /// (mostly for use with the `len` and `iter_pin_ref` methods). + fn atomic_load_head_and_len_all(&self) -> (*const Task, usize) { + let task = self.head_all.load(Acquire); + let len = if task.is_null() { + 0 + } else { + unsafe { + (*task).spin_next_all(self.pending_next_all(), Acquire); + *(*task).len_all.get() + } + }; + + (task, len) + } + + /// Releases the task. It destroys the future inside and either drops + /// the `Arc` or transfers ownership to the ready to run queue. + /// The task this method is called on must have been unlinked before. 
+ pub(super) fn release_task(&mut self, task: Arc>) { + if let Some(key) = task.key() { + self.inner.release_task(key); + } + // `release_task` must only be called on unlinked tasks + debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); + unsafe { + debug_assert!((*task.prev_all.get()).is_null()); + } + + // The future is done, try to reset the queued flag. This will prevent + // `wake` from doing any work in the future + let prev = task.queued.swap(true, SeqCst); + + // Drop the future, even if it hasn't finished yet. This is safe + // because we're dropping the future on the thread that owns + // `FuturesUnorderedInternal`, which correctly tracks `Fut`'s lifetimes and + // such. + unsafe { + // Set to `None` rather than `take()`ing to prevent moving the + // future. + *task.future.get() = None; + } + + // If the queued flag was previously set, then it means that this task + // is still in our internal ready to run queue. We then transfer + // ownership of our reference count to the ready to run queue, and it'll + // come along and free it later, noticing that the future is `None`. + // + // If, however, the queued flag was *not* set then we're safe to + // release our reference count on the task. The queued flag was set + // above so all future `enqueue` operations will not actually + // enqueue the task, so our task will never see the ready to run queue + // again. The task itself will be deallocated once all reference counts + // have been dropped elsewhere by the various wakers that contain it. + if prev { + mem::forget(task); + } + } + + /// Insert a new task into the internal linked list. + fn link(&self, task: Arc>) -> *const Task { + // `next_all` should already be reset to the pending state before this + // function is called. + debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); + let ptr = Arc::into_raw(task); + + // Atomically swap out the old head node to get the node that should be + // assigned to `next_all`. + let next = self.head_all.swap(ptr as *mut _, AcqRel); + + unsafe { + // Store the new list length in the new node. + let new_len = if next.is_null() { + 1 + } else { + // Make sure `next_all` has been written to signal that it is + // safe to read `len_all`. + (*next).spin_next_all(self.pending_next_all(), Acquire); + *(*next).len_all.get() + 1 + }; + *(*ptr).len_all.get() = new_len; + + // Write the old head as the next node pointer, signaling to other + // threads that `len_all` and `next_all` are ready to read. + (*ptr).next_all.store(next, Release); + + // `prev_all` updates don't need to be synchronized, as the field is + // only ever used after exclusive access has been acquired. + if !next.is_null() { + *(*next).prev_all.get() = ptr; + } + } + + ptr + } + + /// Remove the task from the linked list tracking all tasks currently + /// managed by `FuturesUnorderedInternal`. + /// This method is unsafe because it has be guaranteed that `task` is a + /// valid pointer. + pub(crate) unsafe fn unlink(&mut self, task: *const Task) -> Arc> { + // Compute the new list length now in case we're removing the head node + // and won't be able to retrieve the correct length later. 
+ unsafe { + let head = *self.head_all.get_mut(); + debug_assert!(!head.is_null()); + let new_len = *(*head).len_all.get() - 1; + + let task = Arc::from_raw(task); + let next = task.next_all.load(Relaxed); + let prev = *task.prev_all.get(); + task.next_all.store(self.pending_next_all(), Relaxed); + *task.prev_all.get() = ptr::null_mut(); + + if !next.is_null() { + *(*next).prev_all.get() = prev; + } + + if !prev.is_null() { + (*prev).next_all.store(next, Relaxed); + } else { + *self.head_all.get_mut() = next; + } + + // Store the new list length in the head node. + let head = *self.head_all.get_mut(); + if !head.is_null() { + *(*head).len_all.get() = new_len; + } + task + } + } + + /// Returns the reserved value for `Task::next_all` to indicate a pending + /// assignment from the thread that inserted the task. + /// + /// `FuturesUnorderedInternal::link` needs to update `Task` pointers in an order + /// that ensures any iterators created on other threads can correctly + /// traverse the entire `Task` list using the chain of `next_all` pointers. + /// This could be solved with a compare-exchange loop that stores the + /// current `head_all` in `next_all` and swaps out `head_all` with the new + /// `Task` pointer if the head hasn't already changed. Under heavy thread + /// contention, this compare-exchange loop could become costly. + /// + /// An alternative is to initialize `next_all` to a reserved pending state + /// first, perform an atomic swap on `head_all`, and finally update + /// `next_all` with the old head node. Iterators will then either see the + /// pending state value or the correct next node pointer, and can reload + /// `next_all` as needed until the correct value is loaded. The number of + /// retries needed (if any) would be small and will always be finite, so + /// this should generally perform better than the compare-exchange loop. + /// + /// A valid `Task` pointer in the `head_all` list is guaranteed to never be + /// this value, so it is safe to use as a reserved value until the correct + /// value can be written. + fn pending_next_all(&self) -> *mut Task { + // The `ReadyToRunQueue` stub is never inserted into the `head_all` + // list, and its pointer value will remain valid for the lifetime of + // this `FuturesUnorderedInternal`, so we can make use of its value here. + Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _ + } +} + +impl> Stream for FuturesUnorderedInternal { + type Item = (Arc>, Fut::Output); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let len = self.len(); + + // Keep track of how many child futures we have polled, + // in case we want to forcibly yield. + let mut polled = 0; + let mut yielded = 0; + + // Ensure `parent` is correctly set. + self.ready_to_run_queue.waker.register(cx.waker()); + + loop { + // Safety: &mut self guarantees the mutual exclusion `dequeue` + // expects + let task = match unsafe { self.ready_to_run_queue.dequeue() } { + Dequeue::Empty => { + if self.is_empty() { + // We can only consider ourselves terminated once we + // have yielded a `None` + *self.is_terminated.get_mut() = true; + return Poll::Ready(None); + } + return Poll::Pending; + } + Dequeue::Inconsistent => { + // At this point, it may be worth yielding the thread & + // spinning a few times... but for now, just yield using the + // task system. 
+ cx.waker().wake_by_ref(); + return Poll::Pending; + } + Dequeue::Data(task) => task, + }; + + debug_assert!(task != self.ready_to_run_queue.stub()); + + // Safety: + // - `task` is a valid pointer. + // - We are the only thread that accesses the `UnsafeCell` that + // contains the future + let future = match unsafe { &mut *(*task).future.get() } { + Some(future) => future, + + // If the future has already gone away then we're just + // cleaning out this task. See the comment in + // `release_task` for more information, but we're basically + // just taking ownership of our reference count here. + None => { + // This case only happens when `release_task` was called + // for this task before and couldn't drop the task + // because it was already enqueued in the ready to run + // queue. + + // Safety: `task` is a valid pointer + let task = unsafe { Arc::from_raw(task) }; + + // Double check that the call to `release_task` really + // happened. Calling it required the task to be unlinked. + debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); + unsafe { + debug_assert!((*task.prev_all.get()).is_null()); + } + continue; + } + }; + + // Safety: `task` is a valid pointer + let task = unsafe { self.unlink(task) }; + + // Unset queued flag: This must be done before polling to ensure + // that the future's task gets rescheduled if it sends a wake-up + // notification **during** the call to `poll`. + let prev = task.queued.swap(false, SeqCst); + assert!(prev); + + // We're going to need to be very careful if the `poll` + // method below panics. We need to (a) not leak memory and + // (b) ensure that we still don't have any use-after-frees. To + // manage this we do a few things: + // + // * A "bomb" is created which if dropped abnormally will call + // `release_task`. That way we'll be sure the memory management + // of the `task` is managed correctly. In particular + // `release_task` will drop the future. This ensures that it is + // dropped on this thread and not accidentally on a different + // thread (bad). + // * We unlink the task from our internal queue to preemptively + // assume it'll panic, in which case we'll want to discard it + // regardless. + struct Bomb<'a, K, Fut, S: ReleasesTask> { + queue: &'a mut FuturesUnorderedInternal, + task: Option>>, + } + + impl> Drop for Bomb<'_, K, Fut, S> { + fn drop(&mut self) { + if let Some(task) = self.task.take() { + self.queue.release_task(task); + } + } + } + + let mut bomb = Bomb { task: Some(task), queue: &mut *self }; + + // Poll the underlying future with the appropriate waker + // implementation. This is where a large bit of the unsafety + // starts to stem from internally. The waker is basically just + // our `Arc>` and can schedule the future for polling by + // enqueuing itself in the ready to run queue. + // + // Critically though `Task` won't actually access `Fut`, the + // future, while it's floating around inside of wakers. + // These structs will basically just use `Fut` to size + // the internal allocation, appropriately accessing fields and + // deallocating the task if need be. + let res = { + let task = bomb.task.as_ref().unwrap(); + // We are only interested in whether the future is awoken before it + // finishes polling, so reset the flag here. + task.woken.store(false, Relaxed); + // SAFETY: see the comments of Bomb and this block. 
+ let waker = unsafe { Task::waker_ref(task) }; + let mut cx = Context::from_waker(&waker); + + // Safety: We won't move the future ever again + let future = unsafe { Pin::new_unchecked(future) }; + + future.poll(&mut cx) + }; + polled += 1; + + match res { + Poll::Pending => { + let task = bomb.task.take().unwrap(); + // If the future was awoken during polling, we assume + // the future wanted to explicitly yield. + yielded += task.woken.load(Relaxed) as usize; + bomb.queue.link(task); + + // If a future yields, we respect it and yield here. + // If all futures have been polled, we also yield here to + // avoid starving other tasks waiting on the executor. + // (polling the same future twice per iteration may cause + // the problem: https://github.com/rust-lang/futures-rs/pull/2333) + if yielded >= 2 || polled == len { + cx.waker().wake_by_ref(); + return Poll::Pending; + } + continue; + } + Poll::Ready(output) => { + let task = bomb.task.take().unwrap(); + unsafe { (*task.future.get()).take() }; + return Poll::Ready(Some((task, output))); + } + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let len = self.len(); + (len, Some(len)) + } +} + +impl> Debug for FuturesUnorderedInternal { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "FuturesUnorderedInternal {{ ... }}") + } +} + +impl> FuturesUnorderedInternal { + /// Clears the set, removing all futures. + pub(super) fn clear(&mut self) { + *self = Self::new(); + } +} + +impl> Drop for FuturesUnorderedInternal { + fn drop(&mut self) { + // When a `FuturesUnorderedInternal` is dropped we want to drop all futures + // associated with it. At the same time though there may be tons of + // wakers flying around which contain `Task` references + // inside them. We'll let those naturally get deallocated. + while !self.head_all.get_mut().is_null() { + let head = *self.head_all.get_mut(); + let task = unsafe { self.unlink(head) }; + self.release_task(task); + } + + // Note that at this point we could still have a bunch of tasks in the + // ready to run queue. None of those tasks, however, have futures + // associated with them so they're safe to destroy on any thread. At + // this point the `FuturesUnorderedInternal` struct, the owner of the one strong + // reference to the ready to run queue will drop the strong reference. + // At that point whichever thread releases the strong refcount last (be + // it this thread or some other thread as part of an `upgrade`) will + // clear out the ready to run queue and free all remaining tasks. + // + // While that freeing operation isn't guaranteed to happen here, it's + // guaranteed to happen "promptly" as no more "blocking work" will + // happen while there's a strong refcount held. + } +} + +impl<'a, K, Fut: Unpin, S: ReleasesTask> IntoIterator + for &'a FuturesUnorderedInternal +{ + type Item = (&'a K, &'a Fut); + type IntoIter = Iter<'a, K, Fut, S>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, K, Fut: Unpin, S: ReleasesTask> IntoIterator + for &'a mut FuturesUnorderedInternal +{ + type Item = (&'a K, &'a mut Fut); + type IntoIter = IterMut<'a, K, Fut, S>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl> IntoIterator for FuturesUnorderedInternal { + type Item = (K, Fut); + type IntoIter = IntoIter; + + fn into_iter(mut self) -> Self::IntoIter { + // `head_all` can be accessed directly and we don't need to spin on + // `Task::next_all` since we have exclusive access to the set. 
+ let task = *self.head_all.get_mut(); + let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; + + IntoIter { len, inner: self } + } +} + +impl> FromIterator<(K, Fut)> for FuturesUnorderedInternal { + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let acc = Self::new(); + iter.into_iter().fold(acc, |acc, (key, fut)| { + acc.push(key, fut); + acc + }) + } +} + +impl> FusedStream for FuturesUnorderedInternal { + fn is_terminated(&self) -> bool { + self.is_terminated.load(Relaxed) + } +} + +impl> Extend<(K, Fut)> for FuturesUnorderedInternal { + fn extend(&mut self, iter: I) + where + I: IntoIterator, + { + for (key, fut) in iter { + self.push(key, fut); + } + } +} diff --git a/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs b/futures-util/src/stream/futures_unordered_internal/ready_to_run_queue.rs similarity index 76% rename from futures-util/src/stream/futures_unordered/ready_to_run_queue.rs rename to futures-util/src/stream/futures_unordered_internal/ready_to_run_queue.rs index 77abdf4ea..47056f167 100644 --- a/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs +++ b/futures-util/src/stream/futures_unordered_internal/ready_to_run_queue.rs @@ -8,27 +8,27 @@ use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; use super::abort::abort; use super::task::Task; -pub(super) enum Dequeue { - Data(*const Task), +pub(super) enum Dequeue { + Data(*const Task), Empty, Inconsistent, } -pub(super) struct ReadyToRunQueue { - // The waker of the task using `FuturesUnordered`. +pub(super) struct ReadyToRunQueue { + // The waker of the task using `FuturesUnorderedInternal`. pub(super) waker: AtomicWaker, // Head/tail of the readiness queue - pub(super) head: AtomicPtr>, - pub(super) tail: UnsafeCell<*const Task>, - pub(super) stub: Arc>, + pub(super) head: AtomicPtr>, + pub(super) tail: UnsafeCell<*const Task>, + pub(super) stub: Arc>, } /// An MPSC queue into which the tasks containing the futures are inserted /// whenever the future inside is scheduled for polling. -impl ReadyToRunQueue { +impl ReadyToRunQueue { /// The enqueue function from the 1024cores intrusive MPSC queue algorithm. - pub(super) fn enqueue(&self, task: *const Task) { + pub(super) fn enqueue(&self, task: *const Task) { unsafe { debug_assert!((*task).queued.load(Relaxed)); @@ -46,7 +46,7 @@ impl ReadyToRunQueue { /// /// Note that this is unsafe as it required mutual exclusion (only one /// thread can call this) to be guaranteed elsewhere. - pub(super) unsafe fn dequeue(&self) -> Dequeue { + pub(super) unsafe fn dequeue(&self) -> Dequeue { unsafe { let mut tail = *self.tail.get(); let mut next = (*tail).next_ready_to_run.load(Acquire); @@ -84,20 +84,18 @@ impl ReadyToRunQueue { } } - pub(super) fn stub(&self) -> *const Task { + pub(super) fn stub(&self) -> *const Task { Arc::as_ptr(&self.stub) } } -impl Drop for ReadyToRunQueue { +impl Drop for ReadyToRunQueue { fn drop(&mut self) { // Once we're in the destructor for `Inner` we need to clear out // the ready to run queue of tasks if there's anything left in there. - // - // Note that each task has a strong reference count associated with it - // which is owned by the ready to run queue. All tasks should have had - // their futures dropped already by the `FuturesUnordered` destructor - // above, so we're just pulling out tasks and dropping their refcounts. 
+ + // All tasks have had their futures dropped already by the `FuturesUnorderedInternal` + // destructor above, and we have &mut self, so this is safe. unsafe { loop { match self.dequeue() { diff --git a/futures-util/src/stream/futures_unordered/task.rs b/futures-util/src/stream/futures_unordered_internal/task.rs similarity index 72% rename from futures-util/src/stream/futures_unordered/task.rs rename to futures-util/src/stream/futures_unordered_internal/task.rs index 2ae4cb7d8..bfb436a72 100644 --- a/futures-util/src/stream/futures_unordered/task.rs +++ b/futures-util/src/stream/futures_unordered_internal/task.rs @@ -1,22 +1,28 @@ use alloc::sync::{Arc, Weak}; +use core::borrow::Borrow; use core::cell::UnsafeCell; +use core::ops::Deref; use core::sync::atomic::Ordering::{self, Relaxed, SeqCst}; use core::sync::atomic::{AtomicBool, AtomicPtr}; use super::abort::abort; use super::ReadyToRunQueue; use crate::task::ArcWake; +use core::hash::Hash; -pub(super) struct Task { +pub(crate) struct Task { // The future - pub(super) future: UnsafeCell>, + pub(crate) future: UnsafeCell>, + + // The key + pub(crate) key: UnsafeCell>, // Next pointer for linked list tracking all active tasks (use // `spin_next_all` to read when access is shared across threads) - pub(super) next_all: AtomicPtr>, + pub(super) next_all: AtomicPtr>, // Previous task in linked list tracking all active tasks - pub(super) prev_all: UnsafeCell<*const Task>, + pub(super) prev_all: UnsafeCell<*const Task>, // Length of the linked list tracking all active tasks when this node was // inserted (use `spin_next_all` to synchronize before reading when access @@ -24,10 +30,10 @@ pub(super) struct Task { pub(super) len_all: UnsafeCell, // Next pointer in ready to run queue - pub(super) next_ready_to_run: AtomicPtr>, + pub(super) next_ready_to_run: AtomicPtr>, // Queue that we'll be enqueued to when woken - pub(super) ready_to_run_queue: Weak>, + pub(super) ready_to_run_queue: Weak>, // Whether or not this task is currently in the ready to run queue pub(super) queued: AtomicBool, @@ -43,10 +49,10 @@ pub(super) struct Task { // // The parent (`super`) module is trusted not to access `future` // across different threads. -unsafe impl Send for Task {} -unsafe impl Sync for Task {} +unsafe impl Send for Task {} +unsafe impl Sync for Task {} -impl ArcWake for Task { +impl ArcWake for Task { fn wake_by_ref(arc_self: &Arc) { let inner = match arc_self.ready_to_run_queue.upgrade() { Some(inner) => inner, @@ -63,9 +69,9 @@ impl ArcWake for Task { // as it'll want to come along and run our task later. // // Note that we don't change the reference count of the task here, - // we merely enqueue the raw pointer. The `FuturesUnordered` + // we merely enqueue the raw pointer. The `FuturesUnorderedInternal` // implementation guarantees that if we set the `queued` flag that - // there's a reference count held by the main `FuturesUnordered` queue + // there's a reference count held by the main `FuturesUnorderedInternal` queue // still. let prev = arc_self.queued.swap(true, SeqCst); if !prev { @@ -75,7 +81,7 @@ impl ArcWake for Task { } } -impl Task { +impl Task { /// Returns a waker reference for this task without cloning the Arc. 
pub(super) unsafe fn waker_ref(this: &Arc) -> waker_ref::WakerRef<'_> { unsafe { waker_ref::waker_ref(this) } @@ -104,17 +110,23 @@ impl Task { } } } + pub(crate) fn key(&self) -> Option<&K> { + unsafe { (&*self.key.get()).as_ref() } + } + pub(crate) fn take_key(&self) -> K { + unsafe { (*self.key.get()).take().unwrap() } + } } -impl Drop for Task { +impl Drop for Task { fn drop(&mut self) { - // Since `Task` is sent across all threads for any lifetime, + // Since `Task` is sent across all threads for any lifetime, // regardless of `Fut`, we, to guarantee memory safety, can't actually // touch `Fut` at any time except when we have a reference to the - // `FuturesUnordered` itself . + // `FuturesUnorderedInternal` itself . // // Consequently it *should* be the case that we always drop futures from - // the `FuturesUnordered` instance. This is a bomb, just in case there's + // the `FuturesUnorderedInternal` instance. This is a bomb, just in case there's // a bug in that logic. unsafe { if (*self.future.get()).is_some() { @@ -124,6 +136,55 @@ impl Drop for Task { } } +// Wrapper struct; exists effectively to implement hash on the type Arc +pub(crate) struct HashTask { + pub(crate) inner: Arc>, +} + +impl From>> for HashTask { + fn from(inner: Arc>) -> Self { + HashTask { inner } + } +} + +impl HashTask { + fn key(&self) -> Option<&K> { + Task::key(&*self) + } +} + +impl Deref for HashTask { + type Target = Task; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl Borrow for HashTask { + fn borrow(&self) -> &K { + // Never use the borrowed form after the key has been removed from the task + // IE. The Stub task never goes into the HashSet + // Or removing Task from HashSet using key, after key removed from task + unsafe { (*self.key.get()).as_ref().unwrap() } + } +} + +impl Hash for HashTask { + fn hash(&self, state: &mut H) { + unsafe { (*self.key.get()).as_ref() }.unwrap().hash(state) + } +} + +impl PartialEq for HashTask { + fn eq(&self, other: &Self) -> bool { + self.key() == other.key() + } +} +impl Eq for HashTask {} +unsafe impl Send for HashTask {} +unsafe impl Sync for HashTask {} + mod waker_ref { use alloc::sync::Arc; use core::marker::PhantomData; diff --git a/futures-util/src/stream/mapped_futures/iter.rs b/futures-util/src/stream/mapped_futures/iter.rs new file mode 100644 index 000000000..101c16656 --- /dev/null +++ b/futures-util/src/stream/mapped_futures/iter.rs @@ -0,0 +1,117 @@ +use super::TaskSet; +use crate::stream::futures_unordered_internal; +use core::hash::Hash; +use core::pin::Pin; + +/// Mutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub struct IterPinMut<'a, K: Hash + Eq, Fut> { + pub(super) inner: futures_unordered_internal::IterPinMut<'a, K, Fut, TaskSet>, +} + +/// Mutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub struct IterMut<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinMut<'a, K, Fut>); + +/// Immutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub struct IterPinRef<'a, K: Hash + Eq, Fut> { + pub(super) inner: futures_unordered_internal::IterPinRef<'a, K, Fut, TaskSet>, +} + +/// Immutable iterator over all the futures in the unordered set. +#[derive(Debug)] +pub struct Iter<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinRef<'a, K, Fut>); + +/// Owned iterator over all futures in the unordered set. +#[derive(Debug)] +pub struct IntoIter { + pub(super) inner: futures_unordered_internal::IntoIter>, +} + +/// Immutable iterator over all keys in the mapping. 
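+///
+/// Note that the iteration order is unspecified: keys are yielded in the
+/// order of the set's internal task list, not in any key-derived order.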
+#[derive(Debug)] +pub struct Keys<'a, K: Hash + Eq, Fut> { + pub(super) inner: futures_unordered_internal::IterPinRef<'a, K, Fut, TaskSet>, +} + +impl Iterator for IntoIter { + type Item = (K, Fut); + + fn next(&mut self) -> Option { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl ExactSizeIterator for IntoIter {} + +impl<'a, K: Hash + Eq, Fut> Iterator for IterPinMut<'a, K, Fut> { + type Item = (&'a K, Pin<&'a mut Fut>); + + fn next(&mut self) -> Option { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option) { + (self.inner.len, Some(self.inner.len)) + } +} + +impl ExactSizeIterator for IterPinMut<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for IterMut<'a, K, Fut> { + type Item = (&'a K, &'a mut Fut); + + fn next(&mut self) -> Option { + self.0.next().map(|(key, fut_pin)| (key, Pin::get_mut(fut_pin))) + } + + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +impl ExactSizeIterator for IterMut<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut> Iterator for IterPinRef<'a, K, Fut> { + type Item = (&'a K, Pin<&'a Fut>); + + fn next(&mut self) -> Option { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl ExactSizeIterator for IterPinRef<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for Iter<'a, K, Fut> { + type Item = (&'a K, &'a Fut); + + fn next(&mut self) -> Option { + // self.0.next() + self.0.next().map(|(key, fut_pin)| (key, Pin::get_ref(fut_pin))) + } + + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +impl ExactSizeIterator for Iter<'_, K, Fut> {} + +impl ExactSizeIterator for Keys<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut> Iterator for Keys<'a, K, Fut> { + type Item = &'a K; + + fn next(&mut self) -> Option { + self.inner.next().map(|opt| opt.0) + } +} diff --git a/futures-util/src/stream/mapped_futures/mod.rs b/futures-util/src/stream/mapped_futures/mod.rs new file mode 100644 index 000000000..8fe764620 --- /dev/null +++ b/futures-util/src/stream/mapped_futures/mod.rs @@ -0,0 +1,466 @@ +//! An unbounded map of futures. + +use alloc::sync::Arc; +use core::fmt::{self, Debug}; +use core::hash::Hash; +use core::iter::FromIterator; +use core::pin::Pin; +use futures_core::future::Future; +use futures_core::stream::{FusedStream, Stream}; +use futures_core::task::{Context, Poll}; +use std::collections::HashSet; + +mod iter; +use self::iter::Keys; +#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352 +pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; +use crate::stream::futures_unordered_internal::task::HashTask; + +use super::futures_unordered_internal::{FuturesUnorderedInternal, ReleasesTask}; + +/// A map of futures which may complete in any order. +/// +/// This structure is optimized to manage a large number of futures. +/// Futures managed by [`MappedFutures`] will only be polled when they +/// generate wake-up notifications. This reduces the required amount of work +/// needed to poll large numbers of futures. +/// +/// [`MappedFutures`] can be filled by [`collect`](Iterator::collect)ing an +/// iterator of futures into a [`MappedFutures`], or by +/// [`insert`](MappedFutures::insert)ing futures onto an existing +/// [`MappedFutures`]. When new futures are added, +/// [`poll_next`](Stream::poll_next) must be called in order to begin receiving +/// wake-ups for new futures. 
+/// +/// Note that you can create a ready-made [`MappedFutures`] via the +/// [`collect`](Iterator::collect) method, or you can start with an empty set +/// with the [`MappedFutures::new`] constructor. +#[must_use = "streams do nothing unless polled"] +pub struct MappedFutures { + inner: FuturesUnorderedInternal>, +} + +struct TaskSet { + inner: HashSet>, +} + +impl Debug for TaskSet { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "TaskSet {{ ... }}") + } +} + +impl ReleasesTask for TaskSet { + fn release_task(&mut self, key: &K) { + self.inner.remove(key); + } + + fn new() -> Self { + Self { inner: HashSet::new() } + } +} + +unsafe impl Send for MappedFutures {} +unsafe impl Sync for MappedFutures {} +impl Unpin for MappedFutures {} + +// MappedFutures is implemented using two linked lists. One which links all +// futures managed by a `MappedFutures` and one that tracks futures that have +// been scheduled for polling. The first linked list allows for thread safe +// insertion of nodes at the head as well as forward iteration, but is otherwise +// not thread safe and is only accessed by the thread that owns the +// `MappedFutures` value for any other operations. The second linked list is +// an implementation of the intrusive MPSC queue algorithm described by +// 1024cores.net. +// +// When a future is submitted to the set, a task is allocated and inserted in +// both linked lists. The next call to `poll_next` will (eventually) see this +// task and call `poll` on the future. +// +// Before a managed future is polled, the current context's waker is replaced +// with one that is aware of the specific future being run. This ensures that +// wake-up notifications generated by that specific future are visible to +// `MappedFutures`. When a wake-up notification is received, the task is +// inserted into the ready to run queue, so that its future can be polled later. +// +// Each task is wrapped in an `Arc` and thereby atomically reference counted. +// Also, each task contains an `AtomicBool` which acts as a flag that indicates +// whether the task is currently inserted in the atomic queue. When a wake-up +// notification is received, the task will only be inserted into the ready to +// run queue if it isn't inserted already. + +impl Default for MappedFutures { + fn default() -> Self { + Self::new() + } +} + +impl MappedFutures { + /// Constructs a new, empty [`MappedFutures`]. + /// + /// The returned [`MappedFutures`] does not contain any futures. + /// In this state, [`MappedFutures::poll_next`](Stream::poll_next) will + /// return [`Poll::Ready(None)`](Poll::Ready). + pub fn new() -> Self { + Self { inner: FuturesUnorderedInternal::new() } + } + + /// Returns the number of futures contained in the set. + /// + /// This represents the total number of in-flight futures. + pub fn len(&self) -> usize { + self.inner.len() + } + + /// Returns `true` if the set contains no futures. + pub fn is_empty(&self) -> bool { + // Relaxed ordering can be used here since we don't need to read from + // the head pointer, only check whether it is null. + self.inner.is_empty() + } + + /// This is a code stank + fn set(&mut self) -> &mut HashSet> { + &mut self.inner.inner.inner + } + + /// Insert a future into the set. + /// + /// This method adds the given future to the set. This method will not + /// call [`poll`](core::future::Future::poll) on the submitted future. 
The caller must + /// ensure that [`MappedFutures::poll_next`](Stream::poll_next) is called + /// in order to receive wake-up notifications for the given future. + /// + /// This method will remove and drop a future that is already mapped to the provided key. + /// Returns true if another future was not removed to make room for the provided future. + pub fn insert(&mut self, key: K, future: Fut) -> bool { + let replacing = self.cancel(&key); + let task = self.inner.push(key, future); + // self.inner.self.hash_set.insert(task.into()); + self.set().insert(task.into()); + !replacing + } + + /// Insert a future into the set and return the displaced future, if there was one. + /// + /// This method adds the given future to the set. This method will not + /// call [`poll`](core::future::Future::poll) on the submitted future. The caller must + /// ensure that [`MappedFutures::poll_next`](Stream::poll_next) is called + /// in order to receive wake-up notifications for the given future. + /// Returns true if another future was ma + pub fn replace(&mut self, key: K, future: Fut) -> Option + where + Fut: Unpin, + { + let replacing = self.remove(&key); + let task = self.inner.push(key, future); + // self.hash_set.insert(task.into()); + self.set().insert(task.into()); + replacing + } + + /// Remove a future from the set, dropping it. + /// + /// Returns true if a future was cancelled. + pub fn cancel(&mut self, key: &K) -> bool { + // if let Some(task) = self.hash_set.get(key) { + if let Some(task) = self.set().take(key) { + unsafe { + if (*task.future.get()).is_some() { + self.inner.unlink(Arc::as_ptr(&task.inner)); + self.inner.release_task(task.inner.clone()); + return true; + } + } + } + false + } + + /// Remove a future from the set and return it. + pub fn remove(&mut self, key: &K) -> Option + where + Fut: Unpin, + { + // if let Some(task) = self.hash_set.get(key) { + if let Some(task) = self.set().take(key) { + unsafe { + let fut = (*task.future.get()).take().unwrap(); + self.inner.unlink(Arc::as_ptr(&task.inner)); + self.inner.release_task(task.inner.clone()); + return Some(fut); + } + } + None + } + + /// Returns `true` if the map contains a future for the specified key. + pub fn contains(&mut self, key: &K) -> bool { + self.set().contains(key) + } + + /// Get a pinned mutable reference to the mapped future. + pub fn get_pin_mut(&mut self, key: &K) -> Option> { + // if let Some(task_ref) = self.hash_set.get(key) { + if let Some(task_ref) = self.set().get(key) { + unsafe { + if let Some(fut) = &mut *task_ref.inner.future.get() { + return Some(Pin::new_unchecked(fut)); + } + } + } + None + } + + /// Get a pinned mutable reference to the mapped future. + pub fn get_mut(&mut self, key: &K) -> Option<&mut Fut> + where + Fut: Unpin, + { + // if let Some(task_ref) = self.hash_set.get(key) { + if let Some(task_ref) = self.set().get(key) { + unsafe { + if let Some(fut) = &mut *task_ref.inner.future.get() { + return Some(fut); + } + } + } + None + } + + /// Get a shared reference to the mapped future. + pub fn get(&mut self, key: &K) -> Option<&Fut> { + // if let Some(task_ref) = self.hash_set.get(key) { + if let Some(task_ref) = self.set().get(key) { + unsafe { + if let Some(fut) = &*task_ref.inner.future.get() { + return Some(fut); + } + } + } + None + } + + /// Get a pinned shared reference to the mapped future. 
+ pub fn get_pin(&mut self, key: &K) -> Option> { + // if let Some(task_ref) = self.hash_set.get(key) { + if let Some(task_ref) = self.set().get(key) { + unsafe { + if let Some(fut) = &*task_ref.future.get() { + return Some(Pin::new_unchecked(fut)); + } + } + } + None + } + + /// Returns an iterator of keys in the mapping. + pub fn keys<'a>(&'a self) -> Keys<'a, K, Fut> { + Keys { + // inner: self.hash_set.iter().map(Box::new(|hash_task| HashTask::key_unwrap(hash_task))), + // inner: self.set().iter().map(Box::new(|hash_task| HashTask::key_unwrap(hash_task))), + // inner: self.set().keys(), + inner: Pin::new(&self.inner).iter_pin_ref(), + } + } + + /// Returns an iterator that allows inspecting each future in the set. + pub fn iter(&self) -> Iter<'_, K, Fut> + where + Fut: Unpin, + { + Iter(Pin::new(self).iter_pin_ref()) + } + + /// Returns an iterator that allows inspecting each future in the set. + pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, K, Fut> { + // let (task, len) = self.atomic_load_head_and_len_all(); + // let pending_next_all = self.pending_next_all(); + + // IterPinRef { task, len, pending_next_all, _marker: PhantomData } + IterPinRef { inner: unsafe { self.map_unchecked(|thing| &thing.inner) }.iter_pin_ref() } + // IterPinRef { task: (), len: (), pending_next_all: (), _marker: () } + } + + /// Returns an iterator that allows modifying each future in the set. + pub fn iter_mut(&mut self) -> IterMut<'_, K, Fut> + where + Fut: Unpin, + { + IterMut(Pin::new(self).iter_pin_mut()) + } + + /// Returns an iterator that allows modifying each future in the set. + pub fn iter_pin_mut(self: Pin<&mut Self>) -> IterPinMut<'_, K, Fut> { + // `head_all` can be accessed directly and we don't need to spin on + // `Task::next_all` since we have exclusive access to the set. + + // IterPinMut { inner: Pin::new(&mut self.inner).iter_pin_mut() } + IterPinMut { + inner: unsafe { self.map_unchecked_mut(|thing| &mut thing.inner) }.iter_pin_mut(), + } + // IterPinMut { task, len, _marker: PhantomData } + } +} + +impl Stream for MappedFutures { + type Item = (K, Fut::Output); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match std::task::ready!(Pin::new(&mut self.inner).poll_next(cx)) { + Some((task, output)) => { + // let key = (*(**task).key.get()).take().unwrap(); + // let key = task.take_key(); + // MappedFutures::set(self.get_mut()).remove(&key); + MappedFutures::set(self.get_mut()).remove(task.key().unwrap()); + Poll::Ready(Some((task.take_key(), output))) + } + None => Poll::Ready(None), + } + } + + fn size_hint(&self) -> (usize, Option) { + let len = self.len(); + (len, Some(len)) + } +} + +impl Debug for MappedFutures { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "MappedFutures {{ ... }}") + } +} + +impl MappedFutures { + /// Clears the set, removing all futures. 
+ pub fn clear(&mut self) { + self.inner.clear(); + self.set().clear(); + } +} + +impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a MappedFutures { + type Item = (&'a K, &'a Fut); + type IntoIter = Iter<'a, K, Fut>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a mut MappedFutures { + type Item = (&'a K, &'a mut Fut); + type IntoIter = IterMut<'a, K, Fut>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl IntoIterator for MappedFutures { + type Item = (K, Fut); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter { inner: self.inner.into_iter() } + } +} + +impl FromIterator<(K, Fut)> for MappedFutures { + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let acc = Self::new(); + iter.into_iter().fold(acc, |mut acc, (key, item)| { + acc.insert(key, item); + acc + }) + } +} + +impl FusedStream for MappedFutures { + fn is_terminated(&self) -> bool { + self.inner.is_terminated() + } +} + +impl Extend<(K, Fut)> for MappedFutures { + fn extend(&mut self, iter: I) + where + I: IntoIterator, + { + for (key, item) in iter { + self.insert(key, item); + } + } +} + +#[cfg(test)] +pub mod tests { + use crate::stream::*; + use futures::executor::block_on; + use futures::future::LocalBoxFuture; + use futures_timer::Delay; + // use futures_util::StreamExt; + // use crate::StreamExt; + use std::boxed::Box; + use std::time::Duration; + + fn insert_millis(futs: &mut MappedFutures, key: u32, millis: u64) { + futs.insert(key, Delay::new(Duration::from_millis(millis))); + } + + fn insert_millis_pinned( + futs: &mut MappedFutures>, + key: u32, + millis: u64, + ) { + futs.insert(key, Box::pin(Delay::new(Duration::from_millis(millis)))); + } + + #[test] + fn map_futures() { + let mut futures: MappedFutures = MappedFutures::new(); + insert_millis(&mut futures, 1, 50); + insert_millis(&mut futures, 2, 50); + insert_millis(&mut futures, 3, 150); + insert_millis(&mut futures, 4, 200); + + assert_eq!(block_on(futures.next()).unwrap().0, 1); + assert_eq!(futures.cancel(&3), true); + assert_eq!(block_on(futures.next()).unwrap().0, 2); + assert_eq!(block_on(futures.next()).unwrap().0, 4); + assert_eq!(block_on(futures.next()), None); + } + + #[test] + fn remove_pinned() { + let mut futures: MappedFutures> = MappedFutures::new(); + insert_millis_pinned(&mut futures, 1, 50); + insert_millis_pinned(&mut futures, 3, 150); + insert_millis_pinned(&mut futures, 4, 200); + + assert_eq!(block_on(futures.next()).unwrap().0, 1); + assert_eq!(block_on(futures.remove(&3).unwrap()), ()); + insert_millis_pinned(&mut futures, 2, 60); + assert_eq!(block_on(futures.next()).unwrap().0, 4); + assert_eq!(block_on(futures.next()).unwrap().0, 2); + assert_eq!(block_on(futures.next()), None); + } + + #[test] + fn mutate() { + let mut futures: MappedFutures = MappedFutures::new(); + insert_millis(&mut futures, 1, 500); + insert_millis(&mut futures, 2, 1000); + insert_millis(&mut futures, 3, 1500); + insert_millis(&mut futures, 4, 2000); + + assert_eq!(block_on(futures.next()).unwrap().0, 1); + futures.get_mut(&3).unwrap().reset(Duration::from_millis(300)); + assert_eq!(block_on(futures.next()).unwrap().0, 3); + assert_eq!(block_on(futures.next()).unwrap().0, 2); + assert_eq!(block_on(futures.next()).unwrap().0, 4); + assert_eq!(block_on(futures.next()), None); + } +} diff --git a/futures-util/src/stream/mod.rs b/futures-util/src/stream/mod.rs index 789e1ad22..736267aa7 100644 --- 
a/futures-util/src/stream/mod.rs
+++ b/futures-util/src/stream/mod.rs
@@ -118,11 +118,21 @@ pub use self::futures_ordered::FuturesOrdered;
 #[cfg_attr(target_os = "none", cfg(target_has_atomic = "ptr"))]
 #[cfg(feature = "alloc")]
 pub mod futures_unordered;
+#[cfg_attr(target_os = "none", cfg(target_has_atomic = "ptr"))]
+#[cfg(feature = "alloc")]
+mod futures_unordered_internal;
 #[cfg_attr(target_os = "none", cfg(target_has_atomic = "ptr"))]
 #[cfg(feature = "alloc")]
 #[doc(inline)]
 pub use self::futures_unordered::FuturesUnordered;
 
+#[cfg_attr(target_os = "none", cfg(target_has_atomic = "ptr"))]
+#[cfg(feature = "std")]
+pub mod mapped_futures;
+#[cfg(feature = "std")]
+#[doc(inline)]
+pub use self::mapped_futures::MappedFutures;
+
 #[cfg_attr(target_os = "none", cfg(target_has_atomic = "ptr"))]
 #[cfg(feature = "alloc")]
 pub mod select_all;
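
// The `ReadyToRunQueue` touched above is an intrusive variant of the 1024cores
// MPSC queue: producers (wakers) push with a single atomic swap on `head`, the
// single consumer pops from `tail`, and `Dequeue::Inconsistent` covers the
// brief window in which a producer has swapped `head` but not yet linked the
// previous node's `next`. The sketch below is a simplified, non-intrusive
// illustration of that algorithm, not the Arc-counted implementation from this
// patch; the `MpscQueue`/`Pop` names are made up for the example.
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::{AcqRel, Acquire, Release}};

struct Node<T> {
    next: AtomicPtr<Node<T>>,
    value: Option<T>,
}

enum Pop<T> {
    Data(T),
    Empty,
    Inconsistent,
}

struct MpscQueue<T> {
    head: AtomicPtr<Node<T>>, // producers push here (like `ReadyToRunQueue::head`)
    tail: *mut Node<T>,       // consumer-only cursor (like `ReadyToRunQueue::tail`)
}

impl<T> MpscQueue<T> {
    fn new() -> Self {
        // A stub node lets `push` avoid any special empty-queue case.
        let stub = Box::into_raw(Box::new(Node {
            next: AtomicPtr::new(ptr::null_mut()),
            value: None,
        }));
        Self { head: AtomicPtr::new(stub), tail: stub }
    }

    // May be called from any number of producers (`&self` only).
    fn push(&self, value: T) {
        let node = Box::into_raw(Box::new(Node {
            next: AtomicPtr::new(ptr::null_mut()),
            value: Some(value),
        }));
        // Publish the node first, then link the previous head to it; the gap
        // between these two steps is what `Inconsistent` reports below.
        let prev = self.head.swap(node, AcqRel);
        unsafe { (*prev).next.store(node, Release) };
    }

    // Single consumer only; `&mut self` enforces that statically here.
    fn pop(&mut self) -> Pop<T> {
        unsafe {
            let tail = self.tail;
            let next = (*tail).next.load(Acquire);
            if !next.is_null() {
                // Advance past the old tail (the stub or an already consumed
                // node) and take the value stored in the new tail.
                self.tail = next;
                drop(Box::from_raw(tail));
                return Pop::Data((*next).value.take().unwrap());
            }
            if self.head.load(Acquire) == tail {
                Pop::Empty
            } else {
                // A producer is mid-`push`: `head` moved but `next` is unset.
                Pop::Inconsistent
            }
        }
    }
}

impl<T> Drop for MpscQueue<T> {
    fn drop(&mut self) {
        while let Pop::Data(_) = self.pop() {}
        unsafe { drop(Box::from_raw(self.tail)) };
    }
}

fn main() {
    let mut queue = MpscQueue::new();
    queue.push("a");
    queue.push("b");
    assert!(matches!(queue.pop(), Pop::Data("a")));
    assert!(matches!(queue.pop(), Pop::Data("b")));
    assert!(matches!(queue.pop(), Pop::Empty));
}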
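
// `HashTask` above lets a `HashSet` of `Arc`-wrapped tasks be probed with a
// bare `&K`: the wrapper implements `Hash`, `Eq`, and `Borrow<K>` purely in
// terms of the task's key. Below is a safe, self-contained sketch of the same
// lookup pattern. `Entry`/`HashEntry` are illustrative stand-ins; the real
// `HashTask` keeps its key in an `UnsafeCell<Option<K>>` and relies on the key
// never being taken while the task is still in the set.
use std::borrow::Borrow;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::sync::Arc;

struct Entry<K, V> {
    key: K,
    value: V,
}

struct HashEntry<K, V>(Arc<Entry<K, V>>);

impl<K: Hash, V> Hash for HashEntry<K, V> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash only the key, exactly as `HashTask`'s `Hash` impl does.
        self.0.key.hash(state);
    }
}

impl<K: Eq, V> PartialEq for HashEntry<K, V> {
    fn eq(&self, other: &Self) -> bool {
        self.0.key == other.0.key
    }
}
impl<K: Eq, V> Eq for HashEntry<K, V> {}

// `Borrow<K>` is what allows `HashSet::get`/`take` to accept a plain `&K`.
impl<K, V> Borrow<K> for HashEntry<K, V> {
    fn borrow(&self) -> &K {
        &self.0.key
    }
}

fn main() {
    let mut set: HashSet<HashEntry<&str, u32>> = HashSet::new();
    set.insert(HashEntry(Arc::new(Entry { key: "a", value: 1 })));

    // Look up and remove the reference-counted entry using only its key,
    // the same way `MappedFutures::cancel`/`remove` find their task.
    assert_eq!(set.get(&"a").map(|entry| entry.0.value), Some(1));
    assert!(set.take(&"a").is_some());
    assert!(set.is_empty());
}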
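
// Finally, a minimal end-to-end sketch of the new `MappedFutures` API, based
// on the methods shown above (`insert`, `get_mut`, and the `Stream` impl that
// yields `(key, output)` pairs). It assumes this patched `futures-util` plus
// the `futures` and `futures-timer` crates, mirroring the new tests in this
// patch.
use std::time::Duration;

use futures::executor::block_on;
use futures::stream::StreamExt;
use futures_timer::Delay;
use futures_util::stream::MappedFutures;

fn main() {
    let mut timers: MappedFutures<u32, Delay> = MappedFutures::new();

    // Each key maps to at most one in-flight future; inserting under an
    // existing key drops whatever future was stored there before.
    timers.insert(1, Delay::new(Duration::from_millis(50)));
    timers.insert(2, Delay::new(Duration::from_millis(200)));

    // Futures stay addressable by key while they are in flight.
    if let Some(timer) = timers.get_mut(&2) {
        timer.reset(Duration::from_millis(10));
    }

    // Completed futures are yielded together with the key they were inserted
    // under; the map forgets the key as it yields.
    while let Some((key, ())) = block_on(timers.next()) {
        println!("timer {} fired", key);
    }
}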