seize/deferred.rs

use std::cell::UnsafeCell;
use std::ptr;
use std::sync::atomic::{AtomicPtr, AtomicU64, Ordering};

use crate::raw::Node;
use crate::tls::Thread;
use crate::{AsLink, Collector, Link};

/// A batch of pointers to be reclaimed in the future.
///
/// Sometimes it is necessary to defer the retirement of a batch of pointers.
/// For example, a set of pointers may be reachable from multiple locations in a
/// data structure and can only be retired after a specific object is reclaimed.
/// In such cases, the [`Deferred`] type can serve as a cheap place to defer the
/// retirement of pointers, without allocating extra memory.
///
/// [`Deferred`] is a concurrent list, meaning that pointers can be added from
/// multiple threads concurrently. It is not meant to amortize the cost of
/// retirement; that is handled by the thread-local batches controlled with
/// [`Collector::batch_size`], and accessing a [`Deferred`] from a single
/// thread is more expensive than necessary. Deferred batches are useful when
/// you need direct control over when a batch of objects is retired, a
/// relatively rare use case.
///
/// # Examples
///
/// ```rust
/// # use seize::{Deferred, Collector, Linked, reclaim};
/// # use std::sync::{Arc, atomic::{AtomicPtr, Ordering}};
/// let collector = Collector::new().batch_size(10);
///
/// // allocate a set of pointers
/// let items = (0..10usize)
///     .map(|i| AtomicPtr::new(collector.link_boxed(i)))
///     .collect::<Arc<[_]>>();
///
/// // create a batch of objects to retire
/// let mut batch = Deferred::new();
///
/// for item in items.iter() {
///     // make the item unreachable with an atomic swap
///     let old = item.swap(std::ptr::null_mut(), Ordering::AcqRel);
///     // don't retire just yet, add the object to the batch
///     unsafe { batch.defer(old) };
/// }
///
/// // sometime later... retire all the items in the batch
/// unsafe { batch.retire_all(&collector, reclaim::boxed::<Linked<usize>>) }
/// ```
#[derive(Default)]
pub struct Deferred {
    head: AtomicPtr<Node>,
    pub(crate) min_epoch: AtomicU64,
}

impl Deferred {
    /// Creates a new batch of deferred objects.
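    ///
    /// # Examples
    ///
    /// Because `new` is a `const fn`, a batch can be created in a `static`
    /// (a minimal sketch):
    ///
    /// ```rust
    /// # use seize::Deferred;
    /// static BATCH: Deferred = Deferred::new();
    /// ```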
    pub const fn new() -> Deferred {
        Deferred {
            head: AtomicPtr::new(ptr::null_mut()),
            min_epoch: AtomicU64::new(0),
        }
    }

    /// Adds an object to the batch.
    ///
    /// # Safety
    ///
    /// After this method is called, it is *undefined behavior* to add this
    /// pointer to this batch again, or to any other batch. The pointer must
    /// also be valid for access as a [`Link`], per the [`AsLink`] trait.
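    ///
    /// # Examples
    ///
    /// A minimal sketch of deferring a single pointer (the `usize` payload
    /// here is arbitrary):
    ///
    /// ```rust
    /// # use seize::{Collector, Deferred, Linked, reclaim};
    /// let collector = Collector::new();
    /// let mut batch = Deferred::new();
    ///
    /// // `link_boxed` allocates a pointer that is valid for access as a `Link`
    /// let ptr = collector.link_boxed(1_usize);
    ///
    /// // SAFETY: `ptr` is valid as a `Link` and is only ever added to this batch.
    /// unsafe { batch.defer(ptr) };
    /// # unsafe { batch.retire_all(&collector, reclaim::boxed::<Linked<usize>>) };
    /// ```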
    pub unsafe fn defer<T: AsLink>(&self, ptr: *mut T) {
        // `ptr` is guaranteed to be a valid pointer that can be cast to a node
        // (because of `T: AsLink`).
        //
        // Any other thread with a reference to the pointer only has a shared reference
        // to the `UnsafeCell<Node>`, which is allowed to alias. The caller guarantees
        // that the same pointer is not retired twice, so we can safely write to the
        // node through the shared pointer.
        let node = UnsafeCell::raw_get(ptr.cast::<UnsafeCell<Node>>());

        let birth_epoch = unsafe { (*node).birth_epoch };

        // Keep track of the oldest node in the batch.
        self.min_epoch.fetch_min(birth_epoch, Ordering::Relaxed);

        // Relaxed: the list is only read through a mutable reference (in
        // `retire_all` and `for_each`), so acquiring that reference requires
        // external synchronization that provides the necessary ordering.
        let mut prev = self.head.load(Ordering::Relaxed);

        loop {
            unsafe { (*node).next_batch = prev }

            // Relaxed: as above, readers of the list must hold a mutable
            // reference, so no release ordering is needed here. The weak
            // compare-exchange is fine because we simply retry on spurious
            // failure.
            match self
                .head
                .compare_exchange_weak(prev, node, Ordering::Relaxed, Ordering::Relaxed)
            {
                Ok(_) => return,
                Err(found) => prev = found,
            }
        }
    }

    /// Retires a batch of values, running `reclaim` when no threads hold a
    /// reference to any objects in the batch.
    ///
    /// Note that this method is disconnected from any guards on the current
    /// thread, so the pointers may be reclaimed immediately.
    ///
    /// # Safety
    ///
    /// The safety requirements of [`Collector::retire`] apply to each object in
    /// the batch.
    ///
    /// [`Collector::retire`]: crate::Collector::retire
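    ///
    /// # Examples
    ///
    /// A minimal sketch, retiring a batch that holds a single pointer:
    ///
    /// ```rust
    /// # use seize::{Collector, Deferred, Linked, reclaim};
    /// let collector = Collector::new();
    /// let mut batch = Deferred::new();
    ///
    /// let ptr = collector.link_boxed(42_usize);
    /// // SAFETY: `ptr` was allocated with `link_boxed` and is only added to
    /// // this batch.
    /// unsafe { batch.defer(ptr) };
    ///
    /// // SAFETY: the pointer is unreachable, so the requirements of
    /// // `Collector::retire` hold for every object in the batch.
    /// unsafe { batch.retire_all(&collector, reclaim::boxed::<Linked<usize>>) };
    /// ```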
    pub unsafe fn retire_all(&mut self, collector: &Collector, reclaim: unsafe fn(*mut Link)) {
        // Note that `add_batch` never actually reclaims the pointers immediately
        // if the current thread is active, similar to `retire`.
        unsafe { collector.raw.add_batch(self, reclaim, Thread::current()) }
    }

    /// Runs a function for each object in the batch.
    ///
    /// This method does not consume the batch and can be called multiple
    /// times **before retirement**.
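    ///
    /// # Examples
    ///
    /// A small sketch that counts the objects currently in a batch:
    ///
    /// ```rust
    /// # use seize::{Collector, Deferred, Linked, reclaim};
    /// let collector = Collector::new();
    /// let mut batch = Deferred::new();
    ///
    /// for i in 0..3_usize {
    ///     // SAFETY: each pointer is freshly allocated and deferred exactly once.
    ///     unsafe { batch.defer(collector.link_boxed(i)) };
    /// }
    ///
    /// let mut count = 0;
    /// batch.for_each(|_node| count += 1);
    /// assert_eq!(count, 3);
    /// # unsafe { batch.retire_all(&collector, reclaim::boxed::<Linked<usize>>) };
    /// ```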
    pub fn for_each(&mut self, mut f: impl FnMut(*mut Node)) {
        let mut list = *self.head.get_mut();

        while !list.is_null() {
            let curr = list;

            // Advance the cursor.
            // Safety: `curr` is a valid, non-null node in the list.
            list = unsafe { (*curr).next_batch };

            f(curr);
        }
    }
}