seize/guard.rs
use std::cell::UnsafeCell;
use std::fmt;
use std::marker::PhantomData;
use std::sync::atomic::{AtomicPtr, Ordering};

use crate::raw::{self, Reservation};
use crate::tls::Thread;
use crate::{AsLink, Collector, Link};

/// A guard that enables protected loads of concurrent objects.
///
/// This trait provides common functionality implemented by [`LocalGuard`],
/// [`OwnedGuard`], and [`UnprotectedGuard`].
///
/// See [the guide](crate::guide#starting-operations) for an introduction to
/// using guards.
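///
/// Below is a minimal sketch of code written against this trait rather than a
/// concrete guard type; the `load` function and its signature are illustrative,
/// not part of this crate.
///
/// ```ignore
/// use std::sync::atomic::{AtomicPtr, Ordering};
/// use seize::{AsLink, Guard};
///
/// // Works with `LocalGuard`, `OwnedGuard`, or `UnprotectedGuard`.
/// fn load<'g, T: AsLink>(ptr: &AtomicPtr<T>, guard: &'g impl Guard) -> Option<&'g T> {
///     let ptr = guard.protect(ptr, Ordering::Acquire);
///     // Safety: the object is protected for the lifetime of `guard`.
///     unsafe { ptr.as_ref() }
/// }
/// ```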
pub trait Guard {
    /// Refreshes the guard.
    ///
    /// Calling this method is similar to dropping and immediately
    /// creating a new guard. The current thread remains active, but any
    /// pointers that were previously protected may be reclaimed.
    ///
    /// # Safety
    ///
    /// This method is not marked as `unsafe`, but will affect
    /// the validity of pointers returned by [`protect`](Guard::protect),
    /// similar to dropping a guard. It is intended to be used safely
    /// by users of concurrent data structures, as references will
    /// be tied to the guard and this method takes `&mut self`.
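    ///
    /// A sketch of periodically refreshing a guard inside a long-running
    /// operation (the loop body and interval here are illustrative):
    ///
    /// ```ignore
    /// use seize::{Collector, Guard};
    ///
    /// let collector = Collector::new();
    /// let mut guard = collector.enter();
    ///
    /// for i in 0..1_000_000 {
    ///     // ... protected loads through `guard` ...
    ///
    ///     // Let objects retired by other threads be reclaimed. Any pointers
    ///     // protected before this call may now be invalidated.
    ///     if i % 1024 == 0 {
    ///         guard.refresh();
    ///     }
    /// }
    /// ```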
    fn refresh(&mut self);

    /// Flush any retired values in the local batch.
    ///
    /// This method flushes any values from the current thread's local
    /// batch, starting the reclamation process. Note that no memory
    /// can be reclaimed while this guard is active, but calling `flush`
    /// may allow memory to be reclaimed more quickly after the guard is
    /// dropped.
    ///
    /// Note that the batch must contain at least as many objects as the
    /// number of currently active threads for a flush to be performed.
    ///
    /// See [`Collector::batch_size`] for details about batching.
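    ///
    /// A sketch of retiring a value and then flushing the local batch; the
    /// `Linked` wrapper, `Collector::link_boxed`, and `reclaim::boxed` are
    /// assumed from the crate's public API:
    ///
    /// ```ignore
    /// use seize::{reclaim, Collector, Guard, Linked};
    ///
    /// let collector = Collector::new();
    /// let guard = collector.enter();
    ///
    /// let ptr: *mut Linked<u64> = collector.link_boxed(1);
    /// unsafe { guard.defer_retire(ptr, reclaim::boxed::<Linked<u64>>) };
    ///
    /// // Hand the local batch off for reclamation. The memory itself can only
    /// // be freed once this guard, and any other active guards, are dropped.
    /// guard.flush();
    /// ```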
    fn flush(&self);

    /// Protects the load of an atomic pointer.
    ///
    /// Any valid pointer loaded through a guard using the `protect` method is
    /// guaranteed to stay valid until the guard is dropped, or the object
    /// is retired by the current thread. Importantly, if another thread
    /// retires this object, it will not be reclaimed for the lifetime of
    /// this guard.
    ///
    /// Note that the lifetime of a guarded pointer is logically tied to that of
    /// the guard -- when the guard is dropped the pointer is invalidated --
    /// but a raw pointer is returned for convenience. Data structures that
    /// return shared references to values should ensure that the lifetime
    /// of the reference is tied to the lifetime of a guard.
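    ///
    /// A sketch of a protected load; the `Linked` wrapper and
    /// `Collector::link_boxed` are assumed from the crate's public API:
    ///
    /// ```ignore
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    /// use seize::{Collector, Guard, Linked};
    ///
    /// let collector = Collector::new();
    /// let head = AtomicPtr::new(collector.link_boxed(42_u64));
    ///
    /// let guard = collector.enter();
    /// // `ptr` will not be reclaimed while `guard` is active, even if another
    /// // thread retires it in the meantime.
    /// let ptr: *mut Linked<u64> = guard.protect(&head, Ordering::Acquire);
    /// assert!(!ptr.is_null());
    /// ```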
    fn protect<T: AsLink>(&self, ptr: &AtomicPtr<T>, ordering: Ordering) -> *mut T;

    /// Retires a value, running `reclaim` when no threads hold a reference to
    /// it.
    ///
    /// This method delays reclamation until the guard is dropped, as opposed to
    /// [`Collector::retire`], which may reclaim objects immediately.
    ///
    /// # Safety
    ///
    /// The retired object must no longer be accessible to any thread that
    /// enters after it is removed.
    ///
    /// Retiring the same pointer twice can cause **undefined behavior**, even
    /// if the reclaimer doesn't free memory.
    ///
    /// Additionally, the pointer must be valid to access as a [`Link`], per the
    /// [`AsLink`] trait, and the reclaimer passed to `retire` must
    /// correctly free values of type `T`.
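    ///
    /// A sketch of deferred retirement; the `Linked` wrapper,
    /// `Collector::link_boxed`, and `reclaim::boxed` are assumed from the
    /// crate's public API:
    ///
    /// ```ignore
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    /// use seize::{reclaim, Collector, Guard, Linked};
    ///
    /// let collector = Collector::new();
    /// let head = AtomicPtr::new(collector.link_boxed(1_u64));
    ///
    /// let guard = collector.enter();
    /// // Unlink the value first, so that threads entering after this point
    /// // cannot reach it, then retire it through the guard.
    /// let old = head.swap(std::ptr::null_mut(), Ordering::AcqRel);
    /// unsafe { guard.defer_retire(old, reclaim::boxed::<Linked<u64>>) };
    ///
    /// // Reclamation happens no earlier than when `guard` is dropped.
    /// drop(guard);
    /// ```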
    unsafe fn defer_retire<T: AsLink>(&self, ptr: *mut T, reclaim: unsafe fn(*mut Link));

    /// Returns a numeric identifier for the current thread.
    ///
    /// Guards rely on thread-local state, including thread IDs. If you already
    /// have a guard you can use this method to get a cheap identifier for the
    /// current thread, avoiding TLS overhead. Note that thread IDs may be
    /// reused, so the value returned is only unique for the lifetime of
    /// this thread.
    fn thread_id(&self) -> usize;

    /// Returns `true` if this guard belongs to the given collector.
    ///
    /// This can be used to verify that user-provided guards are valid
    /// for the expected collector.
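    ///
    /// A minimal sketch of validating a caller-provided guard:
    ///
    /// ```ignore
    /// use seize::{Collector, Guard};
    ///
    /// let collector = Collector::new();
    /// let guard = collector.enter();
    /// assert!(guard.belongs_to(&collector));
    ///
    /// let other = Collector::new();
    /// assert!(!guard.belongs_to(&other));
    /// ```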
    fn belongs_to(&self, collector: &Collector) -> bool;

    /// Create a [`Link`] that can be used to link an object to the collector.
    ///
    /// This is identical to [`Collector::link`], but may have slightly less
    /// overhead due to the existence of a guard.
    fn link(&self, collector: &Collector) -> Link;
}

/// A guard that keeps the current thread marked as active.
///
/// Local guards are created by calling [`Collector::enter`]. Unlike
/// [`OwnedGuard`], a local guard is tied to the current thread and does not
/// implement `Send`. This makes local guards relatively cheap to create and
/// destroy.
///
/// Most of the functionality provided by this type is through the [`Guard`]
/// trait.
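///
/// A minimal usage sketch:
///
/// ```ignore
/// use seize::Collector;
///
/// let collector = Collector::new();
///
/// // Mark this thread as active for the lifetime of the guard.
/// let guard = collector.enter();
/// // ... perform protected loads and retirements through `guard` ...
/// drop(guard);
/// ```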
pub struct LocalGuard<'a> {
    collector: &'a Collector,
    // The current thread.
    thread: Thread,
    // The reservation of the current thread.
    reservation: *const Reservation,
    // `LocalGuard` must not be `Send` or `Sync`, as it is tied to the state of
    // the current thread in the collector.
    _unsend: PhantomData<*mut ()>,
}

impl LocalGuard<'_> {
    #[inline]
    pub(crate) fn enter(collector: &Collector) -> LocalGuard<'_> {
        let thread = Thread::current();
        // Safety: `thread` is the current thread.
        let reservation = unsafe { collector.raw.reservation(thread) };

        // Calls to `enter` may be reentrant, so we need to keep track of the number
        // of active guards for the current thread.
        let guards = reservation.guards.get();
        reservation.guards.set(guards + 1);

        if guards == 0 {
            // Safety: Only called on the current thread, which is currently inactive.
            unsafe { collector.raw.enter(reservation) };
        }

        LocalGuard {
            thread,
            reservation,
            collector,
            _unsend: PhantomData,
        }
    }
}

impl Guard for LocalGuard<'_> {
    /// Protects the load of an atomic pointer.
    #[inline]
    fn protect<T: AsLink>(&self, ptr: &AtomicPtr<T>, _: Ordering) -> *mut T {
        // Safety: `self.reservation` is owned by the current thread.
        unsafe { self.collector.raw.protect_local(ptr, &*self.reservation) }
    }

    /// Retires a value, running `reclaim` when no threads hold a reference to
    /// it.
    #[inline]
    unsafe fn defer_retire<T: AsLink>(&self, ptr: *mut T, reclaim: unsafe fn(*mut Link)) {
        // Safety:
        // - `self.thread` is the current thread.
        // - The validity of the pointer is guaranteed by the caller.
        unsafe { self.collector.raw.add(ptr, reclaim, self.thread) }
    }

    /// Refreshes the guard.
    #[inline]
    fn refresh(&mut self) {
        // Safety: `self.reservation` is owned by the current thread.
        let reservation = unsafe { &*self.reservation };
        let guards = reservation.guards.get();

        if guards == 1 {
            // Safety: We have a unique reference to the last active guard.
            unsafe { self.collector.raw.refresh(reservation) }
        }
    }

    /// Flush any retired values in the local batch.
    #[inline]
    fn flush(&self) {
        // Note that this does not actually retire any values, it just attempts
        // to add the batch to any active reservation lists, including ours.
        //
        // Safety: `self.thread` is the current thread.
        unsafe { self.collector.raw.try_retire_batch(self.thread) }
    }

    /// Returns a numeric identifier for the current thread.
    #[inline]
    fn thread_id(&self) -> usize {
        self.thread.id
    }

    /// Returns `true` if this guard belongs to the given collector.
    #[inline]
    fn belongs_to(&self, collector: &Collector) -> bool {
        Collector::id_eq(self.collector, collector)
    }

    #[inline]
    fn link(&self, collector: &Collector) -> Link {
        // Safety: `self.reservation` is owned by the current thread.
        let reservation = unsafe { &*self.reservation };

        Link {
            node: UnsafeCell::new(collector.raw.node(reservation)),
        }
    }
}

impl Drop for LocalGuard<'_> {
    #[inline]
    fn drop(&mut self) {
        // Safety: `self.reservation` is owned by the current thread.
        let reservation = unsafe { &*self.reservation };

        // Decrement the active guard count.
        let guards = reservation.guards.get();
        reservation.guards.set(guards - 1);

        if guards == 1 {
            // Safety: We have a unique reference to the last active guard.
            unsafe { self.collector.raw.leave(reservation) };
        }
    }
}

impl fmt::Debug for LocalGuard<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("LocalGuard").finish()
    }
}

/// A guard that protects objects for its lifetime, independent of the current
/// thread.
///
/// Unlike [`LocalGuard`], an owned guard is independent of the current thread,
/// allowing it to implement `Send`. This is useful for holding guards across
/// `.await` points in work-stealing schedulers, where execution may resume
/// on a different thread than it started on. However, owned guards are more
/// expensive to create and destroy, so they should be avoided if cross-thread
/// usage is not required.
///
/// Most of the functionality provided by this type is through the [`Guard`]
/// trait.
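///
/// A sketch of sharing an owned guard across threads; the
/// `Collector::enter_owned` constructor is assumed from the crate's public
/// API:
///
/// ```ignore
/// use seize::{Collector, Guard};
///
/// let collector = Collector::new();
///
/// // Unlike `LocalGuard`, this guard is `Send` and `Sync`.
/// let guard = collector.enter_owned();
///
/// std::thread::scope(|s| {
///     s.spawn(|| {
///         // Pointers protected through `guard` remain protected here.
///         let _ = guard.thread_id();
///     });
/// });
/// ```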
pub struct OwnedGuard<'a> {
    collector: &'a Collector,
    // An owned thread, unique to this guard.
    thread: Thread,
    // The reservation of this guard.
    reservation: *const Reservation,
}

// Safety: `OwnedGuard` owns its thread slot, so it is not tied to any
// thread-locals.
unsafe impl Send for OwnedGuard<'_> {}
unsafe impl Sync for OwnedGuard<'_> {}

impl OwnedGuard<'_> {
    #[inline]
    pub(crate) fn enter(collector: &Collector) -> OwnedGuard<'_> {
        // Create a thread slot that will last for the lifetime of this guard.
        let thread = Thread::create();

        // Safety: We have ownership of `thread`.
        let reservation = unsafe { collector.raw.reservation(thread) };

        // Safety: We have ownership of `reservation`.
        unsafe { collector.raw.enter(reservation) };

        OwnedGuard {
            collector,
            thread,
            reservation,
        }
    }
}

impl Guard for OwnedGuard<'_> {
    /// Protects the load of an atomic pointer.
    #[inline]
    fn protect<T: AsLink>(&self, ptr: &AtomicPtr<T>, _: Ordering) -> *mut T {
        // Safety: `self.reservation` is owned by the current thread.
        let reservation = unsafe { &*self.reservation };
        self.collector.raw.protect(ptr, reservation)
    }

    /// Retires a value, running `reclaim` when no threads hold a reference to
    /// it.
    #[inline]
    unsafe fn defer_retire<T: AsLink>(&self, ptr: *mut T, reclaim: unsafe fn(*mut Link)) {
        // Safety: `self.reservation` is owned by the current thread.
        let reservation = unsafe { &*self.reservation };
        let _lock = reservation.lock.lock().unwrap();

        // Safety:
        // - We hold the lock and so have unique access to the batch.
        // - The validity of the pointer is guaranteed by the caller.
        unsafe { self.collector.raw.add(ptr, reclaim, self.thread) }
    }

    /// Refreshes the guard.
    #[inline]
    fn refresh(&mut self) {
        // Safety: `self.reservation` is owned by the current thread.
        let reservation = unsafe { &*self.reservation };
        unsafe { self.collector.raw.refresh(reservation) }
    }

    /// Flush any retired values in the local batch.
    #[inline]
    fn flush(&self) {
        // Safety: `self.reservation` is owned by the current thread.
        let reservation = unsafe { &*self.reservation };
        let _lock = reservation.lock.lock().unwrap();
        // Note that this does not actually retire any values, it just attempts
        // to add the batch to any active reservation lists, including ours.
        //
        // Safety: We hold the lock and so have unique access to the batch.
        unsafe { self.collector.raw.try_retire_batch(self.thread) }
    }

    /// Returns a numeric identifier for the current thread.
    #[inline]
    fn thread_id(&self) -> usize {
        // We can't return the ID of our thread slot because `OwnedGuard`
        // is `Send`, so the ID is not uniquely tied to the current thread.
        // We also can't return the OS thread ID because it might conflict
        // with our thread IDs, so we have to get/create the current thread.
        Thread::current().id
    }

    /// Returns `true` if this guard belongs to the given collector.
    #[inline]
    fn belongs_to(&self, collector: &Collector) -> bool {
        Collector::id_eq(self.collector, collector)
    }

    #[inline]
    fn link(&self, _collector: &Collector) -> Link {
        // Avoid going through shared thread-local storage.
        let node = raw::Node {
            birth_epoch: self.collector.raw.birth_epoch(),
        };

        Link {
            node: UnsafeCell::new(node),
        }
    }
}

impl Drop for OwnedGuard<'_> {
    #[inline]
    fn drop(&mut self) {
        // Safety: `self.reservation` is owned by the current thread.
        let reservation = unsafe { &*self.reservation };

        // Safety: `self.thread` is an owned thread.
        unsafe { self.collector.raw.leave(reservation) };

        // Safety: We are in `drop` and never share `self.thread`.
        unsafe { Thread::free(self.thread.id) };
    }
}

/// Returns a dummy guard object.
///
/// Calling [`protect`](Guard::protect) on an unprotected guard will
/// load the pointer directly, and [`retire`](Guard::defer_retire) will
/// reclaim objects immediately.
///
/// Unprotected guards are useful when calling guarded functions
/// on a data structure that has just been created or is about
/// to be destroyed, because you know that no other thread holds
/// a reference to it.
///
/// # Safety
///
/// You must ensure that code used with this guard is sound with
/// the unprotected behavior described above.
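///
/// A sketch of draining a structure in its destructor; the `Stack` type and
/// its `pop` method are illustrative, not part of this crate:
///
/// ```ignore
/// impl<T> Drop for Stack<T> {
///     fn drop(&mut self) {
///         // We have `&mut self`, so no other thread can access the stack
///         // and retired nodes can be reclaimed immediately.
///         let guard = unsafe { seize::unprotected() };
///         while self.pop(&guard).is_some() {}
///     }
/// }
/// ```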
#[inline]
pub unsafe fn unprotected() -> UnprotectedGuard {
    UnprotectedGuard
}

/// A dummy guard object.
///
/// See [`unprotected`] for details.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct UnprotectedGuard;

impl Guard for UnprotectedGuard {
    /// Loads the pointer directly, using the given ordering.
    #[inline]
    fn protect<T: AsLink>(&self, ptr: &AtomicPtr<T>, ordering: Ordering) -> *mut T {
        ptr.load(ordering)
    }

    /// Reclaims the pointer immediately.
    #[inline]
    unsafe fn defer_retire<T: AsLink>(&self, ptr: *mut T, reclaim: unsafe fn(*mut Link)) {
        unsafe { reclaim(ptr.cast::<Link>()) }
    }

    /// This method is a no-op.
    #[inline]
    fn refresh(&mut self) {}

    /// This method is a no-op.
    #[inline]
    fn flush(&self) {}

    /// Returns a numeric identifier for the current thread.
    #[inline]
    fn thread_id(&self) -> usize {
        Thread::current().id
    }

    /// Unprotected guards aren't tied to a specific collector, so this always
    /// returns `true`.
    #[inline]
    fn belongs_to(&self, _collector: &Collector) -> bool {
        true
    }

    #[inline]
    fn link(&self, collector: &Collector) -> Link {
        collector.link()
    }
}