use std::cell::UnsafeCell;
use std::ptr;
use std::sync;

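/// A lazily initialized value with a `&'static` accessor.
///
/// A `sync::Once` guards a raw pointer to a heap allocation: the first
/// caller of `get` runs the initializer and leaks the resulting `Box`;
/// every later caller reuses the same value.
///
/// Illustrative usage (the `GREETING` static is hypothetical):
///
/// ```ignore
/// static GREETING: LazyV2<String> = LazyV2::INIT;
/// let s: &'static String = GREETING.get(|| "Hello, world!".to_owned());
/// ```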
pub struct LazyV2<T: Sync> {
    lock: sync::Once,
    ptr: UnsafeCell<*const T>,
}

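// SAFETY: the inner pointer is written at most once (guarded by the
// `Once`) and only read afterwards; `T: Sync` makes the resulting shared
// `&T` safe to use from any thread.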
unsafe impl<T: Sync> Sync for LazyV2<T> {}

impl<T: Sync> LazyV2<T> {
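    /// An uninitialized `LazyV2`; assign this to a `static` item.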
    pub const INIT: LazyV2<T> = LazyV2 {
        lock: sync::Once::new(),
        ptr: UnsafeCell::new(ptr::null()),
    };

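    /// Returns the value, calling `init` to create it on first use.
    /// Only one `init` closure ever runs, no matter how many threads
    /// race into this method.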
    pub fn get<F>(&'static self, init: F) -> &'static T
    where
        F: FnOnce() -> T,
    {
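        // `call_once` blocks concurrent callers until the closure has
        // finished, so at most one thread ever writes through the pointer.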
        self.lock.call_once(|| unsafe {
            *self.ptr.get() = Box::into_raw(Box::new(init()));
        });
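        // SAFETY: `call_once` has completed, so the pointer was written
        // exactly once and is never mutated again; the leaked `Box` keeps
        // the pointee alive for `'static`.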
        unsafe { &**self.ptr.get() }
    }
}

#[cfg(test)]
mod test {
    use std::sync::atomic::AtomicIsize;
    use std::sync::atomic::Ordering;
    use std::sync::Arc;
    use std::sync::Barrier;
    use std::thread;

    use super::LazyV2;

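    // Many threads race into `get` on a shared static; the test checks
    // that every call observes the same value and that the initializer
    // runs exactly once per reset.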
    #[test]
    fn many_threads_calling_get() {
        const N_THREADS: usize = 32;
        const N_ITERS_IN_THREAD: usize = 32;
        const N_ITERS: usize = 16;

        static mut LAZY: LazyV2<String> = LazyV2::INIT;
        static CALL_COUNT: AtomicIsize = AtomicIsize::new(0);

        let value = "Hello, world!".to_owned();

        for _ in 0..N_ITERS {
            // Reset between rounds. No worker threads exist at this point,
            // so mutating the `static mut` is sound; the previously boxed
            // value is intentionally leaked.
            unsafe {
                LAZY = LazyV2::INIT;
            }
            CALL_COUNT.store(0, Ordering::SeqCst);

            let mut threads = vec![];
            let barrier = Arc::new(Barrier::new(N_THREADS));

            for _ in 0..N_THREADS {
                let cloned_value_thread = value.clone();
                let cloned_barrier = barrier.clone();
                threads.push(thread::spawn(move || {
                    // Line all threads up so they race into `get` together.
                    cloned_barrier.wait();
                    for _ in 0..N_ITERS_IN_THREAD {
                        assert_eq!(&cloned_value_thread, unsafe {
                            LAZY.get(|| {
                                CALL_COUNT.fetch_add(1, Ordering::SeqCst);
                                cloned_value_thread.clone()
                            })
                        });
                    }
                }));
            }

            for thread in threads {
                thread.join().unwrap();
            }

            // N_THREADS * N_ITERS_IN_THREAD calls to `get` were made, but
            // the initializer must have run exactly once.
            assert_eq!(CALL_COUNT.load(Ordering::SeqCst), 1);
        }
    }
}