blob: 5b0150265174a4b32fda0582c103cee74143b5fc [file] [log] [blame]
Chih-Hung Hsiehcfc3a232020-06-10 20:13:05 -07001//! Lazily initialized data.
2//! Used in generated code.
3
Haibo Huang52aa7852020-07-10 20:23:55 -07004// Avoid deprecation warnings when compiling rust-protobuf
5#![allow(deprecated)]
6
Chih-Hung Hsiehcfc3a232020-06-10 20:13:05 -07007use std::mem;
8use std::sync;
9
/// Lazily initialized data.
#[deprecated(
    since = "2.16",
    note = "Please regenerate .rs files from .proto files to use newer APIs"
)]
pub struct Lazy<T> {
    // Guards one-time initialization of `ptr`; public only so that
    // previously generated code can construct this struct directly.
    #[doc(hidden)]
    pub lock: sync::Once,
    // Pointer to the lazily created value; null until the first `get`.
    #[doc(hidden)]
    pub ptr: *const T,
}
21
22impl<T> Lazy<T> {
23 /// Uninitialized `Lazy` object.
24 ///
25 /// The initializer is added in rust-protobuf 2.11, for compatibility with
26 /// previously generated code, existing fields are kept public.
27 pub const INIT: Lazy<T> = Lazy {
28 lock: sync::Once::new(),
29 ptr: 0 as *const T,
30 };
31
32 /// Get lazy field value, initialize it with given function if not yet.
33 pub fn get<F>(&'static mut self, init: F) -> &'static T
34 where
35 F: FnOnce() -> T,
36 {
37 // ~ decouple the lifetimes of 'self' and 'self.lock' such we
38 // can initialize self.ptr in the call_once closure (note: we
39 // do have to initialize self.ptr in the closure to guarantee
40 // the ptr is valid for all calling threads at any point in
41 // time)
42 let lock: &sync::Once = unsafe { mem::transmute(&self.lock) };
43 lock.call_once(|| unsafe {
44 self.ptr = mem::transmute(Box::new(init()));
45 });
46 unsafe { &*self.ptr }
47 }
48}
49
/// Initializer for the `lock` field of a `Lazy` struct.
#[deprecated(
    since = "2.11",
    note = "Regenerate .proto files to use safer initializer"
)]
pub const ONCE_INIT: sync::Once = std::sync::Once::new();
56
#[cfg(test)]
mod test {
    use super::Lazy;
    use std::sync::atomic::AtomicIsize;
    use std::sync::atomic::Ordering;
    use std::sync::Arc;
    use std::sync::Barrier;
    use std::thread;

    #[test]
    fn many_threads_calling_get() {
        const N_THREADS: usize = 32;
        const N_ITERS_IN_THREAD: usize = 32;
        const N_ITERS: usize = 16;

        static mut LAZY: Lazy<String> = Lazy::INIT;
        static CALL_COUNT: AtomicIsize = AtomicIsize::new(0);

        let expected = "Hello, world!".to_owned();

        for _ in 0..N_ITERS {
            // Reset the mutable state before each round.
            unsafe {
                LAZY = Lazy::INIT;
            }
            CALL_COUNT.store(0, Ordering::SeqCst);

            // Spawn a pack of threads that all hit `get` at once.
            let start_line = Arc::new(Barrier::new(N_THREADS));
            let workers: Vec<_> = (0..N_THREADS)
                .map(|_| {
                    let expected = expected.clone();
                    let start_line = Arc::clone(&start_line);
                    thread::spawn(move || {
                        // Rendezvous so every thread races into `get`
                        // together, maximising contention.
                        start_line.wait();
                        for _ in 0..N_ITERS_IN_THREAD {
                            assert_eq!(&expected, unsafe {
                                LAZY.get(|| {
                                    CALL_COUNT.fetch_add(1, Ordering::SeqCst);
                                    expected.clone()
                                })
                            });
                        }
                    })
                })
                .collect();

            for worker in workers {
                worker.join().unwrap();
            }

            // The initializer must have run exactly once per round.
            assert_eq!(CALL_COUNT.load(Ordering::SeqCst), 1);
        }
    }
}