Snap for 7269314 from dd8664866b7dd1c3e84f669e2bd49927ad0f7f1c to sc-v2-release

Change-Id: Ia3ca909b8bbea4ef719a3910397c94620b17b11b
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 3dcef5c..1d9c34d 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,5 @@
 {
   "git": {
-    "sha1": "99c3230b263202aca56497b1f8e418a7b3647a23"
+    "sha1": "d841a2028dc72b4e09739116f07e865db60f3690"
   }
 }
diff --git a/Android.bp b/Android.bp
index 121cba8..bba5b1e 100644
--- a/Android.bp
+++ b/Android.bp
@@ -42,7 +42,6 @@
 rust_defaults {
     name: "crossbeam-epoch_defaults",
     crate_name: "crossbeam_epoch",
-    // has rustc warnings
     srcs: ["src/lib.rs"],
     test_suites: ["general-tests"],
     auto_gen_config: true,
@@ -61,7 +60,6 @@
         "librand",
         "libscopeguard",
     ],
-    proc_macros: ["libconst_fn"],
 }
 
 rust_test_host {
@@ -77,9 +75,45 @@
     defaults: ["crossbeam-epoch_defaults"],
 }
 
+rust_defaults {
+    name: "crossbeam-epoch_defaults_loom",
+    crate_name: "loom",
+    srcs: ["tests/loom.rs"],
+    test_suites: ["general-tests"],
+    auto_gen_config: true,
+    edition: "2018",
+    features: [
+        "alloc",
+        "default",
+        "lazy_static",
+        "std",
+    ],
+    rustlibs: [
+        "libcfg_if",
+        "libcrossbeam_epoch",
+        "libcrossbeam_utils",
+        "liblazy_static",
+        "libmemoffset",
+        "librand",
+        "libscopeguard",
+    ],
+}
+
+rust_test_host {
+    name: "crossbeam-epoch_host_test_tests_loom",
+    defaults: ["crossbeam-epoch_defaults_loom"],
+    test_options: {
+        unit_test: true,
+    },
+}
+
+rust_test {
+    name: "crossbeam-epoch_device_test_tests_loom",
+    defaults: ["crossbeam-epoch_defaults_loom"],
+}
+
 rust_library {
     name: "libcrossbeam_epoch",
-    // has rustc warnings
     host_supported: true,
     crate_name: "crossbeam_epoch",
     srcs: ["src/lib.rs"],
@@ -97,20 +131,18 @@
         "libmemoffset",
         "libscopeguard",
     ],
-    proc_macros: ["libconst_fn"],
 }
 
 // dependent_library ["feature_list"]
 //   autocfg-1.0.1
 //   cfg-if-1.0.0
-//   const_fn-0.4.5
 //   crossbeam-utils-0.8.3 "lazy_static,std"
-//   getrandom-0.1.16 "std"
+//   getrandom-0.2.2 "std"
 //   lazy_static-1.4.0
-//   libc-0.2.87
-//   memoffset-0.6.1 "default"
+//   libc-0.2.92
+//   memoffset-0.6.3 "default"
 //   ppv-lite86-0.2.10 "simd,std"
-//   rand-0.7.3 "alloc,default,getrandom,getrandom_package,libc,std"
-//   rand_chacha-0.2.2 "std"
-//   rand_core-0.5.1 "alloc,getrandom,std"
+//   rand-0.8.3 "alloc,default,getrandom,libc,rand_chacha,rand_hc,std,std_rng"
+//   rand_chacha-0.3.0 "std"
+//   rand_core-0.6.2 "alloc,getrandom,std"
 //   scopeguard-1.1.0
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0e154a6..0f30b70 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,14 @@
+# Version 0.9.3
+
+- Make `loom` dependency optional. (#666)
+
+# Version 0.9.2
+
+- Add `Atomic::compare_exchange` and `Atomic::compare_exchange_weak`. (#628)
+- Deprecate `Atomic::compare_and_set` and `Atomic::compare_and_set_weak`. Use `Atomic::compare_exchange` or `Atomic::compare_exchange_weak` instead. (#628)
+- Make `const_fn` dependency optional. (#611)
+- Add unstable support for `loom`. (#487)
+
 # Version 0.9.1
 
 - Bump `memoffset` dependency to version 0.6. (#592)
diff --git a/Cargo.lock b/Cargo.lock
index baebea0..fd37e90 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1,5 +1,7 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
+version = 3
+
 [[package]]
 name = "autocfg"
 version = "1.0.1"
@@ -7,10 +9,10 @@
 checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 
 [[package]]
-name = "cfg-if"
-version = "0.1.10"
+name = "cc"
+version = "1.0.67"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
 
 [[package]]
 name = "cfg-if"
@@ -20,18 +22,19 @@
 
 [[package]]
 name = "const_fn"
-version = "0.4.3"
+version = "0.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab"
+checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6"
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.9.1"
+version = "0.9.3"
 dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
  "const_fn",
  "crossbeam-utils",
  "lazy_static",
+ "loom",
  "memoffset",
  "rand",
  "scopeguard",
@@ -39,22 +42,36 @@
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.8.1"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d"
+checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49"
 dependencies = [
  "autocfg",
- "cfg-if 1.0.0",
+ "cfg-if",
  "lazy_static",
+ "loom",
+]
+
+[[package]]
+name = "generator"
+version = "0.6.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a9fed24fd1e18827652b4d55652899a1e9da8e54d91624dc3437a5bc3a9f9a9c"
+dependencies = [
+ "cc",
+ "libc",
+ "log",
+ "rustversion",
+ "winapi",
 ]
 
 [[package]]
 name = "getrandom"
-version = "0.1.15"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
+checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8"
 dependencies = [
- "cfg-if 0.1.10",
+ "cfg-if",
  "libc",
  "wasi",
 ]
@@ -67,9 +84,29 @@
 
 [[package]]
 name = "libc"
-version = "0.2.80"
+version = "0.2.86"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614"
+checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c"
+
+[[package]]
+name = "log"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "loom"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d44c73b4636e497b4917eb21c33539efa3816741a2d3ff26c6316f1b529481a4"
+dependencies = [
+ "cfg-if",
+ "generator",
+ "scoped-tls",
+]
 
 [[package]]
 name = "memoffset"
@@ -88,11 +125,10 @@
 
 [[package]]
 name = "rand"
-version = "0.7.3"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
+checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
 dependencies = [
- "getrandom",
  "libc",
  "rand_chacha",
  "rand_core",
@@ -101,9 +137,9 @@
 
 [[package]]
 name = "rand_chacha"
-version = "0.2.2"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
+checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
 dependencies = [
  "ppv-lite86",
  "rand_core",
@@ -111,23 +147,35 @@
 
 [[package]]
 name = "rand_core"
-version = "0.5.1"
+version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
+checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
 dependencies = [
  "getrandom",
 ]
 
 [[package]]
 name = "rand_hc"
-version = "0.2.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
+checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
 dependencies = [
  "rand_core",
 ]
 
 [[package]]
+name = "rustversion"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd"
+
+[[package]]
+name = "scoped-tls"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2"
+
+[[package]]
 name = "scopeguard"
 version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -135,6 +183,28 @@
 
 [[package]]
 name = "wasi"
-version = "0.9.0+wasi-snapshot-preview1"
+version = "0.10.2+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
+checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/Cargo.toml b/Cargo.toml
index e14da07..2d7eb00 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,12 +13,11 @@
 [package]
 edition = "2018"
 name = "crossbeam-epoch"
-version = "0.9.1"
+version = "0.9.3"
 authors = ["The Crossbeam Project Developers"]
 description = "Epoch-based garbage collection"
 homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch"
 documentation = "https://docs.rs/crossbeam-epoch"
-readme = "README.md"
 keywords = ["lock-free", "rcu", "atomic", "garbage"]
 categories = ["concurrency", "memory-management", "no-std"]
 license = "MIT OR Apache-2.0"
@@ -27,10 +26,11 @@
 version = "1"
 
 [dependencies.const_fn]
-version = "0.4"
+version = "0.4.4"
+optional = true
 
 [dependencies.crossbeam-utils]
-version = "0.8"
+version = "0.8.3"
 default-features = false
 
 [dependencies.lazy_static]
@@ -44,11 +44,15 @@
 version = "1.1.0"
 default-features = false
 [dev-dependencies.rand]
-version = "0.7.3"
+version = "0.8"
 
 [features]
 alloc = []
 default = ["std"]
-nightly = ["crossbeam-utils/nightly"]
-sanitize = []
+loom = ["loom-crate", "crossbeam-utils/loom"]
+nightly = ["crossbeam-utils/nightly", "const_fn"]
 std = ["alloc", "crossbeam-utils/std", "lazy_static"]
+[target."cfg(crossbeam_loom)".dependencies.loom-crate]
+version = "0.4"
+optional = true
+package = "loom"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index b56b6f5..8961f25 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -4,11 +4,10 @@
 # - Update CHANGELOG.md
 # - Update README.md
 # - Create "crossbeam-epoch-X.Y.Z" git tag
-version = "0.9.1"
+version = "0.9.3"
 authors = ["The Crossbeam Project Developers"]
 edition = "2018"
 license = "MIT OR Apache-2.0"
-readme = "README.md"
 repository = "https://github.com/crossbeam-rs/crossbeam"
 homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch"
 documentation = "https://docs.rs/crossbeam-epoch"
@@ -31,18 +30,28 @@
 # This is disabled by default and requires recent nightly compiler.
 # Note that this is outside of the normal semver guarantees and minor versions
 # of crossbeam may make breaking changes to them at any time.
-nightly = ["crossbeam-utils/nightly"]
+nightly = ["crossbeam-utils/nightly", "const_fn"]
 
-# TODO: docs
-sanitize = [] # Makes it more likely to trigger any potential data races.
+# Enable the use of loom for concurrency testing.
+#
+# This configuration option is outside of the normal semver guarantees: minor
+# versions of crossbeam may make breaking changes to it at any time.
+loom = ["loom-crate", "crossbeam-utils/loom"]
 
 [dependencies]
 cfg-if = "1"
-const_fn = "0.4"
+const_fn = { version = "0.4.4", optional = true }
 memoffset = "0.6"
 
+# Enable the use of loom for concurrency testing.
+#
+# This configuration option is outside of the normal semver guarantees: minor
+# versions of crossbeam may make breaking changes to it at any time.
+[target.'cfg(crossbeam_loom)'.dependencies]
+loom-crate = { package = "loom", version = "0.4", optional = true }
+
 [dependencies.crossbeam-utils]
-version = "0.8"
+version = "0.8.3"
 path = "../crossbeam-utils"
 default-features = false
 
@@ -55,4 +64,4 @@
 default-features = false
 
 [dev-dependencies]
-rand = "0.7.3"
+rand = "0.8"
diff --git a/METADATA b/METADATA
index 726c65f..fc37c40 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@
   }
   url {
     type: ARCHIVE
-    value: "https://static.crates.io/crates/crossbeam-epoch/crossbeam-epoch-0.9.1.crate"
+    value: "https://static.crates.io/crates/crossbeam-epoch/crossbeam-epoch-0.9.3.crate"
   }
-  version: "0.9.1"
+  version: "0.9.3"
   license_type: NOTICE
   last_upgrade_date {
-    year: 2020
-    month: 12
-    day: 21
+    year: 2021
+    month: 4
+    day: 1
   }
 }
diff --git a/README.md b/README.md
index 7e3d3a9..c8ebd87 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 [![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)](
 https://github.com/crossbeam-rs/crossbeam/actions)
-[![License](https://img.shields.io/badge/license-MIT%20OR%20Apache--2.0-blue.svg)](
+[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)](
 https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch#license)
 [![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)](
 https://crates.io/crates/crossbeam-epoch)
@@ -20,7 +20,7 @@
 shared objects until no pointers to them can exist.
 
 Everything in this crate except the global GC can be used in `no_std` environments, provided that
-features `alloc` and `nightly` are enabled.
+the `alloc` feature is enabled.
 
 ## Usage
 
diff --git a/examples/sanitize.rs b/examples/sanitize.rs
index 5110f8a..4109c34 100644
--- a/examples/sanitize.rs
+++ b/examples/sanitize.rs
@@ -14,7 +14,7 @@
     if rng.gen() {
         thread::sleep(Duration::from_millis(1));
     }
-    let timeout = Duration::from_millis(rng.gen_range(0, 10));
+    let timeout = Duration::from_millis(rng.gen_range(0..10));
     let now = Instant::now();
 
     while now.elapsed() < timeout {
diff --git a/examples/treiber_stack.rs b/examples/treiber_stack.rs
deleted file mode 100644
index a2c3c16..0000000
--- a/examples/treiber_stack.rs
+++ /dev/null
@@ -1,107 +0,0 @@
-use std::mem::ManuallyDrop;
-use std::ptr;
-use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
-
-use crossbeam_epoch::{self as epoch, Atomic, Owned};
-use crossbeam_utils::thread::scope;
-
-/// Treiber's lock-free stack.
-///
-/// Usable with any number of producers and consumers.
-#[derive(Debug)]
-pub struct TreiberStack<T> {
-    head: Atomic<Node<T>>,
-}
-
-#[derive(Debug)]
-struct Node<T> {
-    data: ManuallyDrop<T>,
-    next: Atomic<Node<T>>,
-}
-
-impl<T> TreiberStack<T> {
-    /// Creates a new, empty stack.
-    pub fn new() -> TreiberStack<T> {
-        TreiberStack {
-            head: Atomic::null(),
-        }
-    }
-
-    /// Pushes a value on top of the stack.
-    pub fn push(&self, t: T) {
-        let mut n = Owned::new(Node {
-            data: ManuallyDrop::new(t),
-            next: Atomic::null(),
-        });
-
-        let guard = epoch::pin();
-
-        loop {
-            let head = self.head.load(Relaxed, &guard);
-            n.next.store(head, Relaxed);
-
-            match self.head.compare_and_set(head, n, Release, &guard) {
-                Ok(_) => break,
-                Err(e) => n = e.new,
-            }
-        }
-    }
-
-    /// Attempts to pop the top element from the stack.
-    ///
-    /// Returns `None` if the stack is empty.
-    pub fn pop(&self) -> Option<T> {
-        let guard = epoch::pin();
-        loop {
-            let head = self.head.load(Acquire, &guard);
-
-            match unsafe { head.as_ref() } {
-                Some(h) => {
-                    let next = h.next.load(Relaxed, &guard);
-
-                    if self
-                        .head
-                        .compare_and_set(head, next, Relaxed, &guard)
-                        .is_ok()
-                    {
-                        unsafe {
-                            guard.defer_destroy(head);
-                            return Some(ManuallyDrop::into_inner(ptr::read(&(*h).data)));
-                        }
-                    }
-                }
-                None => return None,
-            }
-        }
-    }
-
-    /// Returns `true` if the stack is empty.
-    pub fn is_empty(&self) -> bool {
-        let guard = epoch::pin();
-        self.head.load(Acquire, &guard).is_null()
-    }
-}
-
-impl<T> Drop for TreiberStack<T> {
-    fn drop(&mut self) {
-        while self.pop().is_some() {}
-    }
-}
-
-fn main() {
-    let stack = TreiberStack::new();
-
-    scope(|scope| {
-        for _ in 0..10 {
-            scope.spawn(|_| {
-                for i in 0..10_000 {
-                    stack.push(i);
-                    assert!(stack.pop().is_some());
-                }
-            });
-        }
-    })
-    .unwrap();
-
-    assert!(stack.pop().is_none());
-}
diff --git a/src/atomic.rs b/src/atomic.rs
index 5177187..e4ca23f 100644
--- a/src/atomic.rs
+++ b/src/atomic.rs
@@ -5,12 +5,12 @@
 use core::mem::{self, MaybeUninit};
 use core::ops::{Deref, DerefMut};
 use core::slice;
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::Ordering;
 
 use crate::alloc::alloc;
 use crate::alloc::boxed::Box;
 use crate::guard::Guard;
-use const_fn::const_fn;
+use crate::primitive::sync::atomic::AtomicUsize;
 use crossbeam_utils::atomic::AtomicConsume;
 
 /// Given ordering for the success case in a compare-exchange operation, returns the strongest
@@ -26,7 +26,12 @@
 }
 
 /// The error returned on failed compare-and-set operation.
-pub struct CompareAndSetError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
+// TODO: remove in the next major version.
+#[deprecated(note = "Use `CompareExchangeError` instead")]
+pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>;
+
+/// The error returned on failed compare-and-swap operation.
+pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
     /// The value in the atomic pointer at the time of the failed operation.
     pub current: Shared<'g, T>,
 
@@ -34,9 +39,9 @@
     pub new: P,
 }
 
-impl<'g, T: 'g, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> {
+impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("CompareAndSetError")
+        f.debug_struct("CompareExchangeError")
             .field("current", &self.current)
             .field("new", &self.new)
             .finish()
@@ -54,6 +59,11 @@
 ///    ordering is chosen.
 /// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
 ///    for the failure case.
+// TODO: remove in the next major version.
+#[deprecated(
+    note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \
+            use `compare_exchange` or `compare_exchange_weak` instead"
+)]
 pub trait CompareAndSetOrdering {
     /// The ordering of the operation when it succeeds.
     fn success(&self) -> Ordering;
@@ -65,6 +75,7 @@
     fn failure(&self) -> Ordering;
 }
 
+#[allow(deprecated)]
 impl CompareAndSetOrdering for Ordering {
     #[inline]
     fn success(&self) -> Ordering {
@@ -77,6 +88,7 @@
     }
 }
 
+#[allow(deprecated)]
 impl CompareAndSetOrdering for (Ordering, Ordering) {
     #[inline]
     fn success(&self) -> Ordering {
@@ -327,8 +339,8 @@
     /// let a = Atomic::<i32>::null();
     /// ```
     ///
-    #[const_fn(feature = "nightly")]
-    pub const fn null() -> Atomic<T> {
+    #[cfg_attr(all(feature = "nightly", not(crossbeam_loom)), const_fn::const_fn)]
+    pub fn null() -> Atomic<T> {
         Self {
             data: AtomicUsize::new(0),
             _marker: PhantomData,
@@ -426,8 +438,14 @@
     /// pointer that was written is returned. On failure the actual current value and `new` are
     /// returned.
     ///
-    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
-    /// ordering of this operation.
+    /// This method takes two `Ordering` arguments to describe the memory
+    /// ordering of this operation. `success` describes the required ordering for the
+    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
+    /// `failure` describes the required ordering for the load operation that takes place when
+    /// the comparison fails. Using `Acquire` as success ordering makes the store part
+    /// of this operation `Relaxed`, and using `Release` makes the successful load
+    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
+    /// and must be equivalent to or weaker than the success ordering.
     ///
     /// # Examples
     ///
@@ -439,30 +457,160 @@
     ///
     /// let guard = &epoch::pin();
     /// let curr = a.load(SeqCst, guard);
+    /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
+    /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
+    /// ```
+    pub fn compare_exchange<'g, P>(
+        &self,
+        current: Shared<'_, T>,
+        new: P,
+        success: Ordering,
+        failure: Ordering,
+        _: &'g Guard,
+    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
+    where
+        P: Pointer<T>,
+    {
+        let new = new.into_usize();
+        self.data
+            .compare_exchange(current.into_usize(), new, success, failure)
+            .map(|_| unsafe { Shared::from_usize(new) })
+            .map_err(|current| unsafe {
+                CompareExchangeError {
+                    current: Shared::from_usize(current),
+                    new: P::from_usize(new),
+                }
+            })
+    }
+
+    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
+    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
+    /// same object, but with different tags, will not be considered equal.
+    ///
+    /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison
+    /// succeeds, which can result in more efficient code on some platforms.  The return value is a
+    /// result indicating whether the new pointer was written. On success the pointer that was
+    /// written is returned. On failure the actual current value and `new` are returned.
+    ///
+    /// This method takes two `Ordering` arguments to describe the memory
+    /// ordering of this operation. `success` describes the required ordering for the
+    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
+    /// `failure` describes the required ordering for the load operation that takes place when
+    /// the comparison fails. Using `Acquire` as success ordering makes the store part
+    /// of this operation `Relaxed`, and using `Release` makes the successful load
+    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
+    /// and must be equivalent to or weaker than the success ordering.
+    ///
+    /// [`compare_exchange`]: Atomic::compare_exchange
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(1234);
+    /// let guard = &epoch::pin();
+    ///
+    /// let mut new = Owned::new(5678);
+    /// let mut ptr = a.load(SeqCst, guard);
+    /// loop {
+    ///     match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
+    ///         Ok(p) => {
+    ///             ptr = p;
+    ///             break;
+    ///         }
+    ///         Err(err) => {
+    ///             ptr = err.current;
+    ///             new = err.new;
+    ///         }
+    ///     }
+    /// }
+    ///
+    /// let mut curr = a.load(SeqCst, guard);
+    /// loop {
+    ///     match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) {
+    ///         Ok(_) => break,
+    ///         Err(err) => curr = err.current,
+    ///     }
+    /// }
+    /// ```
+    pub fn compare_exchange_weak<'g, P>(
+        &self,
+        current: Shared<'_, T>,
+        new: P,
+        success: Ordering,
+        failure: Ordering,
+        _: &'g Guard,
+    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
+    where
+        P: Pointer<T>,
+    {
+        let new = new.into_usize();
+        self.data
+            .compare_exchange_weak(current.into_usize(), new, success, failure)
+            .map(|_| unsafe { Shared::from_usize(new) })
+            .map_err(|current| unsafe {
+                CompareExchangeError {
+                    current: Shared::from_usize(current),
+                    new: P::from_usize(new),
+                }
+            })
+    }
+
+    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
+    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
+    /// same object, but with different tags, will not be considered equal.
+    ///
+    /// The return value is a result indicating whether the new pointer was written. On success the
+    /// pointer that was written is returned. On failure the actual current value and `new` are
+    /// returned.
+    ///
+    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
+    /// ordering of this operation.
+    ///
+    /// # Migrating to `compare_exchange`
+    ///
+    /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for
+    /// memory orderings:
+    ///
+    /// Original | Success | Failure
+    /// -------- | ------- | -------
+    /// Relaxed  | Relaxed | Relaxed
+    /// Acquire  | Acquire | Acquire
+    /// Release  | Release | Relaxed
+    /// AcqRel   | AcqRel  | Acquire
+    /// SeqCst   | SeqCst  | SeqCst
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #![allow(deprecated)]
+    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
+    /// use std::sync::atomic::Ordering::SeqCst;
+    ///
+    /// let a = Atomic::new(1234);
+    ///
+    /// let guard = &epoch::pin();
+    /// let curr = a.load(SeqCst, guard);
     /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
     /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
     /// ```
+    // TODO: remove in the next major version.
+    #[allow(deprecated)]
+    #[deprecated(note = "Use `compare_exchange` instead")]
     pub fn compare_and_set<'g, O, P>(
         &self,
         current: Shared<'_, T>,
         new: P,
         ord: O,
-        _: &'g Guard,
+        guard: &'g Guard,
     ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
     where
         O: CompareAndSetOrdering,
         P: Pointer<T>,
     {
-        let new = new.into_usize();
-        self.data
-            .compare_exchange(current.into_usize(), new, ord.success(), ord.failure())
-            .map(|_| unsafe { Shared::from_usize(new) })
-            .map_err(|current| unsafe {
-                CompareAndSetError {
-                    current: Shared::from_usize(current),
-                    new: P::from_usize(new),
-                }
-            })
+        self.compare_exchange(current, new, ord.success(), ord.failure(), guard)
     }
 
     /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
@@ -479,9 +627,23 @@
     ///
     /// [`compare_and_set`]: Atomic::compare_and_set
     ///
+    /// # Migrating to `compare_exchange_weak`
+    ///
+    /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for
+    /// memory orderings:
+    ///
+    /// Original | Success | Failure
+    /// -------- | ------- | -------
+    /// Relaxed  | Relaxed | Relaxed
+    /// Acquire  | Acquire | Acquire
+    /// Release  | Release | Relaxed
+    /// AcqRel   | AcqRel  | Acquire
+    /// SeqCst   | SeqCst  | SeqCst
+    ///
     /// # Examples
     ///
     /// ```
+    /// # #![allow(deprecated)]
     /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
     /// use std::sync::atomic::Ordering::SeqCst;
     ///
@@ -511,27 +673,21 @@
     ///     }
     /// }
     /// ```
+    // TODO: remove in the next major version.
+    #[allow(deprecated)]
+    #[deprecated(note = "Use `compare_exchange_weak` instead")]
     pub fn compare_and_set_weak<'g, O, P>(
         &self,
         current: Shared<'_, T>,
         new: P,
         ord: O,
-        _: &'g Guard,
+        guard: &'g Guard,
     ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
     where
         O: CompareAndSetOrdering,
         P: Pointer<T>,
     {
-        let new = new.into_usize();
-        self.data
-            .compare_exchange_weak(current.into_usize(), new, ord.success(), ord.failure())
-            .map(|_| unsafe { Shared::from_usize(new) })
-            .map_err(|current| unsafe {
-                CompareAndSetError {
-                    current: Shared::from_usize(current),
-                    new: P::from_usize(new),
-                }
-            })
+        self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard)
     }
 
     /// Bitwise "and" with the current tag.
@@ -638,7 +794,17 @@
     /// }
     /// ```
     pub unsafe fn into_owned(self) -> Owned<T> {
-        Owned::from_usize(self.data.into_inner())
+        #[cfg(crossbeam_loom)]
+        {
+            // FIXME: loom does not yet support into_inner, so we use unsync_load for now,
+            // which should have the same synchronization properties:
+            // https://github.com/tokio-rs/loom/issues/117
+            Owned::from_usize(self.data.unsync_load())
+        }
+        #[cfg(not(crossbeam_loom))]
+        {
+            Owned::from_usize(self.data.into_inner())
+        }
     }
 }
 
@@ -1358,7 +1524,7 @@
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(crossbeam_loom)))]
 mod tests {
     use super::Shared;
 
@@ -1371,4 +1537,11 @@
     fn valid_tag_i64() {
         Shared::<i64>::null().with_tag(7);
     }
+
+    #[cfg(feature = "nightly")]
+    #[test]
+    fn const_atomic_null() {
+        use super::Atomic;
+        const _: Atomic<u8> = Atomic::<u8>::null();
+    }
 }
diff --git a/src/collector.rs b/src/collector.rs
index 8224e11..7cfb819 100644
--- a/src/collector.rs
+++ b/src/collector.rs
@@ -14,9 +14,9 @@
 /// ```
 use core::fmt;
 
-use crate::alloc::sync::Arc;
 use crate::guard::Guard;
 use crate::internal::{Global, Local};
+use crate::primitive::sync::Arc;
 
 /// An epoch-based garbage collector.
 pub struct Collector {
@@ -109,7 +109,7 @@
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(crossbeam_loom)))]
 mod tests {
     use std::mem;
     use std::sync::atomic::{AtomicUsize, Ordering};
@@ -151,9 +151,9 @@
                 let a = Owned::new(7).into_shared(guard);
                 guard.defer_destroy(a);
 
-                assert!(!(*(*guard.local).bag.get()).is_empty());
+                assert!(!(*guard.local).bag.with(|b| (*b).is_empty()));
 
-                while !(*(*guard.local).bag.get()).is_empty() {
+                while !(*guard.local).bag.with(|b| (*b).is_empty()) {
                     guard.flush();
                 }
             }
@@ -172,7 +172,7 @@
                 let a = Owned::new(7).into_shared(guard);
                 guard.defer_destroy(a);
             }
-            assert!(!(*(*guard.local).bag.get()).is_empty());
+            assert!(!(*guard.local).bag.with(|b| (*b).is_empty()));
         }
     }
 
@@ -199,6 +199,7 @@
         .unwrap();
     }
 
+    #[cfg(not(crossbeam_sanitize))] // TODO: assertions fail because `cfg(crossbeam_sanitize)` reduces `internal::MAX_OBJECTS`
     #[test]
     fn incremental() {
         const COUNT: usize = 100_000;
diff --git a/src/default.rs b/src/default.rs
index 1deac21..b7797ce 100644
--- a/src/default.rs
+++ b/src/default.rs
@@ -6,7 +6,7 @@
 
 use crate::collector::{Collector, LocalHandle};
 use crate::guard::Guard;
-use lazy_static::lazy_static;
+use crate::primitive::{lazy_static, thread_local};
 
 lazy_static! {
     /// The global data for the default garbage collector.
@@ -45,7 +45,7 @@
         .unwrap_or_else(|_| f(&COLLECTOR.register()))
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(crossbeam_loom)))]
 mod tests {
     use crossbeam_utils::thread;
 
diff --git a/src/deferred.rs b/src/deferred.rs
index 9f4869b..d953c46 100644
--- a/src/deferred.rs
+++ b/src/deferred.rs
@@ -16,7 +16,7 @@
 /// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap.
 ///
 /// This is a handy way of keeping an unsized `FnOnce()` within a sized structure.
-pub struct Deferred {
+pub(crate) struct Deferred {
     call: unsafe fn(*mut u8),
     data: Data,
     _marker: PhantomData<*mut ()>, // !Send + !Sync
@@ -30,7 +30,7 @@
 
 impl Deferred {
     /// Constructs a new `Deferred` from a `FnOnce()`.
-    pub fn new<F: FnOnce()>(f: F) -> Self {
+    pub(crate) fn new<F: FnOnce()>(f: F) -> Self {
         let size = mem::size_of::<F>();
         let align = mem::align_of::<F>();
 
@@ -73,13 +73,13 @@
 
     /// Calls the function.
     #[inline]
-    pub fn call(mut self) {
+    pub(crate) fn call(mut self) {
         let call = self.call;
         unsafe { call(&mut self.data as *mut Data as *mut u8) };
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(crossbeam_loom)))]
 mod tests {
     use super::Deferred;
     use std::cell::Cell;
diff --git a/src/epoch.rs b/src/epoch.rs
index e7759d9..663508b 100644
--- a/src/epoch.rs
+++ b/src/epoch.rs
@@ -7,14 +7,15 @@
 //! If an object became garbage in some epoch, then we can be sure that after two advancements no
 //! participant will hold a reference to it. That is the crux of safe memory reclamation.
 
-use core::sync::atomic::{AtomicUsize, Ordering};
+use crate::primitive::sync::atomic::AtomicUsize;
+use core::sync::atomic::Ordering;
 
 /// An epoch that can be marked as pinned or unpinned.
 ///
 /// Internally, the epoch is represented as an integer that wraps around at some unspecified point
 /// and a flag that represents whether it is pinned or unpinned.
 #[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
-pub struct Epoch {
+pub(crate) struct Epoch {
     /// The least significant bit is set if pinned. The rest of the bits hold the epoch.
     data: usize,
 }
@@ -22,7 +23,7 @@
 impl Epoch {
     /// Returns the starting epoch in unpinned state.
     #[inline]
-    pub fn starting() -> Self {
+    pub(crate) fn starting() -> Self {
         Self::default()
     }
 
@@ -30,7 +31,7 @@
     ///
     /// Internally, epochs are represented as numbers in the range `(isize::MIN / 2) .. (isize::MAX
     /// / 2)`, so the returned distance will be in the same interval.
-    pub fn wrapping_sub(self, rhs: Self) -> isize {
+    pub(crate) fn wrapping_sub(self, rhs: Self) -> isize {
         // The result is the same with `(self.data & !1).wrapping_sub(rhs.data & !1) as isize >> 1`,
         // because the possible difference of LSB in `(self.data & !1).wrapping_sub(rhs.data & !1)`
         // will be ignored in the shift operation.
@@ -39,13 +40,13 @@
 
     /// Returns `true` if the epoch is marked as pinned.
     #[inline]
-    pub fn is_pinned(self) -> bool {
+    pub(crate) fn is_pinned(self) -> bool {
         (self.data & 1) == 1
     }
 
     /// Returns the same epoch, but marked as pinned.
     #[inline]
-    pub fn pinned(self) -> Epoch {
+    pub(crate) fn pinned(self) -> Epoch {
         Epoch {
             data: self.data | 1,
         }
@@ -53,7 +54,7 @@
 
     /// Returns the same epoch, but marked as unpinned.
     #[inline]
-    pub fn unpinned(self) -> Epoch {
+    pub(crate) fn unpinned(self) -> Epoch {
         Epoch {
             data: self.data & !1,
         }
@@ -63,7 +64,7 @@
     ///
     /// The returned epoch will be marked as pinned only if the previous one was as well.
     #[inline]
-    pub fn successor(self) -> Epoch {
+    pub(crate) fn successor(self) -> Epoch {
         Epoch {
             data: self.data.wrapping_add(2),
         }
@@ -72,7 +73,7 @@
 
 /// An atomic value that holds an `Epoch`.
 #[derive(Default, Debug)]
-pub struct AtomicEpoch {
+pub(crate) struct AtomicEpoch {
     /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented
     /// using an `AtomicUsize`.
     data: AtomicUsize,
@@ -81,14 +82,14 @@
 impl AtomicEpoch {
     /// Creates a new atomic epoch.
     #[inline]
-    pub fn new(epoch: Epoch) -> Self {
+    pub(crate) fn new(epoch: Epoch) -> Self {
         let data = AtomicUsize::new(epoch.data);
         AtomicEpoch { data }
     }
 
     /// Loads a value from the atomic epoch.
     #[inline]
-    pub fn load(&self, ord: Ordering) -> Epoch {
+    pub(crate) fn load(&self, ord: Ordering) -> Epoch {
         Epoch {
             data: self.data.load(ord),
         }
@@ -96,19 +97,37 @@
 
     /// Stores a value into the atomic epoch.
     #[inline]
-    pub fn store(&self, epoch: Epoch, ord: Ordering) {
+    pub(crate) fn store(&self, epoch: Epoch, ord: Ordering) {
         self.data.store(epoch.data, ord);
     }
 
     /// Stores a value into the atomic epoch if the current value is the same as `current`.
     ///
-    /// The return value is always the previous value. If it is equal to `current`, then the value
-    /// is updated.
+    /// The return value is a result indicating whether the new value was written and containing
+    /// the previous value. On success this value is guaranteed to be equal to `current`.
     ///
-    /// The `Ordering` argument describes the memory ordering of this operation.
+    /// This method takes two `Ordering` arguments to describe the memory
+    /// ordering of this operation. `success` describes the required ordering for the
+    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
+    /// `failure` describes the required ordering for the load operation that takes place when
+    /// the comparison fails. Using `Acquire` as success ordering makes the store part
+    /// of this operation `Relaxed`, and using `Release` makes the successful load
+    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
+    /// and must be equivalent to or weaker than the success ordering.
     #[inline]
-    pub fn compare_and_swap(&self, current: Epoch, new: Epoch, ord: Ordering) -> Epoch {
-        let data = self.data.compare_and_swap(current.data, new.data, ord);
-        Epoch { data }
+    pub(crate) fn compare_exchange(
+        &self,
+        current: Epoch,
+        new: Epoch,
+        success: Ordering,
+        failure: Ordering,
+    ) -> Result<Epoch, Epoch> {
+        match self
+            .data
+            .compare_exchange(current.data, new.data, success, failure)
+        {
+            Ok(data) => Ok(Epoch { data }),
+            Err(data) => Err(Epoch { data }),
+        }
     }
 }
diff --git a/src/internal.rs b/src/internal.rs
index bf2dfb8..966bceb 100644
--- a/src/internal.rs
+++ b/src/internal.rs
@@ -35,10 +35,11 @@
 //! Ideally each instance of concurrent data structure may have its own queue that gets fully
 //! destroyed as soon as the data structure gets dropped.
 
-use core::cell::{Cell, UnsafeCell};
+use crate::primitive::cell::UnsafeCell;
+use crate::primitive::sync::atomic;
+use core::cell::Cell;
 use core::mem::{self, ManuallyDrop};
 use core::num::Wrapping;
-use core::sync::atomic;
 use core::sync::atomic::Ordering;
 use core::{fmt, ptr};
 
@@ -54,13 +55,13 @@
 use crate::sync::queue::Queue;
 
 /// Maximum number of objects a bag can contain.
-#[cfg(not(feature = "sanitize"))]
+#[cfg(not(crossbeam_sanitize))]
 const MAX_OBJECTS: usize = 62;
-#[cfg(feature = "sanitize")]
+#[cfg(crossbeam_sanitize)]
 const MAX_OBJECTS: usize = 4;
 
 /// A bag of deferred functions.
-pub struct Bag {
+pub(crate) struct Bag {
     /// Stashed objects.
     deferreds: [Deferred; MAX_OBJECTS],
     len: usize,
@@ -71,12 +72,12 @@
 
 impl Bag {
     /// Returns a new, empty bag.
-    pub fn new() -> Self {
+    pub(crate) fn new() -> Self {
         Self::default()
     }
 
     /// Returns `true` if the bag is empty.
-    pub fn is_empty(&self) -> bool {
+    pub(crate) fn is_empty(&self) -> bool {
         self.len == 0
     }
 
@@ -88,7 +89,7 @@
     /// # Safety
     ///
     /// It should be safe for another thread to execute the given function.
-    pub unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> {
+    pub(crate) unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> {
         if self.len < MAX_OBJECTS {
             self.deferreds[self.len] = deferred;
             self.len += 1;
@@ -108,7 +109,7 @@
     #[rustfmt::skip]
     fn default() -> Self {
         // TODO: [no_op; MAX_OBJECTS] syntax blocked by https://github.com/rust-lang/rust/issues/49147
-        #[cfg(not(feature = "sanitize"))]
+        #[cfg(not(crossbeam_sanitize))]
         return Bag {
             len: 0,
             deferreds: [
@@ -176,7 +177,7 @@
                 Deferred::new(no_op_func),
             ],
         };
-        #[cfg(feature = "sanitize")]
+        #[cfg(crossbeam_sanitize)]
         return Bag {
             len: 0,
             deferreds: [
@@ -231,7 +232,7 @@
 }
 
 /// The global data for a garbage collector.
-pub struct Global {
+pub(crate) struct Global {
     /// The intrusive linked list of `Local`s.
     locals: List<Local>,
 
@@ -248,7 +249,7 @@
 
     /// Creates a new global data for garbage collection.
     #[inline]
-    pub fn new() -> Self {
+    pub(crate) fn new() -> Self {
         Self {
             locals: List::new(),
             queue: Queue::new(),
@@ -257,7 +258,7 @@
     }
 
     /// Pushes the bag into the global queue and replaces the bag with a new empty bag.
-    pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) {
+    pub(crate) fn push_bag(&self, bag: &mut Bag, guard: &Guard) {
         let bag = mem::replace(bag, Bag::new());
 
         atomic::fence(Ordering::SeqCst);
@@ -274,10 +275,10 @@
     /// path. In other words, we want the compiler to optimize branching for the case when
     /// `collect()` is not called.
     #[cold]
-    pub fn collect(&self, guard: &Guard) {
+    pub(crate) fn collect(&self, guard: &Guard) {
         let global_epoch = self.try_advance(guard);
 
-        let steps = if cfg!(feature = "sanitize") {
+        let steps = if cfg!(crossbeam_sanitize) {
             usize::max_value()
         } else {
             Self::COLLECT_STEPS
@@ -303,7 +304,7 @@
     ///
     /// `try_advance()` is annotated `#[cold]` because it is rarely called.
     #[cold]
-    pub fn try_advance(&self, guard: &Guard) -> Epoch {
+    pub(crate) fn try_advance(&self, guard: &Guard) -> Epoch {
         let global_epoch = self.epoch.load(Ordering::Relaxed);
         atomic::fence(Ordering::SeqCst);
 
@@ -345,7 +346,7 @@
 }
 
 /// Participant for garbage collection.
-pub struct Local {
+pub(crate) struct Local {
     /// A node in the intrusive linked list of `Local`s.
     entry: Entry,
 
@@ -374,9 +375,13 @@
 
 // Make sure `Local` is less than or equal to 2048 bytes.
 // https://github.com/crossbeam-rs/crossbeam/issues/551
+#[cfg(not(crossbeam_sanitize))] // `crossbeam_sanitize` reduces the size of `Local`
 #[test]
 fn local_size() {
-    assert!(core::mem::size_of::<Local>() <= 2048, "An allocation of `Local` should be <= 2048 bytes.");
+    assert!(
+        core::mem::size_of::<Local>() <= 2048,
+        "An allocation of `Local` should be <= 2048 bytes."
+    );
 }
 
 impl Local {
@@ -385,7 +390,7 @@
     const PINNINGS_BETWEEN_COLLECT: usize = 128;
 
     /// Registers a new `Local` in the provided `Global`.
-    pub fn register(collector: &Collector) -> LocalHandle {
+    pub(crate) fn register(collector: &Collector) -> LocalHandle {
         unsafe {
             // Since we dereference no pointers in this block, it is safe to use `unprotected`.
 
@@ -408,19 +413,19 @@
 
     /// Returns a reference to the `Global` in which this `Local` resides.
     #[inline]
-    pub fn global(&self) -> &Global {
+    pub(crate) fn global(&self) -> &Global {
         &self.collector().global
     }
 
     /// Returns a reference to the `Collector` in which this `Local` resides.
     #[inline]
-    pub fn collector(&self) -> &Collector {
-        unsafe { &**self.collector.get() }
+    pub(crate) fn collector(&self) -> &Collector {
+        self.collector.with(|c| unsafe { &**c })
     }
 
     /// Returns `true` if the current participant is pinned.
     #[inline]
-    pub fn is_pinned(&self) -> bool {
+    pub(crate) fn is_pinned(&self) -> bool {
         self.guard_count.get() > 0
     }
 
@@ -429,8 +434,8 @@
     /// # Safety
     ///
     /// It should be safe for another thread to execute the given function.
-    pub unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) {
-        let bag = &mut *self.bag.get();
+    pub(crate) unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) {
+        let bag = self.bag.with_mut(|b| &mut *b);
 
         while let Err(d) = bag.try_push(deferred) {
             self.global().push_bag(bag, guard);
@@ -438,8 +443,8 @@
         }
     }
 
-    pub fn flush(&self, guard: &Guard) {
-        let bag = unsafe { &mut *self.bag.get() };
+    pub(crate) fn flush(&self, guard: &Guard) {
+        let bag = self.bag.with_mut(|b| unsafe { &mut *b });
 
         if !bag.is_empty() {
             self.global().push_bag(bag, guard);
@@ -450,7 +455,7 @@
 
     /// Pins the `Local`.
     #[inline]
-    pub fn pin(&self) -> Guard {
+    pub(crate) fn pin(&self) -> Guard {
         let guard = Guard { local: self };
 
         let guard_count = self.guard_count.get();
@@ -468,7 +473,7 @@
                 // a `SeqCst` fence.
                 //
                 // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
-                // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg`
+                // 2. `_.compare_exchange(_, _, SeqCst, SeqCst)`, which compiles into a `lock cmpxchg`
                 //    instruction.
                 //
                 // Both instructions have the effect of a full barrier, but benchmarks have shown
@@ -478,10 +483,13 @@
                 // works fine.  Using inline assembly would be a viable (and correct) alternative,
                 // but alas, that is not possible on stable Rust.
                 let current = Epoch::starting();
-                let previous = self
-                    .epoch
-                    .compare_and_swap(current, new_epoch, Ordering::SeqCst);
-                debug_assert_eq!(current, previous, "participant was expected to be unpinned");
+                let res = self.epoch.compare_exchange(
+                    current,
+                    new_epoch,
+                    Ordering::SeqCst,
+                    Ordering::SeqCst,
+                );
+                debug_assert!(res.is_ok(), "participant was expected to be unpinned");
                 // We add a compiler fence to make it less likely for LLVM to do something wrong
                 // here.  Formally, this is not enough to get rid of data races; practically,
                 // it should go a long way.
@@ -507,7 +515,7 @@
 
     /// Unpins the `Local`.
     #[inline]
-    pub fn unpin(&self) {
+    pub(crate) fn unpin(&self) {
         let guard_count = self.guard_count.get();
         self.guard_count.set(guard_count - 1);
 
@@ -522,7 +530,7 @@
 
     /// Unpins and then pins the `Local`.
     #[inline]
-    pub fn repin(&self) {
+    pub(crate) fn repin(&self) {
         let guard_count = self.guard_count.get();
 
         // Update the local epoch only if there's only one guard.
@@ -545,7 +553,7 @@
 
     /// Increments the handle count.
     #[inline]
-    pub fn acquire_handle(&self) {
+    pub(crate) fn acquire_handle(&self) {
         let handle_count = self.handle_count.get();
         debug_assert!(handle_count >= 1);
         self.handle_count.set(handle_count + 1);
@@ -553,7 +561,7 @@
 
     /// Decrements the handle count.
     #[inline]
-    pub fn release_handle(&self) {
+    pub(crate) fn release_handle(&self) {
         let guard_count = self.guard_count.get();
         let handle_count = self.handle_count.get();
         debug_assert!(handle_count >= 1);
@@ -577,7 +585,8 @@
             // Pin and move the local bag into the global queue. It's important that `push_bag`
             // doesn't defer destruction on any new garbage.
             let guard = &self.pin();
-            self.global().push_bag(&mut *self.bag.get(), guard);
+            self.global()
+                .push_bag(self.bag.with_mut(|b| &mut *b), guard);
         }
         // Revert the handle count back to zero.
         self.handle_count.set(0);
@@ -586,7 +595,7 @@
             // Take the reference to the `Global` out of this `Local`. Since we're not protected
             // by a guard at this time, it's crucial that the reference is read before marking the
             // `Local` as deleted.
-            let collector: Collector = ptr::read(&*(*self.collector.get()));
+            let collector: Collector = ptr::read(self.collector.with(|c| &*(*c)));
 
             // Mark this node in the linked list as deleted.
             self.entry.delete(unprotected());
@@ -617,7 +626,7 @@
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(crossbeam_loom)))]
 mod tests {
     use std::sync::atomic::{AtomicUsize, Ordering};
 
diff --git a/src/lib.rs b/src/lib.rs
index f64d16c..99da911 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -55,15 +55,105 @@
         allow(dead_code, unused_assignments, unused_variables)
     )
 ))]
-#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
+#![warn(
+    missing_docs,
+    missing_debug_implementations,
+    rust_2018_idioms,
+    unreachable_pub
+)]
 #![cfg_attr(not(feature = "std"), no_std)]
 #![cfg_attr(feature = "nightly", feature(cfg_target_has_atomic))]
 #![cfg_attr(feature = "nightly", feature(const_fn))]
-// matches! requires Rust 1.42
-#![allow(clippy::match_like_matches_macro)]
+
+#[cfg(crossbeam_loom)]
+extern crate loom_crate as loom;
 
 use cfg_if::cfg_if;
 
+#[cfg(crossbeam_loom)]
+#[allow(unused_imports, dead_code)]
+mod primitive {
+    pub(crate) mod cell {
+        pub(crate) use loom::cell::UnsafeCell;
+    }
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            use core::sync::atomic::Ordering;
+            pub(crate) use loom::sync::atomic::AtomicUsize;
+            pub(crate) fn fence(ord: Ordering) {
+                if let Ordering::Acquire = ord {
+                } else {
+                    // FIXME: loom only supports acquire fences at the moment.
+                    // https://github.com/tokio-rs/loom/issues/117
+                    // let's at least not panic...
+                    // this may generate some false positives (`SeqCst` is stronger than `Acquire`
+                    // for example), and some false negatives (`Relaxed` is weaker than `Acquire`),
+                    // but it's the best we can do for the time being.
+                }
+                loom::sync::atomic::fence(Ordering::Acquire)
+            }
+
+            // FIXME: loom does not support compiler_fence at the moment.
+            // https://github.com/tokio-rs/loom/issues/117
+            // we use fence as a stand-in for compiler_fence for the time being.
+            // this may miss some races since fence is stronger than compiler_fence,
+            // but it's the best we can do for the time being.
+            pub(crate) use self::fence as compiler_fence;
+        }
+        pub(crate) use loom::sync::Arc;
+    }
+    pub(crate) use loom::lazy_static;
+    pub(crate) use loom::thread_local;
+}
+#[cfg(not(crossbeam_loom))]
+#[allow(unused_imports, dead_code)]
+mod primitive {
+    #[cfg(any(feature = "alloc", feature = "std"))]
+    pub(crate) mod cell {
+        #[derive(Debug)]
+        #[repr(transparent)]
+        pub(crate) struct UnsafeCell<T>(::core::cell::UnsafeCell<T>);
+
+        // loom's UnsafeCell has a slightly different API than the standard library UnsafeCell.
+        // Since we want the rest of the code to be agnostic to whether it's running under loom or
+        // not, we write this small wrapper that provides the loom-supported API for the standard
+        // library UnsafeCell. This is also what the loom documentation recommends:
+        // https://github.com/tokio-rs/loom#handling-loom-api-differences
+        impl<T> UnsafeCell<T> {
+            #[inline]
+            pub(crate) fn new(data: T) -> UnsafeCell<T> {
+                UnsafeCell(::core::cell::UnsafeCell::new(data))
+            }
+
+            #[inline]
+            pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R {
+                f(self.0.get())
+            }
+
+            #[inline]
+            pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R {
+                f(self.0.get())
+            }
+        }
+    }
+    #[cfg(any(feature = "alloc", feature = "std"))]
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            pub(crate) use core::sync::atomic::compiler_fence;
+            pub(crate) use core::sync::atomic::fence;
+            pub(crate) use core::sync::atomic::AtomicUsize;
+        }
+        #[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
+        pub(crate) use alloc::sync::Arc;
+    }
+
+    #[cfg(feature = "std")]
+    pub(crate) use std::thread_local;
+
+    #[cfg(feature = "std")]
+    pub(crate) use lazy_static::lazy_static;
+}
+
 #[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
 cfg_if! {
     if #[cfg(feature = "alloc")] {
@@ -77,9 +167,15 @@
         mod internal;
         mod sync;
 
-        pub use self::atomic::{Pointable, Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Pointer, Shared};
+        pub use self::atomic::{
+            Pointable, Atomic, CompareExchangeError,
+            Owned, Pointer, Shared,
+        };
         pub use self::collector::{Collector, LocalHandle};
         pub use self::guard::{unprotected, Guard};
+
+        #[allow(deprecated)]
+        pub use self::atomic::{CompareAndSetError, CompareAndSetOrdering};
     }
 }
 
diff --git a/src/sync/list.rs b/src/sync/list.rs
index 656e2a8..52ffd6f 100644
--- a/src/sync/list.rs
+++ b/src/sync/list.rs
@@ -13,7 +13,7 @@
 /// An Entry is accessed from multiple threads, so it would be beneficial to put it in a different
 /// cache-line than thread-local data in terms of performance.
 #[derive(Debug)]
-pub struct Entry {
+pub(crate) struct Entry {
     /// The next entry in the linked list.
     /// If the tag is 1, this entry is marked as deleted.
     next: Atomic<Entry>,
@@ -64,7 +64,7 @@
 /// }
 /// ```
 ///
-pub trait IsElement<T> {
+pub(crate) trait IsElement<T> {
     /// Returns a reference to this element's `Entry`.
     fn entry_of(_: &T) -> &Entry;
 
@@ -93,7 +93,7 @@
 
 /// A lock-free, intrusive linked list of type `T`.
 #[derive(Debug)]
-pub struct List<T, C: IsElement<T> = T> {
+pub(crate) struct List<T, C: IsElement<T> = T> {
     /// The head of the linked list.
     head: Atomic<Entry>,
 
@@ -102,7 +102,7 @@
 }
 
 /// An iterator used for retrieving values from the list.
-pub struct Iter<'g, T, C: IsElement<T>> {
+pub(crate) struct Iter<'g, T, C: IsElement<T>> {
     /// The guard that protects the iteration.
     guard: &'g Guard,
 
@@ -122,7 +122,7 @@
 
 /// An error that occurs during iteration over the list.
 #[derive(PartialEq, Debug)]
-pub enum IterError {
+pub(crate) enum IterError {
     /// A concurrent thread modified the state of the list at the same place that this iterator
     /// was inspecting. Subsequent iteration will restart from the beginning of the list.
     Stalled,
@@ -145,14 +145,14 @@
     /// The entry should be a member of a linked list, and it should not have been deleted.
     /// It should be safe to call `C::finalize` on the entry after the `guard` is dropped, where `C`
     /// is the associated helper for the linked list.
-    pub unsafe fn delete(&self, guard: &Guard) {
+    pub(crate) unsafe fn delete(&self, guard: &Guard) {
         self.next.fetch_or(1, Release, guard);
     }
 }
 
 impl<T, C: IsElement<T>> List<T, C> {
     /// Returns a new, empty linked list.
-    pub fn new() -> Self {
+    pub(crate) fn new() -> Self {
         Self {
             head: Atomic::null(),
             _marker: PhantomData,
@@ -169,7 +169,7 @@
     /// - `container` is immovable, e.g. inside an `Owned`
     /// - the same `Entry` is not inserted more than once
     /// - the inserted object will be removed before the list is dropped
-    pub unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) {
+    pub(crate) unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) {
         // Insert right after head, i.e. at the beginning of the list.
         let to = &self.head;
         // Get the intrusively stored Entry of the new element to insert.
@@ -183,7 +183,7 @@
             // Set the Entry of the to-be-inserted element to point to the previous successor of
             // `to`.
             entry.next.store(next, Relaxed);
-            match to.compare_and_set_weak(next, entry_ptr, Release, guard) {
+            match to.compare_exchange_weak(next, entry_ptr, Release, Relaxed, guard) {
                 Ok(_) => break,
                 // We lost the race or weak CAS failed spuriously. Update the successor and try
                 // again.
@@ -204,7 +204,7 @@
     /// 2. If an object is deleted during iteration, it may or may not be returned.
     /// 3. The iteration may be aborted when it lost in a race condition. In this case, the winning
     ///    thread will continue to iterate over the same list.
-    pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> {
+    pub(crate) fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> {
         Iter {
             guard,
             pred: &self.head,
@@ -250,7 +250,7 @@
                 // Try to unlink `curr` from the list, and get the new value of `self.pred`.
                 let succ = match self
                     .pred
-                    .compare_and_set(self.curr, succ, Acquire, self.guard)
+                    .compare_exchange(self.curr, succ, Acquire, Acquire, self.guard)
                 {
                     Ok(_) => {
                         // We succeeded in unlinking `curr`, so we have to schedule
@@ -295,7 +295,7 @@
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(crossbeam_loom)))]
 mod tests {
     use super::*;
     use crate::{Collector, Owned};
diff --git a/src/sync/mod.rs b/src/sync/mod.rs
index f8eb259..5c06e76 100644
--- a/src/sync/mod.rs
+++ b/src/sync/mod.rs
@@ -1,4 +1,4 @@
 //! Synchronization primitives.
 
-pub mod list;
-pub mod queue;
+pub(crate) mod list;
+pub(crate) mod queue;
diff --git a/src/sync/queue.rs b/src/sync/queue.rs
index 71ea1bc..67c228d 100644
--- a/src/sync/queue.rs
+++ b/src/sync/queue.rs
@@ -19,7 +19,7 @@
 // the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or
 // all `Blocked` (requests for data from blocked threads).
 #[derive(Debug)]
-pub struct Queue<T> {
+pub(crate) struct Queue<T> {
     head: CachePadded<Atomic<Node<T>>>,
     tail: CachePadded<Atomic<Node<T>>>,
 }
@@ -42,7 +42,7 @@
 
 impl<T> Queue<T> {
     /// Create a new, empty queue.
-    pub fn new() -> Queue<T> {
+    pub(crate) fn new() -> Queue<T> {
         let q = Queue {
             head: CachePadded::new(Atomic::null()),
             tail: CachePadded::new(Atomic::null()),
@@ -74,24 +74,28 @@
         let next = o.next.load(Acquire, guard);
         if unsafe { next.as_ref().is_some() } {
             // if not, try to "help" by moving the tail pointer forward
-            let _ = self.tail.compare_and_set(onto, next, Release, guard);
+            let _ = self
+                .tail
+                .compare_exchange(onto, next, Release, Relaxed, guard);
             false
         } else {
             // looks like the actual tail; attempt to link in `n`
             let result = o
                 .next
-                .compare_and_set(Shared::null(), new, Release, guard)
+                .compare_exchange(Shared::null(), new, Release, Relaxed, guard)
                 .is_ok();
             if result {
                 // try to move the tail pointer forward
-                let _ = self.tail.compare_and_set(onto, new, Release, guard);
+                let _ = self
+                    .tail
+                    .compare_exchange(onto, new, Release, Relaxed, guard);
             }
             result
         }
     }
 
     /// Adds `t` to the back of the queue, possibly waking up threads blocked on `pop`.
-    pub fn push(&self, t: T, guard: &Guard) {
+    pub(crate) fn push(&self, t: T, guard: &Guard) {
         let new = Owned::new(Node {
             data: MaybeUninit::new(t),
             next: Atomic::null(),
@@ -118,12 +122,14 @@
         match unsafe { next.as_ref() } {
             Some(n) => unsafe {
                 self.head
-                    .compare_and_set(head, next, Release, guard)
+                    .compare_exchange(head, next, Release, Relaxed, guard)
                     .map(|_| {
                         let tail = self.tail.load(Relaxed, guard);
                         // Advance the tail so that we don't retire a pointer to a reachable node.
                         if head == tail {
-                            let _ = self.tail.compare_and_set(tail, next, Release, guard);
+                            let _ = self
+                                .tail
+                                .compare_exchange(tail, next, Release, Relaxed, guard);
                         }
                         guard.defer_destroy(head);
                         // TODO: Replace with MaybeUninit::read when api is stable
@@ -149,12 +155,14 @@
         match unsafe { next.as_ref() } {
             Some(n) if condition(unsafe { &*n.data.as_ptr() }) => unsafe {
                 self.head
-                    .compare_and_set(head, next, Release, guard)
+                    .compare_exchange(head, next, Release, Relaxed, guard)
                     .map(|_| {
                         let tail = self.tail.load(Relaxed, guard);
                         // Advance the tail so that we don't retire a pointer to a reachable node.
                         if head == tail {
-                            let _ = self.tail.compare_and_set(tail, next, Release, guard);
+                            let _ = self
+                                .tail
+                                .compare_exchange(tail, next, Release, Relaxed, guard);
                         }
                         guard.defer_destroy(head);
                         Some(n.data.as_ptr().read())
@@ -168,7 +176,7 @@
     /// Attempts to dequeue from the front.
     ///
     /// Returns `None` if the queue is observed to be empty.
-    pub fn try_pop(&self, guard: &Guard) -> Option<T> {
+    pub(crate) fn try_pop(&self, guard: &Guard) -> Option<T> {
         loop {
             if let Ok(head) = self.pop_internal(guard) {
                 return head;
@@ -180,7 +188,7 @@
     ///
     /// Returns `None` if the queue is observed to be empty, or the head does not satisfy the given
     /// condition.
-    pub fn try_pop_if<F>(&self, condition: F, guard: &Guard) -> Option<T>
+    pub(crate) fn try_pop_if<F>(&self, condition: F, guard: &Guard) -> Option<T>
     where
         T: Sync,
         F: Fn(&T) -> bool,
@@ -207,7 +215,7 @@
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(crossbeam_loom)))]
 mod test {
     use super::*;
     use crate::pin;
@@ -218,30 +226,30 @@
     }
 
     impl<T> Queue<T> {
-        pub fn new() -> Queue<T> {
+        pub(crate) fn new() -> Queue<T> {
             Queue {
                 queue: super::Queue::new(),
             }
         }
 
-        pub fn push(&self, t: T) {
+        pub(crate) fn push(&self, t: T) {
             let guard = &pin();
             self.queue.push(t, guard);
         }
 
-        pub fn is_empty(&self) -> bool {
+        pub(crate) fn is_empty(&self) -> bool {
             let guard = &pin();
             let head = self.queue.head.load(Acquire, guard);
             let h = unsafe { head.deref() };
             h.next.load(Acquire, guard).is_null()
         }
 
-        pub fn try_pop(&self) -> Option<T> {
+        pub(crate) fn try_pop(&self) -> Option<T> {
             let guard = &pin();
             self.queue.try_pop(guard)
         }
 
-        pub fn pop(&self) -> T {
+        pub(crate) fn pop(&self) -> T {
             loop {
                 match self.try_pop() {
                     None => continue,
diff --git a/tests/loom.rs b/tests/loom.rs
new file mode 100644
index 0000000..4e56acd
--- /dev/null
+++ b/tests/loom.rs
@@ -0,0 +1,157 @@
+#![cfg(crossbeam_loom)]
+
+use crossbeam_epoch as epoch;
+use loom_crate as loom;
+
+use epoch::*;
+use epoch::{Atomic, Owned};
+use loom::sync::atomic::Ordering::{self, Acquire, Relaxed, Release};
+use loom::sync::Arc;
+use loom::thread::spawn;
+use std::mem::ManuallyDrop;
+use std::ptr;
+
+#[test]
+fn it_works() {
+    loom::model(|| {
+        let collector = Collector::new();
+        let item: Atomic<String> = Atomic::from(Owned::new(String::from("boom")));
+        let item2 = item.clone();
+        let collector2 = collector.clone();
+        let guard = collector.register().pin();
+
+        let jh = loom::thread::spawn(move || {
+            let guard = collector2.register().pin();
+            guard.defer(move || {
+                // this isn't really safe, since other threads may still have pointers to the
+                // value, but in this limited test scenario it's okay, since we know the test won't
+                // access item after all the pins are released.
+                let mut item = unsafe { item2.into_owned() };
+                // mutate it as a second measure to make sure the assert_eq below would fail
+                item.retain(|c| c == 'o');
+                drop(item);
+            });
+        });
+
+        let item = item.load(Ordering::SeqCst, &guard);
+        // we pinned strictly before the call to `defer` above,
+        // so item cannot have been dropped yet
+        assert_eq!(*unsafe { item.deref() }, "boom");
+        drop(guard);
+
+        jh.join().unwrap();
+
+        drop(collector);
+    })
+}
+
+#[test]
+fn treiber_stack() {
+    /// Treiber's lock-free stack.
+    ///
+    /// Usable with any number of producers and consumers.
+    #[derive(Debug)]
+    pub struct TreiberStack<T> {
+        head: Atomic<Node<T>>,
+    }
+
+    #[derive(Debug)]
+    struct Node<T> {
+        data: ManuallyDrop<T>,
+        next: Atomic<Node<T>>,
+    }
+
+    impl<T> TreiberStack<T> {
+        /// Creates a new, empty stack.
+        pub fn new() -> TreiberStack<T> {
+            TreiberStack {
+                head: Atomic::null(),
+            }
+        }
+
+        /// Pushes a value on top of the stack.
+        pub fn push(&self, t: T) {
+            let mut n = Owned::new(Node {
+                data: ManuallyDrop::new(t),
+                next: Atomic::null(),
+            });
+
+            let guard = epoch::pin();
+
+            loop {
+                let head = self.head.load(Relaxed, &guard);
+                n.next.store(head, Relaxed);
+
+                match self
+                    .head
+                    .compare_exchange(head, n, Release, Relaxed, &guard)
+                {
+                    Ok(_) => break,
+                    Err(e) => n = e.new,
+                }
+            }
+        }
+
+        /// Attempts to pop the top element from the stack.
+        ///
+        /// Returns `None` if the stack is empty.
+        pub fn pop(&self) -> Option<T> {
+            let guard = epoch::pin();
+            loop {
+                let head = self.head.load(Acquire, &guard);
+
+                match unsafe { head.as_ref() } {
+                    Some(h) => {
+                        let next = h.next.load(Relaxed, &guard);
+
+                        if self
+                            .head
+                            .compare_exchange(head, next, Relaxed, Relaxed, &guard)
+                            .is_ok()
+                        {
+                            unsafe {
+                                guard.defer_destroy(head);
+                                return Some(ManuallyDrop::into_inner(ptr::read(&(*h).data)));
+                            }
+                        }
+                    }
+                    None => return None,
+                }
+            }
+        }
+
+        /// Returns `true` if the stack is empty.
+        pub fn is_empty(&self) -> bool {
+            let guard = epoch::pin();
+            self.head.load(Acquire, &guard).is_null()
+        }
+    }
+
+    impl<T> Drop for TreiberStack<T> {
+        fn drop(&mut self) {
+            while self.pop().is_some() {}
+        }
+    }
+
+    loom::model(|| {
+        let stack1 = Arc::new(TreiberStack::new());
+        let stack2 = Arc::clone(&stack1);
+
+        // use 5 since it's greater than the 4 used for the sanitize feature
+        let jh = spawn(move || {
+            for i in 0..5 {
+                stack2.push(i);
+                assert!(stack2.pop().is_some());
+            }
+        });
+
+        for i in 0..5 {
+            stack1.push(i);
+            assert!(stack1.pop().is_some());
+        }
+
+        jh.join().unwrap();
+        assert!(stack1.pop().is_none());
+        assert!(stack1.is_empty());
+    });
+}