#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them. In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>

19struct lockref {
Linus Torvaldsbc08b442013-09-02 12:12:15 -070020 union {
21#ifdef CONFIG_CMPXCHG_LOCKREF
22 aligned_u64 lock_count;
23#endif
24 struct {
25 spinlock_t lock;
26 unsigned int count;
27 };
28 };
Waiman Long0f8f2aa2013-08-28 18:13:26 -070029};
Linus Torvalds2f4f12e2013-09-02 11:58:20 -070031extern void lockref_get(struct lockref *);
32extern int lockref_get_not_zero(struct lockref *);
33extern int lockref_get_or_lock(struct lockref *);
34extern int lockref_put_or_lock(struct lockref *);
Waiman Long0f8f2aa2013-08-28 18:13:26 -070035
Linus Torvaldse7d33bb2013-09-07 15:49:18 -070036extern void lockref_mark_dead(struct lockref *);
37extern int lockref_get_not_dead(struct lockref *);

#endif /* __LINUX_LOCKREF_H */