#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
 * Per-blockgroup locking for ext2 and ext3.
 *
 * Simple hashed spinlocking.
 */

#include <linux/spinlock.h>
#include <linux/cache.h>

#ifdef CONFIG_SMP

/*
 * NR_BG_LOCKS must be a power of two so that bgl_lock_ptr() can hash a
 * block group onto a lock with a simple mask instead of a modulo.
 * Scale the lock count with the configured number of CPUs.
 */

#if NR_CPUS >= 32
#define NR_BG_LOCKS 128
#elif NR_CPUS >= 16
#define NR_BG_LOCKS 64
#elif NR_CPUS >= 8
#define NR_BG_LOCKS 32
#elif NR_CPUS >= 4
#define NR_BG_LOCKS 16
#elif NR_CPUS >= 2
#define NR_BG_LOCKS 8
#else
#define NR_BG_LOCKS 4
#endif

#else /* CONFIG_SMP */
#define NR_BG_LOCKS 1
#endif /* CONFIG_SMP */
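
/*
 * Worked example (hypothetical configuration): with NR_CPUS = 4 we get
 * NR_BG_LOCKS = 16, so block group 35 maps to lock 35 & (16 - 1) = 3,
 * and groups 3, 19, 35, ... all share that spinlock.
 */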

/*
 * Each lock sits in its own cacheline on SMP, so CPUs working on
 * different block groups do not bounce the same line back and forth.
 */
struct bgl_lock {
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct blockgroup_lock {
	struct bgl_lock locks[NR_BG_LOCKS];
};

/* Initialise all of the hashed per-group spinlocks. */
static inline void bgl_lock_init(struct blockgroup_lock *bgl)
{
	int i;

	for (i = 0; i < NR_BG_LOCKS; i++)
		spin_lock_init(&bgl->locks[i].lock);
}

/*
 * Return the spinlock guarding @block_group; the power-of-two mask
 * hashes many groups onto one lock.
 */
static inline spinlock_t *
bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
{
	return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock;
}
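
/*
 * Minimal usage sketch: serialising updates to one block group's
 * metadata.  example_blockgroup_update() is a hypothetical helper, not
 * something this header provides; filesystems such as ext4 typically
 * keep a blockgroup_lock in their superblock info and wrap
 * bgl_lock_ptr() in their own accessor.
 */
static inline void example_blockgroup_update(struct blockgroup_lock *bgl,
					     unsigned int block_group)
{
	spinlock_t *lock = bgl_lock_ptr(bgl, block_group);

	spin_lock(lock);
	/* ... update this block group's metadata under the lock ... */
	spin_unlock(lock);
}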

#endif	/* _LINUX_BLOCKGROUP_LOCK_H */