#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif

#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif

/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. If an architecture doesn't support it, ignore the
 * hint.
 */
#ifndef __read_mostly
#define __read_mostly
#endif
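
/*
 * Illustrative sketch, not part of the original header: a typical user is
 * a variable written once during setup and then only read on hot paths
 * (the identifier below is made up for the example):
 *
 *	static int foo_debug_level __read_mostly;
 *
 * Grouping such variables keeps them off cachelines that are written
 * frequently by other data.
 */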

/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
#endif
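
/*
 * Illustrative sketch with made-up identifiers: data computed in an __init
 * function and never modified afterwards is a typical candidate:
 *
 *	static unsigned long boot_seed __ro_after_init;
 *
 *	static int __init boot_seed_init(void)
 *	{
 *		boot_seed = get_random_long();
 *		return 0;
 *	}
 *	core_initcall(boot_seed_init);
 *
 * Once mark_rodata_ro() has run, the page holding boot_seed is mapped
 * read-only, so any later write would fault.
 */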

#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif

#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

#ifndef __cacheline_aligned
#define __cacheline_aligned					\
  __attribute__((__aligned__(SMP_CACHE_BYTES),			\
		 __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */

#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
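
/*
 * Illustrative sketch with a made-up struct: the _in_smp variants are
 * commonly used to keep independently updated members from sharing a
 * cacheline (false sharing) on SMP builds:
 *
 *	struct queue_stats {
 *		u64	enqueued;
 *		u64	dequeued ____cacheline_aligned_in_smp;
 *	};
 *
 * __cacheline_aligned additionally places a static object in the
 * .data..cacheline_aligned section, so it is meant for variable
 * definitions rather than struct members.
 */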

/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes, L3 cacheline size, etc.
 * Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif
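
/*
 * Illustrative sketch with made-up names: structures accessed from many
 * NUMA nodes, such as the buckets of a global hash table, may want the
 * wider inter-node alignment:
 *
 *	struct global_bucket {
 *		spinlock_t		lock;
 *		struct hlist_head	head;
 *	} ____cacheline_internodealigned_in_smp;
 */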

#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size() L1_CACHE_BYTES
#endif
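
/*
 * Illustrative sketch: cache_line_size() gives callers a run-time value,
 * e.g. for rounding a buffer length up to a whole number of cache lines:
 *
 *	len = ALIGN(len, cache_line_size());
 */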

#endif /* __LINUX_CACHE_H */