/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif

#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif

/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. If an architecture doesn't support it, ignore the
 * hint.
 */
#ifndef __read_mostly
#define __read_mostly
#endif
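
/*
 * Illustrative use only (not part of this header): a variable that is set
 * once during boot and then only read on hot paths can be annotated as
 *
 *	static int sysctl_example __read_mostly = 1;
 *
 * so that, on architectures providing the hint, it is grouped away from
 * frequently written data. The name "sysctl_example" is hypothetical.
 */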

/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
#endif
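
/*
 * Illustrative use only: a pointer that is assigned exactly once from an
 * __init function and never modified afterwards could be declared as
 *
 *	static struct workqueue_struct *example_wq __ro_after_init;
 *
 * It stays writable during early boot and becomes read-only once
 * mark_rodata_ro() has run. The name "example_wq" is hypothetical.
 */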

#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif

#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
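
/*
 * Illustrative use only: aligning a structure member to a cacheline
 * boundary keeps fields written by different CPUs from sharing a line
 * (false sharing), e.g.
 *
 *	struct example_stats {
 *		atomic_long_t reads;
 *		atomic_long_t writes ____cacheline_aligned_in_smp;
 *	};
 *
 * The struct and field names are hypothetical; on !CONFIG_SMP builds the
 * annotation expands to nothing and no padding is added.
 */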

#ifndef __cacheline_aligned
#define __cacheline_aligned					\
  __attribute__((__aligned__(SMP_CACHE_BYTES),			\
		 __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */

#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
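
/*
 * Illustrative use only: unlike the four-underscore forms above, which only
 * add an alignment attribute, __cacheline_aligned(_in_smp) also places a
 * static object in the .data..cacheline_aligned section, e.g.
 *
 *	static struct example_ctrl ctrl __cacheline_aligned_in_smp;
 *
 * The names are hypothetical; use the four-underscore variant for struct
 * members, where a section attribute would not make sense.
 */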

/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes or the L3 cacheline
 * size, etc. Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif
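
/*
 * Illustrative use only: data that is contended across NUMA nodes can be
 * padded out to the (possibly larger) inter-node cacheline size, e.g.
 *
 *	struct example_node_queue {
 *		spinlock_t lock;
 *		struct list_head list;
 *	} ____cacheline_internodealigned_in_smp;
 *
 * The struct name is hypothetical; on architectures that do not define
 * INTERNODE_CACHE_SHIFT this falls back to L1_CACHE_SHIFT.
 */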

#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size() L1_CACHE_BYTES
#endif
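
/*
 * Illustrative use only: architectures that select
 * CONFIG_ARCH_HAS_CACHE_LINE_SIZE supply their own cache_line_size(),
 * which may be a runtime value; everywhere else it is the compile-time
 * L1_CACHE_BYTES constant, e.g. for rounding a buffer length:
 *
 *	len = ALIGN(len, cache_line_size());
 *
 * ALIGN() here is the kernel's alignment helper built on __ALIGN_KERNEL().
 */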

#endif /* __LINUX_CACHE_H */