blob: 0d64397cee58e05344a0d3a2d14038e6224381b7 [file] [log] [blame]
Fenghua Yu113c6092016-10-22 06:19:54 -07001#ifndef _ASM_X86_INTEL_RDT_H
2#define _ASM_X86_INTEL_RDT_H
3
Fenghua Yu4f341a52016-10-28 15:04:48 -07004#ifdef CONFIG_INTEL_RDT_A
5
Ingo Molnar5b825c32017-02-02 17:54:15 +01006#include <linux/sched.h>
Borislav Petkov8ff42c022016-11-02 17:51:17 +01007#include <linux/kernfs.h>
Fenghua Yu5ff193f2016-10-28 15:04:42 -07008#include <linux/jump_label.h>
9
Fenghua Yu4f341a52016-10-28 15:04:48 -070010#include <asm/intel_rdt_common.h>
11
Fenghua Yu5ff193f2016-10-28 15:04:42 -070012#define IA32_L3_QOS_CFG 0xc81
Fenghua Yu113c6092016-10-22 06:19:54 -070013#define IA32_L3_CBM_BASE 0xc90
Fenghua Yuc1c7c3f2016-10-22 06:19:55 -070014#define IA32_L2_CBM_BASE 0xd10
Fenghua Yu113c6092016-10-22 06:19:54 -070015
Fenghua Yu5ff193f2016-10-28 15:04:42 -070016#define L3_QOS_CDP_ENABLE 0x01ULL
17
/**
 * struct rdtgroup - store rdtgroup's data in resctrl file system.
 * @kn:			kernfs node backing this group's directory
 * @rdtgroup_list:	linked list for all rdtgroups
 * @closid:		closid for this rdtgroup
 * @cpu_mask:		CPUs assigned to this rdtgroup
 * @flags:		status bits (RDT_* flags below)
 * @waitcount:		how many cpus expect to find this
 *			group when they acquire rdtgroup_mutex
 */
struct rdtgroup {
	struct kernfs_node *kn;
	struct list_head rdtgroup_list;
	int closid;
	struct cpumask cpu_mask;
	int flags;
	atomic_t waitcount;
};
36
Fenghua Yu60cf5e12016-10-28 15:04:44 -070037/* rdtgroup.flags */
38#define RDT_DELETED 1
39
Fenghua Yu5ff193f2016-10-28 15:04:42 -070040/* List of all resource groups */
41extern struct list_head rdt_all_groups;
42
43int __init rdtgroup_init(void);
44
/**
 * struct rftype - describe each file in the resctrl file system
 * @name:	file name
 * @mode:	access mode
 * @kf_ops:	kernfs operations
 * @seq_show:	show content of the file
 * @write:	write to the file
 */
struct rftype {
	char *name;
	umode_t mode;
	struct kernfs_ops *kf_ops;

	int (*seq_show)(struct kernfs_open_file *of,
			struct seq_file *sf, void *v);
	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);
};
68
/**
 * struct rdt_resource - attributes of an RDT resource
 * @enabled:		Is this feature enabled on this machine
 * @capable:		Is this feature available on this machine
 * @name:		Name to use in "schemata" file
 * @num_closid:		Number of CLOSIDs available
 * @cbm_len:		Number of bits in a cache bit mask
 * @min_cbm_bits:	Minimum number of consecutive bits to be set
 *			in a cache bit mask
 * @max_cbm:		Largest Cache Bit Mask allowed
 * @domains:		All domains for this resource
 * @num_domains:	Number of domains active
 * @msr_base:		Base MSR address for CBMs
 * @tmp_cbms:		Scratch space when updating schemata
 * @num_tmp_cbms:	Number of CBMs in tmp_cbms
 * @cache_level:	Which cache level defines scope of this domain
 * @cbm_idx_multi:	Multiplier of CBM index
 * @cbm_idx_offset:	Offset of CBM index. CBM index is computed by:
 *			closid * cbm_idx_multi + cbm_idx_offset
 */
struct rdt_resource {
	bool enabled;
	bool capable;
	char *name;
	int num_closid;
	int cbm_len;
	int min_cbm_bits;
	u32 max_cbm;
	struct list_head domains;
	int num_domains;
	int msr_base;
	u32 *tmp_cbms;
	int num_tmp_cbms;
	int cache_level;
	int cbm_idx_multi;
	int cbm_idx_offset;
};
105
/**
 * struct rdt_domain - group of cpus sharing an RDT resource
 * @list:	all instances of this resource
 * @id:		unique id for this instance
 * @cpu_mask:	which cpus share this resource
 * @cbm:	array of cache bit masks (indexed by CLOSID)
 */
struct rdt_domain {
	struct list_head list;
	int id;
	struct cpumask cpu_mask;
	u32 *cbm;
};
119
/**
 * struct msr_param - set a range of MSRs from a domain
 * @res:	The resource to use
 * @low:	Beginning index from base MSR
 * @high:	End index (exclusive of the last MSR written,
 *		per the low/high naming -- TODO confirm against
 *		the rdt_cbm_update() implementation)
 */
struct msr_param {
	struct rdt_resource *res;
	int low;
	int high;
};
131
132extern struct mutex rdtgroup_mutex;
133
Fenghua Yuc1c7c3f2016-10-22 06:19:55 -0700134extern struct rdt_resource rdt_resources_all[];
Fenghua Yu5ff193f2016-10-28 15:04:42 -0700135extern struct rdtgroup rdtgroup_default;
136DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
137
138int __init rdtgroup_init(void);
Fenghua Yuc1c7c3f2016-10-22 06:19:55 -0700139
/*
 * Indices into the rdt_resources_all[] array; the iteration macros
 * below walk exactly RDT_NUM_RESOURCES entries.
 */
enum {
	RDT_RESOURCE_L3,
	RDT_RESOURCE_L3DATA,
	RDT_RESOURCE_L3CODE,
	RDT_RESOURCE_L2,

	/* Must be the last */
	RDT_NUM_RESOURCES,
};
149
/*
 * Iterate over all resources in rdt_resources_all[], filtering on the
 * ->capable / ->enabled flag.  The trailing "if" deliberately has no
 * else and consumes the statement the caller writes after the macro.
 */
#define for_each_capable_rdt_resource(r)				      \
	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
	     r++)							      \
		if (r->capable)

#define for_each_enabled_rdt_resource(r)				      \
	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
	     r++)							      \
		if (r->enabled)
159
Fenghua Yuc1c7c3f2016-10-22 06:19:55 -0700160/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
union cpuid_0x10_1_eax {
	struct {
		/* Cache bit mask length, encoded in bits 4:0 */
		unsigned int cbm_len:5;
	} split;
	unsigned int full;
};
167
168/* CPUID.(EAX=10H, ECX=ResID=1).EDX */
169union cpuid_0x10_1_edx {
170 struct {
171 unsigned int cos_max:16;
172 } split;
173 unsigned int full;
174};
Tony Luck2264d9c2016-10-28 15:04:41 -0700175
Tony Luck12e01102016-10-28 15:04:45 -0700176DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
177
Tony Luck2264d9c2016-10-28 15:04:41 -0700178void rdt_cbm_update(void *arg);
Fenghua Yu60cf5e12016-10-28 15:04:44 -0700179struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
180void rdtgroup_kn_unlock(struct kernfs_node *kn);
Tony Luck60ec2442016-10-28 15:04:47 -0700181ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
182 char *buf, size_t nbytes, loff_t off);
183int rdtgroup_schemata_show(struct kernfs_open_file *of,
184 struct seq_file *s, void *v);
Fenghua Yu4f341a52016-10-28 15:04:48 -0700185
186/*
187 * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
188 *
189 * Following considerations are made so that this has minimal impact
190 * on scheduler hot path:
191 * - This will stay as no-op unless we are running on an Intel SKU
192 * which supports resource control and we enable by mounting the
193 * resctrl file system.
194 * - Caches the per cpu CLOSid values and does the MSR write only
195 * when a task with a different CLOSid is scheduled in.
Fenghua Yu74fcdae2016-12-01 12:55:14 -0800196 *
197 * Must be called with preemption disabled.
Fenghua Yu4f341a52016-10-28 15:04:48 -0700198 */
199static inline void intel_rdt_sched_in(void)
200{
201 if (static_branch_likely(&rdt_enable_key)) {
202 struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
203 int closid;
204
205 /*
206 * If this task has a closid assigned, use it.
207 * Else use the closid assigned to this cpu.
208 */
209 closid = current->closid;
210 if (closid == 0)
211 closid = this_cpu_read(cpu_closid);
212
213 if (closid != state->closid) {
214 state->closid = closid;
215 wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
216 }
217 }
218}
219
220#else
221
/* No-op stub when CONFIG_INTEL_RDT_A is disabled. */
static inline void intel_rdt_sched_in(void) {}
223
224#endif /* CONFIG_INTEL_RDT_A */
Fenghua Yu113c6092016-10-22 06:19:54 -0700225#endif /* _ASM_X86_INTEL_RDT_H */