blob: 597dc4995678b2ec82bd6e370fa13afcb8611e5c [file] [log] [blame]
Fenghua Yu113c6092016-10-22 06:19:54 -07001#ifndef _ASM_X86_INTEL_RDT_H
2#define _ASM_X86_INTEL_RDT_H
3
Fenghua Yu4f341a52016-10-28 15:04:48 -07004#ifdef CONFIG_INTEL_RDT_A
5
Ingo Molnar5b825c32017-02-02 17:54:15 +01006#include <linux/sched.h>
Borislav Petkov8ff42c022016-11-02 17:51:17 +01007#include <linux/kernfs.h>
Fenghua Yu5ff193f2016-10-28 15:04:42 -07008#include <linux/jump_label.h>
9
Fenghua Yu4f341a52016-10-28 15:04:48 -070010#include <asm/intel_rdt_common.h>
11
Fenghua Yu5ff193f2016-10-28 15:04:42 -070012#define IA32_L3_QOS_CFG 0xc81
Fenghua Yu113c6092016-10-22 06:19:54 -070013#define IA32_L3_CBM_BASE 0xc90
Fenghua Yuc1c7c3f2016-10-22 06:19:55 -070014#define IA32_L2_CBM_BASE 0xd10
Vikas Shivappa05b93412017-04-07 17:33:53 -070015#define IA32_MBA_THRTL_BASE 0xd50
Fenghua Yu113c6092016-10-22 06:19:54 -070016
Fenghua Yu5ff193f2016-10-28 15:04:42 -070017#define L3_QOS_CDP_ENABLE 0x01ULL
18
/**
 * struct rdtgroup - store rdtgroup's data in resctrl file system.
 * @kn:			kernfs node
 * @rdtgroup_list:	linked list for all rdtgroups
 * @closid:		closid for this rdtgroup
 * @cpu_mask:		CPUs assigned to this rdtgroup
 * @flags:		status bits (RDT_* flags, e.g. RDT_DELETED)
 * @waitcount:		how many cpus expect to find this
 *			group when they acquire rdtgroup_mutex
 */
struct rdtgroup {
	struct kernfs_node	*kn;
	struct list_head	rdtgroup_list;
	int			closid;
	struct cpumask		cpu_mask;
	int			flags;
	atomic_t		waitcount;
};
37
/* rdtgroup.flags */
#define	RDT_DELETED		1

/* rftype.flags */
#define RFTYPE_FLAGS_CPUS_LIST	1

/* List of all resource groups */
extern struct list_head rdt_all_groups;

/*
 * Widest name/data strings seen across resources; presumably used for
 * aligning displayed output (see struct rdt_resource::data_width).
 */
extern int max_name_width, max_data_width;

int __init rdtgroup_init(void);
50
/**
 * struct rftype - describe each file in the resctrl file system
 * @name:	File name
 * @mode:	Access mode
 * @kf_ops:	File operations
 * @flags:	File specific RFTYPE_FLAGS_* flags
 * @seq_show:	Show content of the file
 * @write:	Write to the file
 */
struct rftype {
	char			*name;
	umode_t			mode;
	struct kernfs_ops	*kf_ops;
	unsigned long		flags;

	int (*seq_show)(struct kernfs_open_file *of,
			struct seq_file *sf, void *v);
	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);
};
76
/**
 * struct rdt_domain - group of cpus sharing an RDT resource
 * @list:	all instances of this resource
 * @id:		unique id for this instance
 * @cpu_mask:	which cpus share this resource
 * @ctrl_val:	array of cache or mem ctrl values (indexed by CLOSID)
 * @new_ctrl:	new ctrl value to be loaded
 * @have_new_ctrl: did user provide new_ctrl for this domain
 */
struct rdt_domain {
	struct list_head	list;
	int			id;
	struct cpumask		cpu_mask;
	u32			*ctrl_val;
	u32			new_ctrl;
	bool			have_new_ctrl;
};
94
/**
 * struct msr_param - set a range of MSRs from a domain
 * @res:	The resource to use
 * @low:	Beginning index from base MSR
 * @high:	End index
 *
 * The indices are relative to @res->msr_base; see rdt_ctrl_update()
 * callers for how the range is applied.
 */
struct msr_param {
	struct rdt_resource	*res;
	int			low;
	int			high;
};
106
/**
 * struct rdt_cache - Cache allocation related data
 * @cbm_len:		Length of the cache bit mask
 * @min_cbm_bits:	Minimum number of consecutive bits to be set
 * @cbm_idx_mult:	Multiplier of CBM index
 * @cbm_idx_offset:	Offset of CBM index. CBM index is computed by:
 *			closid * cbm_idx_mult + cbm_idx_offset
 *			in a cache bit mask
 */
struct rdt_cache {
	unsigned int	cbm_len;
	unsigned int	min_cbm_bits;
	unsigned int	cbm_idx_mult;
	unsigned int	cbm_idx_offset;
};
122
/**
 * struct rdt_membw - Memory bandwidth allocation related data
 * @max_delay:		Max throttle delay. Delay is the hardware
 *			representation for memory bandwidth.
 * @min_bw:		Minimum memory bandwidth percentage user can request
 * @bw_gran:		Granularity at which the memory bandwidth is allocated
 * @delay_linear:	True if memory B/W delay is in linear scale
 * @mb_map:		Mapping of memory B/W percentage to memory B/W delay
 */
struct rdt_membw {
	u32	max_delay;
	u32	min_bw;
	u32	bw_gran;
	u32	delay_linear;
	u32	*mb_map;
};
139
/**
 * struct rdt_resource - attributes of an RDT resource
 * @enabled:		Is this feature enabled on this machine
 * @capable:		Is this feature available on this machine
 * @name:		Name to use in "schemata" file
 * @num_closid:		Number of CLOSIDs available
 * @cache_level:	Which cache level defines scope of this resource
 * @default_ctrl:	Specifies default cache cbm or memory B/W percent.
 * @msr_base:		Base MSR address for CBMs
 * @msr_update:		Function pointer to update QOS MSRs
 * @data_width:		Character width of data when displaying
 * @domains:		All domains for this resource
 * @cache:		Cache allocation related data
 * @membw:		Memory bandwidth allocation related data
 * @info_files:		resctrl info files for the resource
 * @nr_info_files:	Number of info files
 * @format_str:		Per resource format string to show domain value
 * @parse_ctrlval:	Per resource function pointer to parse control values
 */
struct rdt_resource {
	bool			enabled;
	bool			capable;
	char			*name;
	int			num_closid;
	int			cache_level;
	u32			default_ctrl;
	unsigned int		msr_base;
	void (*msr_update)	(struct rdt_domain *d, struct msr_param *m,
				 struct rdt_resource *r);
	int			data_width;
	struct list_head	domains;
	struct rdt_cache	cache;
	struct rdt_membw	membw;
	struct rftype		*info_files;
	int			nr_info_files;
	const char		*format_str;
	int (*parse_ctrlval)	(char *buf, struct rdt_resource *r,
				 struct rdt_domain *d);
};
178
Vikas Shivappa6a507a62017-04-07 17:33:54 -0700179void rdt_get_cache_infofile(struct rdt_resource *r);
Vikas Shivappadb69ef62017-04-07 17:33:55 -0700180void rdt_get_mba_infofile(struct rdt_resource *r);
Vikas Shivappac6ea67d2017-04-07 17:33:56 -0700181int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d);
Vikas Shivappa64e8ed32017-04-07 17:33:57 -0700182int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d);
Vikas Shivappa6a507a62017-04-07 17:33:54 -0700183
Tony Luck2264d9c2016-10-28 15:04:41 -0700184extern struct mutex rdtgroup_mutex;
185
Fenghua Yuc1c7c3f2016-10-22 06:19:55 -0700186extern struct rdt_resource rdt_resources_all[];
Fenghua Yu5ff193f2016-10-28 15:04:42 -0700187extern struct rdtgroup rdtgroup_default;
188DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
189
190int __init rdtgroup_init(void);
Fenghua Yuc1c7c3f2016-10-22 06:19:55 -0700191
/*
 * Indices into rdt_resources_all[]. L3DATA/L3CODE are the code/data
 * prioritization (CDP) variants of the L3 resource — see
 * L3_QOS_CDP_ENABLE above.
 */
enum {
	RDT_RESOURCE_L3,
	RDT_RESOURCE_L3DATA,
	RDT_RESOURCE_L3CODE,
	RDT_RESOURCE_L2,
	RDT_RESOURCE_MBA,

	/* Must be the last */
	RDT_NUM_RESOURCES,
};
202
/*
 * Iterate over all entries of rdt_resources_all[], skipping resources
 * whose ->capable flag is clear. Note: the macro ends in a bare "if",
 * so the statement following an invocation must not be an "else".
 */
#define for_each_capable_rdt_resource(r)				      \
	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
	     r++)							      \
		if (r->capable)

/* Same iteration, but only resources with ->enabled set. */
#define for_each_enabled_rdt_resource(r)				      \
	for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
	     r++)							      \
		if (r->enabled)
212
/* CPUID.(EAX=10H, ECX=ResID=1).EAX - bit-field view of the EAX register */
union cpuid_0x10_1_eax {
	struct {
		unsigned int cbm_len:5;	/* cache bit mask length field */
	} split;
	unsigned int full;
};
220
/* CPUID.(EAX=10H, ECX=ResID=3).EAX - bit-field view of the EAX register */
union cpuid_0x10_3_eax {
	struct {
		unsigned int max_delay:12;	/* max MBA throttle delay field */
	} split;
	unsigned int full;
};
228
/* CPUID.(EAX=10H, ECX=ResID).EDX - common EDX layout for all ResIDs */
union cpuid_0x10_x_edx {
	struct {
		unsigned int cos_max:16;	/* highest supported CLOSID */
	} split;
	unsigned int full;
};
Tony Luck2264d9c2016-10-28 15:04:41 -0700236
/*
 * CLOSID assigned to each online CPU; used by intel_rdt_sched_in()
 * when the incoming task's own closid is 0.
 */
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);

/* IPI callback: apply a struct msr_param range on the local CPU. */
void rdt_ctrl_update(void *arg);
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
void rdtgroup_kn_unlock(struct kernfs_node *kn);
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off);
int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v);
Fenghua Yu4f341a52016-10-28 15:04:48 -0700246
247/*
248 * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
249 *
250 * Following considerations are made so that this has minimal impact
251 * on scheduler hot path:
252 * - This will stay as no-op unless we are running on an Intel SKU
253 * which supports resource control and we enable by mounting the
254 * resctrl file system.
255 * - Caches the per cpu CLOSid values and does the MSR write only
256 * when a task with a different CLOSid is scheduled in.
Fenghua Yu74fcdae2016-12-01 12:55:14 -0800257 *
258 * Must be called with preemption disabled.
Fenghua Yu4f341a52016-10-28 15:04:48 -0700259 */
260static inline void intel_rdt_sched_in(void)
261{
262 if (static_branch_likely(&rdt_enable_key)) {
263 struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
264 int closid;
265
266 /*
267 * If this task has a closid assigned, use it.
268 * Else use the closid assigned to this cpu.
269 */
270 closid = current->closid;
271 if (closid == 0)
272 closid = this_cpu_read(cpu_closid);
273
274 if (closid != state->closid) {
275 state->closid = closid;
276 wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
277 }
278 }
279}
280
#else

/* No-op stub when CONFIG_INTEL_RDT_A is not enabled. */
static inline void intel_rdt_sched_in(void) {}

#endif /* CONFIG_INTEL_RDT_A */
#endif /* _ASM_X86_INTEL_RDT_H */