/*
 * Hypervisor supplied "24x7" performance counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "hv-24x7: " fmt

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-24x7.h"
#include "hv-24x7-catalog.h"
#include "hv-common.h"

/*
 * TODO: Merging events:
 * - Think of the hcall as an interface to a 4d array of counters:
 *   - x = domains
 *   - y = indexes in the domain (core, chip, vcpu, node, etc)
 *   - z = offset into the counter space
 *   - w = lpars (guest vms, "logical partitions")
 * - A single request is: x,y,y_last,z,z_last,w,w_last
 *   - this means we can retrieve a rectangle of counters in y,z for a single x.
 *
 * - Things to consider (ignoring w):
 *   - input  cost_per_request = 16
 *   - output cost_per_result(ys, zs) = 8 + 8 * ys + ys * zs
 *   - limited number of requests per hcall (must fit into 4K bytes)
 *     - 4k >= 16 [buffer header] + 16 [request size] * request_count
 *     - so at most 255 requests per hcall (worked example below)
 *   - sometimes it will be more efficient to read extra data and discard
 */
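
/*
 * Worked example of the sizing above (illustrative arithmetic only): with a
 * 16 byte buffer header and 16 bytes per request, 16 + 255 * 16 = 4096, which
 * is where the 255-request limit comes from.  On the output side, a ys = 4 by
 * zs = 32 rectangle would cost 8 + 8 * 4 + 4 * 32 = 168 bytes of result data.
 */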

/*
 * Example usage:
 *  perf stat -e 'hv_24x7/domain=2,offset=8,starting_index=0,lpar=0xffffffff/'
 */

/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);
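
/*
 * For illustration (assuming the usual perf format bit numbering, where bit 0
 * is the least significant bit): the example event above packs domain=2 into
 * config bits 0-3, starting_index=0 into bits 16-31, and offset=8 into bits
 * 32-63, i.e. attr.config = 0x0000000800000002, while lpar lands in
 * attr.config1 bits 0-15.
 */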

static struct attribute *format_attrs[] = {
	&format_attr_domain.attr,
	&format_attr_offset.attr,
	&format_attr_starting_index.attr,
	&format_attr_lpar.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static struct kmem_cache *hv_page_cache;

static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
					      unsigned long version,
					      unsigned long index)
{
	pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
			phys_4096, version, index);
	WARN_ON(!IS_ALIGNED(phys_4096, 4096));
	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
			phys_4096, version, index);
}

static unsigned long h_get_24x7_catalog_page(char page[],
					     u64 version, u32 index)
{
	return h_get_24x7_catalog_page_(virt_to_phys(page),
					version, index);
}

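/*
 * Worked example of the paging arithmetic below (illustrative numbers): a
 * read at offset 5000 maps to page_offset = 5000 / 4096 = 1 and
 * offset_in_page = 5000 % 4096 = 904, so at most 4096 - 904 = 3192 bytes are
 * copied from that catalog page (further limited by 'count').
 */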
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t offset, size_t count)
{
	unsigned long hret;
	ssize_t ret = 0;
	size_t catalog_len = 0, catalog_page_len = 0;
	loff_t page_offset = 0;
	loff_t offset_in_page;
	size_t copy_len;
	uint64_t catalog_version_num = 0;
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
	struct hv_24x7_catalog_page_0 *page_0 = page;

	if (!page)
		return -ENOMEM;

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);
	catalog_len = catalog_page_len * 4096;

	page_offset = offset / 4096;
	offset_in_page = offset % 4096;

	if (page_offset >= catalog_page_len)
		goto e_free;

	if (page_offset != 0) {
		hret = h_get_24x7_catalog_page(page, catalog_version_num,
					       page_offset);
		if (hret) {
			ret = -EIO;
			goto e_free;
		}
	}

	copy_len = 4096 - offset_in_page;
	if (copy_len > count)
		copy_len = count;

	memcpy(buf, page + offset_in_page, copy_len);
	ret = copy_len;

e_free:
	if (hret)
		pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed: rc=%ld\n",
		       catalog_version_num, page_offset, hret);
	kmem_cache_free(hv_page_cache, page);

	pr_devel("catalog_read: offset=%lld(%lld) count=%zu catalog_len=%zu(%zu) => %zd\n",
		 offset, page_offset, count, catalog_len, catalog_page_len, ret);

	return ret;
}

#define PAGE_0_ATTR(_name, _fmt, _expr)					\
static ssize_t _name##_show(struct device *dev,				\
			    struct device_attribute *dev_attr,		\
			    char *buf)					\
{									\
	unsigned long hret;						\
	ssize_t ret = 0;						\
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);		\
	struct hv_24x7_catalog_page_0 *page_0 = page;			\
	if (!page)							\
		return -ENOMEM;						\
	hret = h_get_24x7_catalog_page(page, 0, 0);			\
	if (hret) {							\
		ret = -EIO;						\
		goto e_free;						\
	}								\
	ret = sprintf(buf, _fmt, _expr);				\
e_free:									\
	kmem_cache_free(hv_page_cache, page);				\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)

PAGE_0_ATTR(catalog_version, "%lld\n",
	    (unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
	    (unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);

static struct bin_attribute *if_bin_attrs[] = {
	&bin_attr_catalog,
	NULL,
};

static struct attribute *if_attrs[] = {
	&dev_attr_catalog_len.attr,
	&dev_attr_catalog_version.attr,
	NULL,
};

static struct attribute_group if_group = {
	.name = "interface",
	.bin_attrs = if_bin_attrs,
	.attrs = if_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&if_group,
	NULL,
};

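/*
 * Once the PMU below is registered, these groups are typically exposed under
 * /sys/bus/event_source/devices/hv_24x7/ as the "format" and "interface"
 * directories; the perf tool reads the format strings from there to parse
 * the event syntax shown in the example near the top of this file.
 */
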
static bool is_physical_domain(int domain)
{
	return domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP ||
		domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
}

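/*
 * In terms of the 4d array described in the TODO at the top of this file,
 * each call below reads a single cell: performance_domain selects x,
 * starting_ix with max_ix = 1 selects y, data_offset with data_size = 8
 * selects one 8-byte counter in z, and starting_lpar_ix with
 * max_num_lpars = 1 selects w.
 */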
static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
					 u16 lpar, u64 *res,
					 bool success_expected)
{
	unsigned long ret = -ENOMEM;

	/*
	 * request_buffer and result_buffer are not required to be 4k aligned,
	 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
	 * the simplest way to ensure that.
	 */
	struct reqb {
		struct hv_24x7_request_buffer buf;
		struct hv_24x7_request req;
	} __packed *request_buffer;

	struct {
		struct hv_24x7_data_result_buffer buf;
		struct hv_24x7_result res;
		struct hv_24x7_result_element elem;
		__be64 result;
	} __packed *result_buffer;

	BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
	BUILD_BUG_ON(sizeof(*result_buffer) > 4096);

	request_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
	if (!request_buffer)
		goto out;

	result_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
	if (!result_buffer)
		goto out_free_request_buffer;

	*request_buffer = (struct reqb) {
		.buf = {
			.interface_version = HV_24X7_IF_VERSION_CURRENT,
			.num_requests = 1,
		},
		.req = {
			.performance_domain = domain,
			.data_size = cpu_to_be16(8),
			.data_offset = cpu_to_be32(offset),
			.starting_lpar_ix = cpu_to_be16(lpar),
			.max_num_lpars = cpu_to_be16(1),
			.starting_ix = cpu_to_be16(ix),
			.max_ix = cpu_to_be16(1),
		}
	};

	ret = plpar_hcall_norets(H_GET_24X7_DATA,
			virt_to_phys(request_buffer), sizeof(*request_buffer),
			virt_to_phys(result_buffer), sizeof(*result_buffer));

	if (ret) {
		if (success_expected)
			pr_err_ratelimited("hcall failed: %d %#x %#x %d => 0x%lx (%ld) detail=0x%x failing ix=%x\n",
					domain, offset, ix, lpar, ret, ret,
					result_buffer->buf.detailed_rc,
					result_buffer->buf.failing_request_ix);
		goto out_free_result_buffer;
	}

	*res = be64_to_cpu(result_buffer->result);

out_free_result_buffer:
	kmem_cache_free(hv_page_cache, result_buffer);
out_free_request_buffer:
	kmem_cache_free(hv_page_cache, request_buffer);
out:
	return ret;
}

static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
					bool success_expected)
{
	return single_24x7_request(event_get_domain(event),
			event_get_offset(event),
			event_get_starting_index(event),
			event_get_lpar(event),
			res,
			success_expected);
}

static int h_24x7_event_init(struct perf_event *event)
{
	struct hv_perf_caps caps;
	unsigned domain;
	unsigned long hret;
	u64 ct;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Unused areas must be 0 */
	if (event_get_reserved1(event) ||
	    event_get_reserved2(event) ||
	    event_get_reserved3(event)) {
		pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
				event->attr.config,
				event_get_reserved1(event),
				event->attr.config1,
				event_get_reserved2(event),
				event->attr.config2,
				event_get_reserved3(event));
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* offset must be 8 byte aligned */
	if (event_get_offset(event) % 8) {
		pr_devel("bad alignment\n");
		return -EINVAL;
	}

	/* Domains above 6 are invalid */
	domain = event_get_domain(event);
	if (domain > 6) {
		pr_devel("invalid domain %d\n", domain);
		return -EINVAL;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_devel("could not get capabilities: rc=%ld\n", hret);
		return -EIO;
	}

	/* PHYSICAL domains & other lpars require extra capabilities */
	if (!caps.collect_privileged && (is_physical_domain(domain) ||
		(event_get_lpar(event) != event_get_lpar_max()))) {
		pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
				is_physical_domain(domain),
				event_get_lpar(event));
		return -EACCES;
	}

	/* see if the event complains */
	if (event_24x7_request(event, &ct, false)) {
		pr_devel("test hcall failed\n");
		return -EIO;
	}

	return 0;
}

static u64 h_24x7_get_value(struct perf_event *event)
{
	unsigned long ret;
	u64 ct;

	ret = event_24x7_request(event, &ct, true);
	if (ret)
		/* We checked this in event init, shouldn't fail here... */
		return 0;

	return ct;
}

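/*
 * Illustrative example of the accumulation below: if prev_count was 1000 and
 * the new hypervisor read returns 1250, the delta of 250 is added to
 * event->count.
 */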
static void h_24x7_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = h_24x7_get_value(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void h_24x7_event_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)
		local64_set(&event->hw.prev_count, h_24x7_get_value(event));
}

static void h_24x7_event_stop(struct perf_event *event, int flags)
{
	h_24x7_event_update(event);
}

static int h_24x7_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_24x7_event_start(event, flags);

	return 0;
}

static int h_24x7_event_idx(struct perf_event *event)
{
	return 0;
}

static struct pmu h_24x7_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_24x7",
	.attr_groups = attr_groups,
	.event_init  = h_24x7_event_init,
	.add         = h_24x7_event_add,
	.del         = h_24x7_event_stop,
	.start       = h_24x7_event_start,
	.stop        = h_24x7_event_stop,
	.read        = h_24x7_event_update,
	.event_idx   = h_24x7_event_idx,
};

static int hv_24x7_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
				hret);
		return -ENODEV;
	}

	hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
	if (!hv_page_cache)
		return -ENOMEM;

	/* sampling not supported */
	h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_24x7_init);