/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per-CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers, which may be memory-mapped or hardware registers, and may also
 * include some static integer values.
 *
 * CPU performance is expressed on an abstract, continuous scale, as opposed
 * to a discretized P-state scale that is tied to CPU frequency only. In
 * brief, the basic operation involves:
 *
 * - The OS makes a CPU performance request (and can provide min and max
 *   bounds).
 *
 * - The platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - The platform conveys its decision back to the OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism that includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC specs are available in the ACPI v5.1
 * and later specifications.
 */
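
/*
 * Illustrative usage (not part of this file): a cpufreq backend built on
 * the helpers exported below would typically read the performance bounds
 * once at init and then write a desired performance level on each
 * transition, roughly as follows. Struct and field names are those
 * declared in <acpi/cppc_acpi.h>.
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		// Any value in [caps.lowest_perf, caps.highest_perf] works.
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */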

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>

#include <acpi/cppc_acpi.h>

/*
 * Lock to provide mutually exclusive access to the PCC channel. e.g. when
 * the remote updates the shared region with new data, the reader needs to
 * be protected from other CPUs' activity on the same channel.
 */
static DEFINE_SPINLOCK(pcc_lock);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per-CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses, which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* This layer handles all the PCC specifics for CPPC. */
static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static u64 comm_base_addr;
static int pcc_subspace_idx = -1;
static bool pcc_channel_acquired;
static ktime_t deadline;

/* PCC mapped address + header size + offset within the PCC subspace */
#define GET_PCC_VADDR(offs)	(pcc_comm_addr + 0x8 + (offs))
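
/*
 * For example (illustrative numbers): a register declared in _CPC at
 * subspace offset 0x110 is accessed through GET_PCC_VADDR(0x110), i.e. at
 * pcc_comm_addr + 0x8 + 0x110. The 0x8 skips the generic PCC shared
 * memory region header (the signature, command and status fields of
 * struct acpi_pcct_shared_memory).
 */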

/*
 * Arbitrary number of retries in case the remote processor is slow to
 * respond to PCC commands. Keep it high enough to cover emulators, where
 * the processors run painfully slowly.
 */
#define NUM_RETRIES 500

static int check_pcc_chan(void)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(), deadline);

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
			ret = 0;
			break;
		}
		/*
		 * Reduce bus traffic in case this loop takes longer than
		 * a few retries.
		 */
		udelay(3);
	}

	return ret;
}

static int send_pcc_cmd(u16 cmd)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;

	/*
	 * For CMD_WRITE, the caller is expected to have checked the channel
	 * before writing to PCC space, so only check it here for CMD_READ.
	 */
	if (cmd == CMD_READ) {
		ret = check_pcc_chan();
		if (ret)
			return ret;
	}

	/* Write to the shared comm region. */
	writew(cmd, &generic_comm_base->command);

	/* Clear the CMD COMPLETE bit. */
	writew(0, &generic_comm_base->status);

	/* Ring the doorbell. */
	ret = mbox_send_message(pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		return ret;
	}

	/*
	 * For READs we need to ensure the cmd completed so that the
	 * ensuing read()s can proceed. For WRITEs we don't care because
	 * the actual write()s are done before coming here, and the next
	 * READ or WRITE will check whether the channel is busy/free on
	 * entry to this call.
	 */
	if (cmd == CMD_READ)
		ret = check_pcc_chan();

	mbox_client_txdone(pcc_channel, ret);
	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

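/*
 * Note: .knows_txdone below tells the mailbox framework that this client
 * runs the TX-done state machine itself (via mbox_client_txdone() in
 * send_pcc_cmd()) rather than relying on the controller to detect
 * completion.
 */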
struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
			&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU-specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr)
			continue;

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr)
				continue;

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr)
				continue;

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
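
/*
 * Illustrative example: if CPUs 0 and 1 both report _PSD domain 5 with
 * CoordType SW_ALL while CPUs 2 and 3 report domain 6, acpi_get_psd_map()
 * leaves CPUs 0-1 with shared_cpu_map = {0,1} and CPUs 2-3 with
 * shared_cpu_map = {2,3}, each with shared_type = CPUFREQ_SHARED_TYPE_ALL.
 */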

static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	unsigned int len;
	u64 usecs_lat;

	if (pcc_subspace_idx >= 0) {
		pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
				pcc_subspace_idx);

		if (IS_ERR(pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = pcc_channel->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * This is the shared communication region
		 * for the OS and Platform to communicate over.
		 */
		comm_base_addr = cppc_ss->base_address;
		len = cppc_ss->length;

		/*
		 * cppc_ss->latency is just a nominal value. In reality
		 * the remote processor could be much slower to reply, so
		 * add an arbitrary amount of wait on top of the nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
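
		/*
		 * Illustrative numbers: a PCCT nominal latency of 100us
		 * gives a polling deadline of 500 * 100us = 50ms in
		 * check_pcc_chan() before a command is deemed to have
		 * failed.
		 */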

		pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
		if (!pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_channel_acquired = true;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package() {
 *		17,						// NumEntries
 *		1,						// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		...
 *		...
 *		...
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,		// AddressSpaceKeyword
 *		8,		// RegisterBitWidth
 *		8,		// RegisterBitOffset
 *		0x30,		// RegisterAddress
 *		9		// AccessSize (subspace ID)
 *	)
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through the remaining entries in _CPC. */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_idx < 0)
					pcc_subspace_idx = gas_t->access_width;
				else if (pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				/* Support only PCC and SYS MEM type regs */
				pr_debug("Unsupported register type: %d\n", gas_t->space_id);
				goto out_free;
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}

	/* Store the CPU's logical ID. */
	cpc_ptr->cpu_id = pr->id;

	/* Plug it into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	/* Parse PSD data for this CPU. */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register the PCC channel once for all CPUs. */
	if (!pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_idx);
		if (ret)
			goto out_free;
	}

	/* Everything looks okay. */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	kfree(output.pointer);
	return 0;

out_free:
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock, they
 * should be as fast as possible. We have already mapped the PCC subspace
 * during init, so we can directly write to it.
 */

static int cpc_read(struct cpc_reg *reg, u64 *val)
{
	int ret_val = 0;

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		void __iomem *vaddr = GET_PCC_VADDR(reg->address);

		switch (reg->bit_width) {
		case 8:
			*val = readb(vaddr);
			break;
		case 16:
			*val = readw(vaddr);
			break;
		case 32:
			*val = readl(vaddr);
			break;
		case 64:
			*val = readq(vaddr);
			break;
		default:
			pr_debug("Error: Cannot read %u bit width from PCC\n",
				reg->bit_width);
			ret_val = -EFAULT;
		}
	} else
		ret_val = acpi_os_read_memory((acpi_physical_address)reg->address,
					val, reg->bit_width);
	return ret_val;
}

static int cpc_write(struct cpc_reg *reg, u64 val)
{
	int ret_val = 0;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		void __iomem *vaddr = GET_PCC_VADDR(reg->address);

		switch (reg->bit_width) {
		case 8:
			writeb(val, vaddr);
			break;
		case 16:
			writew(val, vaddr);
			break;
		case 32:
			writel(val, vaddr);
			break;
		case 64:
			writeq(val, vaddr);
			break;
		default:
			pr_debug("Error: Cannot write %u bit width to PCC\n",
				reg->bit_width);
			ret_val = -EFAULT;
			break;
		}
	} else
		ret_val = acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);
	return ret_val;
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
								 *nom_perf;
	u64 high, low, ref, nom;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&highest_reg->cpc_entry.reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(&lowest_reg->cpc_entry.reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(&ref_perf->cpc_entry.reg, &ref);
	perf_caps->reference_perf = ref;

	cpc_read(&nom_perf->cpc_entry.reg, &nom);
	perf_caps->nominal_perf = nom;

	if (!ref)
		perf_caps->reference_perf = perf_caps->nominal_perf;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg;
	u64 delivered, reference;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];

	spin_lock(&pcc_lock);

	/* Are any of the regs PCC? */
	if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
	    (reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(&delivered_reg->cpc_entry.reg, &delivered);
	cpc_read(&reference_reg->cpc_entry.reg, &reference);

	if (!delivered || !reference) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;

	perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered;
	perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference;

	perf_fb_ctrs->prev_delivered = delivered;
	perf_fb_ctrs->prev_reference = reference;

out_err:
	spin_unlock(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
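
/*
 * Illustrative only: a caller can estimate delivered performance over the
 * last sampling interval from the counter deltas returned above, e.g.:
 *
 *	struct cppc_perf_fb_ctrs fb_ctrs;
 *	u64 delivered_perf;
 *
 *	if (!cppc_get_perf_ctrs(cpu, &fb_ctrs) && fb_ctrs.reference)
 *		delivered_perf = (reference_perf * fb_ctrs.delivered) /
 *					fb_ctrs.reference;
 *
 * where reference_perf comes from cppc_get_perf_caps(). This mirrors the
 * delivered-performance formula described in the CPPC section of the ACPI
 * spec; the local variable names here are only for illustration.
 */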

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	spin_lock(&pcc_lock);

	/* If this is a PCC reg, check that the channel is free before writing. */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		ret = check_pcc_chan();
		if (ret)
			goto busy_channel;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf);

	/* Is this a PCC reg? */
	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
		/* Ring doorbell so the remote can pick up our perf request. */
		if (send_pcc_cmd(CMD_WRITE) < 0)
			ret = -EIO;
	}
busy_channel:
	spin_unlock(&pcc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);