/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers, which may be memory mapped or hardware registers, and may also
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */
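
/*
 * As an illustration only, a CPUfreq driver sitting on top of this file
 * would typically query the performance capabilities once and then issue
 * desired performance requests at runtime, roughly:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls;
 *
 *	cppc_get_perf_caps(cpu, &caps);
 *	ctrls.desired_perf = caps.nominal_perf;	// anywhere in [lowest, highest]
 *	cppc_set_perf(cpu, &ctrls);
 */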

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

/*
 * Lock to provide controlled access to the PCC channel.
 *
 * For performance critical use cases (currently cppc_set_perf):
 *	We need to take the read_lock and check if the channel belongs to OSPM
 *	before reading from or writing to the PCC subspace.
 *	We need to take the write_lock before transferring channel ownership
 *	to the platform via a doorbell.
 *	This allows us to batch a number of CPPC requests if they happen to
 *	originate at about the same time.
 *	(See the access sketch below the declarations that follow.)
 *
 * For non-performance critical use cases (init):
 *	Take the write_lock for all purposes, which gives exclusive access.
 */
static DECLARE_RWSEM(pcc_lock);

/* Indicates if there are any pending/batched PCC write commands */
static bool pending_pcc_write_cmd;

/* Wait queue for CPUs whose requests were batched */
static DECLARE_WAIT_QUEUE_HEAD(pcc_write_wait_q);

/* Used to identify if a batched request is delivered to platform */
static unsigned int pcc_write_cnt;
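
/*
 * A PCC register access therefore follows a pattern roughly like the
 * following sketch (simplified, error handling elided):
 *
 *	down_read(&pcc_lock);			// Phase-I, many CPUs in parallel
 *	check_pcc_chan();			// make sure OSPM owns the channel
 *	cpc_write(desired_reg, val);		// update the PCC subspace
 *	up_read(&pcc_lock);
 *
 *	down_write_trylock(&pcc_lock);		// Phase-II, one CPU rings the
 *	send_pcc_cmd(CMD_WRITE);		// doorbell for the whole batch
 *	up_write(&pcc_lock);
 *
 * See cppc_set_perf() for the full details of this batching scheme.
 */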

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* This layer handles all the PCC specifics for CPPC. */
static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static int pcc_subspace_idx = -1;
static bool pcc_channel_acquired;
static ktime_t deadline;
static unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs)	(pcc_comm_addr + 0x8 + (offs))
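
/*
 * For example, the Desired Performance register in the sample _CPC package
 * further below sits at PCC offset 0x110, so it is accessed through
 * GET_PCC_VADDR(0x110), i.e. pcc_comm_addr + 0x8 + 0x110.
 */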

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500

struct cppc_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)		\
static struct cppc_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};

	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static ssize_t show_reference_perf(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};

	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
			fb_ctrs.reference_perf);
}
define_one_cppc_ro(reference_perf);

static ssize_t show_wraparound_time(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};

	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", fb_ctrs.ctr_wrap_time);
}
define_one_cppc_ro(wraparound_time);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};
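
/*
 * The resulting sysfs layout, one directory per logical CPU, then looks
 * roughly like:
 *
 *	/sys/devices/system/cpu/cpuN/acpi_cppc/feedback_ctrs
 *	/sys/devices/system/cpu/cpuN/acpi_cppc/reference_perf
 *	/sys/devices/system/cpu/cpuN/acpi_cppc/wraparound_time
 */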

static int check_pcc_chan(void)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(), deadline);

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		/*
		 * Per spec, prior to boot the PCC space will be initialized by
		 * platform and should have set the command completion bit when
		 * PCC can be used by OSPM
		 */
		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
			ret = 0;
			break;
		}
		/*
		 * Reducing the bus traffic in case this loop takes longer than
		 * a few retries.
		 */
		udelay(3);
	}

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(u16 cmd)
{
	int ret = -EIO, i;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
	static int mpar_count;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pending_pcc_write_cmd)
			send_pcc_cmd(CMD_WRITE);

		ret = check_pcc_chan();
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
		if (pcc_mrtt > time_delta)
			udelay(pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec and
	 * just not send the request to the platform after hitting the MPAR limit
	 * in any 60s window.
	 */
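	/*
	 * For example (hypothetical value), an MPAR of 600 allows at most 600
	 * commands in any 60 second window, i.e. an average of 10 commands per
	 * second; once mpar_count hits zero inside the window, further requests
	 * are dropped until the window expires.
	 */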
	if (pcc_mpar) {
		if (mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
			if (time_delta < 60 * MSEC_PER_SEC) {
				pr_debug("PCC cmd not sent due to MPAR limit");
				ret = -EIO;
				goto end;
			}
			last_mpar_reset = ktime_get();
			mpar_count = pcc_mpar;
		}
		mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	/* Ring doorbell */
	ret = mbox_send_message(pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		goto end;
	}

	/*
	 * For READs we need to ensure the cmd completed to ensure
	 * the ensuing read()s can proceed. For WRITEs we don't care
	 * because the actual write()s are done before coming here
	 * and the next READ or WRITE will check if the channel
	 * is busy/free at the entry of this call.
	 *
	 * If Minimum Request Turnaround Time is non-zero, we need
	 * to record the completion time of both READ and WRITE
	 * command for proper handling of MRTT, so we need to check
	 * for pcc_mrtt in addition to CMD_READ
	 */
	if (cmd == CMD_READ || pcc_mrtt) {
		ret = check_pcc_chan();
		if (pcc_mrtt)
			last_cmd_cmpl_time = ktime_get();
	}

	mbox_client_txdone(pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_write_cnt++;
		wake_up_all(&pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
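
/*
 * As a hypothetical example, if CPUs 0 and 1 both report _PSD domain 0 with
 * coord_type SW_ANY and num_processors = 2, each ends up with
 * shared_type = CPUFREQ_SHARED_TYPE_ANY and a shared_cpu_map covering both
 * CPUs. Any inconsistency in the reported domain data makes every CPU fall
 * back to a single-CPU map with CPUFREQ_SHARED_TYPE_ALL.
 */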

static int register_pcc_channel(int pcc_subspace_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_subspace_idx >= 0) {
		pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
				pcc_subspace_idx);

		if (IS_ERR(pcc_channel)) {
			pr_err("Failed to find PCC communication channel\n");
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = pcc_channel->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for CPPC\n");
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
		pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_mpar = cppc_ss->max_access_rate;
		pcc_nominal = cppc_ss->latency;

		pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem\n");
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_channel_acquired = true;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *		{
 *		17,							// NumEntries
 *		1,							// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		..
 *		..
 *		..
 *		}
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,		// AddressSpaceKeyword
 *		8,		// RegisterBitWidth
 *		8,		// RegisterBitOffset
 *		0x30,		// RegisterAddress
 *		9		// AccessSize (subspace ID)
 *		0
 *	)
 * }
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this cpu. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_idx < 0)
					pcc_subspace_idx = gas_t->access_width;
				else if (pcc_subspace_idx != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				/* Support only PCC and SYS MEM type regs */
				pr_debug("Unsupported register type: %d\n", gas_t->space_id);
				goto out_free;
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all CPUs. */
	if (!pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_idx);
		if (ret)
			goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev)
		goto out_free;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret)
		goto out_free;

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock, they
 * should be as fast as possible. We have already mapped the PCC subspace
 * during init, so we can directly write to it.
 */

static int cpc_read(struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		vaddr = GET_PCC_VADDR(reg->address);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC\n",
				reg->bit_width);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
		vaddr = GET_PCC_VADDR(reg->address);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC\n",
				reg->bit_width);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
								 *nom_perf;
	u64 high, low, nom;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF];
	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC ? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
		CPC_IN_PCC(ref_perf) || CPC_IN_PCC(nom_perf)) {
		regs_in_pcc = 1;
		down_write(&pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(nom_perf, &nom);
	perf_caps->nominal_perf = nom;

	if (!high || !low || !nom)
		ret = -EFAULT;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC ? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		down_write(&pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(delivered_reg, &delivered);
	cpc_read(reference_reg, &reference);
	cpc_read(ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
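
/*
 * A consumer of these counters would typically sample them twice and, per
 * the CPPC definition of the feedback counters, derive the average
 * delivered performance over the interval as (sketch):
 *
 *	perf = reference_perf * (delivered_2 - delivered_1) /
 *				(reference_2 - reference_1)
 */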

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		down_read(&pcc_lock);	/* BEGIN Phase-I */
		/*
		 * If there are pending write commands i.e. pending_pcc_write_cmd
		 * is TRUE, then we know OSPM owns the channel as another CPU
		 * has already checked for command completion bit and updated
		 * the corresponding CPC registers
		 */
		if (!pending_pcc_write_cmd) {
			ret = check_pcc_chan();
			if (ret) {
				up_read(&pcc_lock);
				return ret;
			}
			/*
			 * Update the pending_write to make sure a PCC CMD_READ
			 * will not arrive and steal the channel during the
			 * transition to write lock
			 */
			pending_pcc_write_cmd = TRUE;
		}
		cpc_desc->write_cmd_id = pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: If we think of a group of cppc_set_perf requests that
	 * happened in a short overlapping interval, the last CPU to come out
	 * of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (the other CPU acquiring the write_lock) couldn't have happened
	 * before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing pcc CMD_READ has stolen the
	 * down_write, in which case, send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario
	 * The thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to Platform by triggering the
	 * doorbell and transferred the ownership of PCC to platform. So this
	 * avoids triggering an unnecessary doorbell and more importantly before
	 * triggering the doorbell it makes sure that the PCC channel ownership
	 * is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
	 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pending_pcc_write_cmd)
				send_pcc_cmd(CMD_WRITE);
			up_write(&pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can communicate the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables, which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
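	/*
	 * Worked example with hypothetical PCCT values: pcc_nominal = 100us,
	 * pcc_mrtt = 50us and pcc_mpar = 0 give a latency of
	 * (100 + 50) * 1000 = 150000 ns.  A non-zero pcc_mpar of 6000
	 * commands/min instead sets a floor of 60 * (10^9 / 6000) ns,
	 * i.e. roughly 10 ms.
	 */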
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_mpar);

	latency_ns = max(latency_ns, (pcc_nominal + pcc_mrtt) * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);