blob: 3a5b489ae892f734fb034c1cffb22c259733a91c [file] [log] [blame]
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
13
14#include "kgsl_device.h"
15#include "kgsl_gmu.h"
16#include "adreno.h"
17#include "kgsl_trace.h"
18
19/* Size in below functions are in unit of dwords */
20static int hfi_msgq_read(struct gmu_device *gmu,
21 enum hfi_queue_type queue_idx, void *msg,
22 unsigned int max_size)
23{
24 struct gmu_memdesc *mem_addr = gmu->hfi_mem;
25 struct hfi_queue_table *tbl = mem_addr->hostptr;
26 struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx];
27 uint32_t *queue = HOST_QUEUE_START_ADDR(mem_addr, queue_idx);
28 uint32_t *output = msg;
29 struct hfi_msg_hdr *msg_hdr;
30 int i, read, result = 0;
31
32 if (hdr->read_index == hdr->write_index) {
33 hdr->rx_req = 1;
34 return -ENODATA;
35 }
36
37 msg_hdr = (struct hfi_msg_hdr *)&queue[hdr->read_index];
38
39 if (msg_hdr->size > max_size) {
40 dev_err(&gmu->pdev->dev,
41 "Received invalid msg: size=%d dwords, rd idx=%d, id=%d\n",
42 msg_hdr->size, hdr->read_index, msg_hdr->id);
43 return -EMSGSIZE;
44 }
45
46 read = hdr->read_index;
47
48 if (read < hdr->queue_size) {
49 for (i = 0; i < msg_hdr->size; i++) {
50 output[i] = queue[read];
51 read = (read + 1)%hdr->queue_size;
52 }
53 result = msg_hdr->size;
54 } else {
55 /* In case FW messed up */
56 dev_err(&gmu->pdev->dev,
57 "Read index %d greater than queue size %d\n",
58 hdr->read_index, hdr->queue_size);
59 result = -ENODATA;
60 }
61 hdr->read_index = read;
62
63 return result;
64}
65
66/* Size in below functions are in unit of dwords */
67static int hfi_cmdq_write(struct gmu_device *gmu,
68 enum hfi_queue_type queue_idx,
69 struct hfi_msg_hdr *msg)
70{
71 struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
72 struct hfi_queue_table *tbl = gmu->hfi_mem->hostptr;
73 struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx];
74 uint32_t *queue = HOST_QUEUE_START_ADDR(gmu->hfi_mem, queue_idx);
75 uint32_t *input = (uint32_t *) msg;
76 struct kgsl_hfi *hfi = &gmu->hfi;
77 uint32_t i, write, empty_space;
78
79 if (msg->size > HFI_MAX_MSG_SIZE) {
80 dev_err(&gmu->pdev->dev,
81 "Message too big to send: sz=%d, id=%d\n",
82 msg->size, msg->id);
83 return -EINVAL;
84 }
85
86 trace_kgsl_hfi_send(msg->id, msg->size, msg->seqnum);
87
88 mutex_lock(&hfi->cmdq_mutex);
89
90 empty_space = (hdr->write_index >= hdr->read_index) ?
91 (hdr->queue_size - (hdr->write_index - hdr->read_index))
92 : (hdr->read_index - hdr->write_index);
93
94 if (empty_space < msg->size) {
95 dev_err(&gmu->pdev->dev,
96 "Insufficient bufsize %d for msg id=%d of size %d\n",
97 empty_space, msg->id, msg->size);
98
99 hdr->drop_cnt++;
100 mutex_unlock(&hfi->cmdq_mutex);
101 return -ENOSPC;
102 }
103
104 write = hdr->write_index;
105
106 for (i = 0; i < msg->size; i++) {
107 queue[write] = input[i];
108 write = (write + 1) % hdr->queue_size;
109 }
110
111 hdr->write_index = write;
112
113 mutex_unlock(&hfi->cmdq_mutex);
114
115 /*
116 * Memory barrier to make sure packet and write index are written before
117 * an interrupt is raised
118 */
119 wmb();
120
121 /* Send interrupt to GMU to receive the message */
122 adreno_write_gmureg(ADRENO_DEVICE(device),
123 ADRENO_REG_GMU_HOST2GMU_INTR_SET, 0x1);
124
125 return msg->size;
126}
127
/*
 * Pack a queue header type dword: byte 0 = queue id, byte 1 = priority,
 * byte 2 = receive type, byte 3 = send type.
 */
#define QUEUE_HDR_TYPE(id, prio, rtype, stype) \
	((((stype) & 0xFF) << 24) | (((rtype) & 0xFF) << 16) | \
	 (((prio) & 0xFF) << 8) | ((id) & 0xFF))
131
132
133/* Sizes of the queue and message are in unit of dwords */
134void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr,
135 uint32_t queue_sz_bytes)
136{
137 int i;
138 struct hfi_queue_table *tbl;
139 struct hfi_queue_header *hdr;
140 int queue_prio[HFI_QUEUE_MAX] = {
141 HFI_H2F_QPRI_CMD,
142 HFI_F2H_QPRI_MSG,
143 HFI_F2H_QPRI_DEBUG
144 };
145 int queue_ids[HFI_QUEUE_MAX] = {0, 4, 5};
146
147 /* Fill Table Header */
148 tbl = mem_addr->hostptr;
149 tbl->qtbl_hdr.version = 0;
150 tbl->qtbl_hdr.size = sizeof(struct hfi_queue_table) >> 2;
151 tbl->qtbl_hdr.qhdr0_offset = sizeof(struct hfi_queue_table_header) >> 2;
152 tbl->qtbl_hdr.qhdr_size = sizeof(struct hfi_queue_header) >> 2;
153 tbl->qtbl_hdr.num_q = HFI_QUEUE_MAX;
154 tbl->qtbl_hdr.num_active_q = HFI_QUEUE_MAX;
155
156 /* Fill I dividual Queue Headers */
157 for (i = 0; i < HFI_QUEUE_MAX; i++) {
158 hdr = &tbl->qhdr[i];
159 hdr->start_addr = GMU_QUEUE_START_ADDR(mem_addr, i);
160 hdr->type = QUEUE_HDR_TYPE(queue_ids[i], queue_prio[i], 0, 0);
161 hdr->status = 0x1;
162 hdr->queue_size = queue_sz_bytes >> 2; /* convert to dwords */
163 hdr->msg_size = 0;
164 hdr->drop_cnt = 0;
165 hdr->rx_wm = 0x1;
166 hdr->tx_wm = 0x1;
167 hdr->rx_req = 0x1;
168 hdr->tx_req = 0x0;
169 hdr->read_index = 0x0;
170 hdr->write_index = 0x0;
171 }
172
173 mutex_init(&hfi->cmdq_mutex);
174}
175
176static void receive_ack_msg(struct gmu_device *gmu, struct hfi_msg_rsp *rsp)
177{
178 struct kgsl_hfi *hfi = &gmu->hfi;
179 struct pending_msg *msg = NULL, *next;
George Shenea65e0e2017-06-29 11:16:35 -0700180 bool in_queue = false;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800181
182 trace_kgsl_hfi_receive(rsp->ret_hdr.id,
183 rsp->ret_hdr.size,
184 rsp->ret_hdr.seqnum);
185
Kyle Piefer2aad8882017-11-20 16:06:57 -0800186 spin_lock_bh(&hfi->msglock);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800187 list_for_each_entry_safe(msg, next, &hfi->msglist, node) {
188 if (msg->msg_id == rsp->ret_hdr.id &&
George Shenea65e0e2017-06-29 11:16:35 -0700189 msg->seqnum == rsp->ret_hdr.seqnum) {
190 in_queue = true;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800191 break;
George Shenea65e0e2017-06-29 11:16:35 -0700192 }
Kyle Pieferb1027b02017-02-10 13:58:58 -0800193 }
Kyle Pieferb1027b02017-02-10 13:58:58 -0800194
George Shenea65e0e2017-06-29 11:16:35 -0700195 if (in_queue == false) {
Kyle Piefer2aad8882017-11-20 16:06:57 -0800196 spin_unlock_bh(&hfi->msglock);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800197 dev_err(&gmu->pdev->dev,
198 "Cannot find receiver of ack msg with id=%d\n",
199 rsp->ret_hdr.id);
200 return;
201 }
202
203 memcpy(&msg->results, (void *) rsp, rsp->hdr.size << 2);
204 complete(&msg->msg_complete);
Kyle Piefer2aad8882017-11-20 16:06:57 -0800205 spin_unlock_bh(&hfi->msglock);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800206}
207
208static void receive_err_msg(struct gmu_device *gmu, struct hfi_msg_rsp *rsp)
209{
210 struct hfi_fw_err_msg *err = (struct hfi_fw_err_msg *) rsp;
211
212 dev_err(&gmu->pdev->dev, "FW error with error code %d\n",
213 err->error_code);
214}
215
216static int hfi_send_msg(struct gmu_device *gmu, struct hfi_msg_hdr *msg,
217 unsigned int size, struct pending_msg *ret_msg)
218{
219 int rc = 0;
220 struct kgsl_hfi *hfi = &gmu->hfi;
221
222 msg->seqnum = atomic_inc_return(&hfi->seqnum);
223 if (msg->type != HFI_MSG_CMD) {
224 if (hfi_cmdq_write(gmu, HFI_CMD_QUEUE, msg) != size)
225 rc = -EINVAL;
226 return rc;
227 }
228
229 /* For messages of type HFI_MSG_CMD we must handle the ack */
230 init_completion(&ret_msg->msg_complete);
231 ret_msg->msg_id = msg->id;
232 ret_msg->seqnum = msg->seqnum;
233
Kyle Piefer2aad8882017-11-20 16:06:57 -0800234 spin_lock_bh(&hfi->msglock);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800235 list_add_tail(&ret_msg->node, &hfi->msglist);
Kyle Piefer2aad8882017-11-20 16:06:57 -0800236 spin_unlock_bh(&hfi->msglock);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800237
238 if (hfi_cmdq_write(gmu, HFI_CMD_QUEUE, msg) != size) {
239 rc = -EINVAL;
Kyle Piefere1149412017-06-16 17:46:24 -0700240 goto done;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800241 }
242
243 rc = wait_for_completion_timeout(
244 &ret_msg->msg_complete,
245 msecs_to_jiffies(HFI_RSP_TIMEOUT));
246 if (!rc) {
247 dev_err(&gmu->pdev->dev,
248 "Receiving GMU ack %d timed out\n", msg->id);
249 rc = -ETIMEDOUT;
Kyle Piefere1149412017-06-16 17:46:24 -0700250 goto done;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800251 }
252
Kyle Piefere1149412017-06-16 17:46:24 -0700253 /* If we got here we succeeded */
254 rc = 0;
255done:
Kyle Piefer2aad8882017-11-20 16:06:57 -0800256 spin_lock_bh(&hfi->msglock);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800257 list_del(&ret_msg->node);
Kyle Piefer2aad8882017-11-20 16:06:57 -0800258 spin_unlock_bh(&hfi->msglock);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800259 return rc;
260}
261
262int hfi_send_gmu_init(struct gmu_device *gmu, uint32_t boot_state)
263{
264 struct hfi_gmu_init_cmd init_msg = {
265 .hdr = {
266 .id = H2F_MSG_INIT,
267 .size = sizeof(init_msg) >> 2,
268 .type = HFI_MSG_CMD,
269 },
270 .seg_id = 0,
271 .dbg_buffer_addr = (unsigned int) gmu->dump_mem->gmuaddr,
272 .dbg_buffer_size = (unsigned int) gmu->dump_mem->size,
273 .boot_state = boot_state,
274 };
275
276 struct hfi_msg_rsp *rsp;
277 uint32_t msg_size_dwords = (sizeof(init_msg)) >> 2;
278 int rc = 0;
279 struct pending_msg msg;
280
Archana Sriram535d0e92017-09-26 18:44:07 +0530281 rc = hfi_send_msg(gmu, &init_msg.hdr, msg_size_dwords, &msg);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800282 if (rc)
283 return rc;
284
285 rsp = (struct hfi_msg_rsp *) &msg.results;
286 rc = rsp->error;
287 if (!rc)
288 gmu->hfi.gmu_init_done = true;
289 else
290 dev_err(&gmu->pdev->dev,
291 "gmu init message failed with error=%d\n", rc);
292 return rc;
293}
294
295int hfi_get_fw_version(struct gmu_device *gmu,
296 uint32_t expected_ver, uint32_t *ver)
297{
298 struct hfi_fw_version_cmd fw_ver = {
299 .hdr = {
300 .id = H2F_MSG_FW_VER,
301 .size = sizeof(fw_ver) >> 2,
302 .type = HFI_MSG_CMD
303 },
304 .supported_ver = expected_ver,
305 };
306 struct hfi_msg_rsp *rsp;
307 uint32_t msg_size_dwords = (sizeof(fw_ver)) >> 2;
308 int rc = 0;
309 struct pending_msg msg;
310
Archana Sriram535d0e92017-09-26 18:44:07 +0530311 rc = hfi_send_msg(gmu, &fw_ver.hdr, msg_size_dwords, &msg);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800312 if (rc)
313 return rc;
314
315 rsp = (struct hfi_msg_rsp *) &msg.results;
316 rc = rsp->error;
317 if (!rc)
318 *ver = rsp->payload[0];
319 else
320 dev_err(&gmu->pdev->dev,
321 "gmu get fw ver failed with error=%d\n", rc);
322 return rc;
323}
324
325int hfi_send_lmconfig(struct gmu_device *gmu)
326{
327 struct hfi_lmconfig_cmd lmconfig = {
328 .hdr = {
329 .id = H2F_MSG_LM_CFG,
330 .size = sizeof(lmconfig) >> 2,
331 .type = HFI_MSG_CMD
332 },
333 .limit_conf = gmu->lm_config,
334 .bcl_conf.bcl = gmu->bcl_config
335 };
336 struct hfi_msg_rsp *rsp;
337 uint32_t msg_size_dwords = (sizeof(lmconfig)) >> 2;
338 int rc = 0;
339 struct pending_msg msg;
340
341 if (gmu->lm_dcvs_level > MAX_GX_LEVELS)
342 lmconfig.lm_enable_bitmask = 0;
343 else
344 lmconfig.lm_enable_bitmask =
345 (1 << (gmu->lm_dcvs_level + 1)) - 1;
346
Archana Sriram535d0e92017-09-26 18:44:07 +0530347 rc = hfi_send_msg(gmu, &lmconfig.hdr, msg_size_dwords, &msg);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800348 if (rc)
349 return rc;
350
351 rsp = (struct hfi_msg_rsp *) &msg.results;
352 rc = rsp->error;
353 if (rc)
354 dev_err(&gmu->pdev->dev,
355 "gmu send lmconfig failed with error=%d\n", rc);
356 return rc;
357}
358
359int hfi_send_perftbl(struct gmu_device *gmu)
360{
361 struct hfi_dcvstable_cmd dcvstbl = {
362 .hdr = {
363 .id = H2F_MSG_PERF_TBL,
364 .size = sizeof(dcvstbl) >> 2,
365 .type = HFI_MSG_CMD
366 },
367 };
368 struct hfi_msg_rsp *rsp;
369 struct pending_msg msg;
370 uint32_t msg_size = (sizeof(dcvstbl)) >> 2;
371 int i, rc = 0;
372
373 dcvstbl.gpu_level_num = gmu->num_gpupwrlevels;
374 dcvstbl.gmu_level_num = gmu->num_gmupwrlevels;
375
376 for (i = 0; i < gmu->num_gpupwrlevels; i++) {
377 dcvstbl.gx_votes[i].vote = gmu->rpmh_votes.gx_votes[i];
378 /* Divide by 1000 to convert to kHz */
379 dcvstbl.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
380 }
381
382 for (i = 0; i < gmu->num_gmupwrlevels; i++) {
383 dcvstbl.cx_votes[i].vote = gmu->rpmh_votes.cx_votes[i];
384 dcvstbl.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
385
386 }
387
Archana Sriram535d0e92017-09-26 18:44:07 +0530388 rc = hfi_send_msg(gmu, &dcvstbl.hdr, msg_size, &msg);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800389 if (rc)
390 return rc;
391
392 rsp = (struct hfi_msg_rsp *)&msg.results;
393 rc = rsp->error;
394 if (rc)
395 dev_err(&gmu->pdev->dev,
396 "gmu send perf table failed with error=%d\n", rc);
397 return rc;
398}
399
400int hfi_send_bwtbl(struct gmu_device *gmu)
401{
402 struct hfi_bwtable_cmd bwtbl = {
403 .hdr = {
404 .id = H2F_MSG_BW_VOTE_TBL,
405 .size = sizeof(bwtbl) >> 2,
406 .type = HFI_MSG_CMD,
407 },
408 .bw_level_num = gmu->num_bwlevels,
409 .cnoc_cmds_num =
410 gmu->rpmh_votes.cnoc_votes.cmds_per_bw_vote,
411 .cnoc_wait_bitmask =
412 gmu->rpmh_votes.cnoc_votes.cmds_wait_bitmask,
413 .ddr_cmds_num = gmu->rpmh_votes.ddr_votes.cmds_per_bw_vote,
414 .ddr_wait_bitmask = gmu->rpmh_votes.ddr_votes.cmds_wait_bitmask,
415 };
416 struct hfi_msg_rsp *rsp;
417 struct pending_msg msg;
418 uint32_t msg_size_dwords = (sizeof(bwtbl)) >> 2;
419 int i, j, rc = 0;
420
421 for (i = 0; i < bwtbl.ddr_cmds_num; i++)
422 bwtbl.ddr_cmd_addrs[i] = gmu->rpmh_votes.ddr_votes.cmd_addrs[i];
423
424 for (i = 0; i < bwtbl.bw_level_num; i++)
425 for (j = 0; j < bwtbl.ddr_cmds_num; j++)
426 bwtbl.ddr_cmd_data[i][j] =
427 gmu->rpmh_votes.
428 ddr_votes.cmd_data[i][j];
429
430 for (i = 0; i < bwtbl.cnoc_cmds_num; i++)
431 bwtbl.cnoc_cmd_addrs[i] =
432 gmu->rpmh_votes.cnoc_votes.cmd_addrs[i];
433
434 for (i = 0; i < MAX_CNOC_LEVELS; i++)
435 for (j = 0; j < bwtbl.cnoc_cmds_num; j++)
436 bwtbl.cnoc_cmd_data[i][j] =
437 gmu->rpmh_votes.cnoc_votes.
438 cmd_data[i][j];
439
Archana Sriram535d0e92017-09-26 18:44:07 +0530440 rc = hfi_send_msg(gmu, &bwtbl.hdr, msg_size_dwords, &msg);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800441 if (rc)
442 return rc;
443
444 rsp = (struct hfi_msg_rsp *) &msg.results;
445 rc = rsp->error;
446 if (rc)
447 dev_err(&gmu->pdev->dev,
448 "gmu send bw table failed with error=%d\n", rc);
449 return rc;
450}
451
Kyle Piefer950f5922017-07-11 15:11:28 -0700452static int hfi_send_test(struct gmu_device *gmu)
453{
454 struct hfi_test_cmd test_msg = {
455 .hdr = {
456 .id = H2F_MSG_TEST,
457 .size = sizeof(test_msg) >> 2,
458 .type = HFI_MSG_CMD,
459 },
460 };
461 uint32_t msg_size_dwords = (sizeof(test_msg)) >> 2;
462 struct pending_msg msg;
463
464 return hfi_send_msg(gmu, (struct hfi_msg_hdr *)&test_msg.hdr,
465 msg_size_dwords, &msg);
466}
467
Kyle Pieferb1027b02017-02-10 13:58:58 -0800468int hfi_send_dcvs_vote(struct gmu_device *gmu, uint32_t perf_idx,
469 uint32_t bw_idx, enum rpm_ack_type ack_type)
470{
471 struct hfi_dcvs_cmd dcvs_cmd = {
472 .hdr = {
473 .id = H2F_MSG_DCVS_VOTE,
474 .size = sizeof(dcvs_cmd) >> 2,
475 .type = HFI_MSG_CMD,
476 },
477 .ack_type = ack_type,
478 .freq = {
479 .perf_idx = perf_idx,
480 .clkset_opt = OPTION_AT_LEAST,
481 },
482 .bw = {
483 .bw_idx = bw_idx,
484 },
485
486 };
487 struct hfi_msg_rsp *rsp;
488 uint32_t msg_size_dwords = (sizeof(dcvs_cmd)) >> 2;
489 int rc = 0;
490 struct pending_msg msg;
491
Archana Sriram535d0e92017-09-26 18:44:07 +0530492 rc = hfi_send_msg(gmu, &dcvs_cmd.hdr, msg_size_dwords, &msg);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800493 if (rc)
494 return rc;
495
496 rsp = (struct hfi_msg_rsp *)&msg.results;
497 rc = rsp->error;
498 if (rc)
499 dev_err(&gmu->pdev->dev,
500 "gmu send dcvs cmd failed with error=%d\n", rc);
501 return rc;
502}
503
504int hfi_notify_slumber(struct gmu_device *gmu,
505 uint32_t init_perf_idx, uint32_t init_bw_idx)
506{
507 struct hfi_prep_slumber_cmd slumber_cmd = {
508 .hdr = {
509 .id = H2F_MSG_PREPARE_SLUMBER,
510 .size = sizeof(slumber_cmd) >> 2,
511 .type = HFI_MSG_CMD,
512 },
513 .init_bw_idx = init_bw_idx,
514 .init_perf_idx = init_perf_idx,
515 };
516 struct hfi_msg_rsp *rsp;
517 uint32_t msg_size_dwords = (sizeof(slumber_cmd)) >> 2;
518 int rc = 0;
519 struct pending_msg msg;
520
521 if (init_perf_idx >= MAX_GX_LEVELS || init_bw_idx >= MAX_GX_LEVELS)
522 return -EINVAL;
523
Archana Sriram535d0e92017-09-26 18:44:07 +0530524 rc = hfi_send_msg(gmu, &slumber_cmd.hdr, msg_size_dwords, &msg);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800525 if (rc)
526 return rc;
527
528 rsp = (struct hfi_msg_rsp *) &msg.results;
529 rc = rsp->error;
530 if (rc)
531 dev_err(&gmu->pdev->dev,
532 "gmu send slumber notification failed with error=%d\n",
533 rc);
534 return rc;
535}
536
537void hfi_receiver(unsigned long data)
538{
539 struct gmu_device *gmu;
540 struct hfi_msg_rsp response;
541
542 if (!data)
543 return;
544
545 gmu = (struct gmu_device *)data;
546
547 while (hfi_msgq_read(gmu, HFI_MSG_QUEUE,
548 &response, sizeof(response)) > 0) {
549 if (response.hdr.size > (sizeof(response) >> 2)) {
550 dev_err(&gmu->pdev->dev,
551 "Ack is too large, id=%d, size=%d\n",
552 response.ret_hdr.id,
553 response.hdr.size);
554 continue;
555 }
556
557 switch (response.hdr.id) {
558 case F2H_MSG_ACK:
559 receive_ack_msg(gmu, &response);
560 break;
561 case F2H_MSG_ERR:
562 receive_err_msg(gmu, &response);
563 break;
564 default:
565 dev_err(&gmu->pdev->dev,
566 "Invalid packet with id %d\n", response.hdr.id);
567 break;
568 }
569 };
570}
571
572int hfi_start(struct gmu_device *gmu, uint32_t boot_state)
573{
574 struct kgsl_device *device =
575 container_of(gmu, struct kgsl_device, gmu);
576 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
577 struct device *dev = &gmu->pdev->dev;
578 int result;
579 unsigned int ver = 0, major, minor;
580
581 if (test_bit(GMU_HFI_ON, &gmu->flags))
582 return 0;
583
584 result = hfi_send_gmu_init(gmu, boot_state);
585 if (result)
586 return result;
587
Kyle Piefer0a8e6dd2017-05-17 13:40:05 -0700588 major = adreno_dev->gpucore->gpmu_major;
589 minor = adreno_dev->gpucore->gpmu_minor;
590 result = hfi_get_fw_version(gmu,
591 FW_VERSION(major, minor), &ver);
592 if (result)
593 dev_err(dev, "Failed to get FW version via HFI\n");
Kyle Pieferb1027b02017-02-10 13:58:58 -0800594
Kyle Piefer0a8e6dd2017-05-17 13:40:05 -0700595 gmu->ver = ver;
596 if (major != FW_VER_MAJOR(ver))
George Shen7b121082017-11-01 11:43:10 -0700597 WARN_ONCE(1, "FW version major %d error (expect %d)\n",
Kyle Piefer0a8e6dd2017-05-17 13:40:05 -0700598 FW_VER_MAJOR(ver),
599 adreno_dev->gpucore->gpmu_major);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800600
Kyle Piefer0a8e6dd2017-05-17 13:40:05 -0700601 if (minor > FW_VER_MINOR(ver))
George Shen7b121082017-11-01 11:43:10 -0700602 WARN_ONCE(1, "FW version minor %d error (expect %d)\n",
Kyle Piefer0a8e6dd2017-05-17 13:40:05 -0700603 FW_VER_MINOR(ver),
604 adreno_dev->gpucore->gpmu_minor);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800605
Kyle Piefer0a8e6dd2017-05-17 13:40:05 -0700606 result = hfi_send_perftbl(gmu);
607 if (result)
608 return result;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800609
Kyle Piefer0a8e6dd2017-05-17 13:40:05 -0700610 result = hfi_send_bwtbl(gmu);
611 if (result)
612 return result;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800613
Oleg Pereletc2ab7f72017-06-22 16:45:57 -0700614 if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
615 test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
616 gmu->lm_config.lm_type = 1;
617 gmu->lm_config.lm_sensor_type = 1;
618 gmu->lm_config.throttle_config = 1;
619 gmu->lm_config.idle_throttle_en = 0;
620 gmu->lm_config.acd_en = 0;
621 gmu->bcl_config = 0;
622 gmu->lm_dcvs_level = 0;
623
624 result = hfi_send_lmconfig(gmu);
625 if (result) {
Kyle Piefer950f5922017-07-11 15:11:28 -0700626 dev_err(dev, "Failure enabling LM (%d)\n",
627 result);
Oleg Pereletc2ab7f72017-06-22 16:45:57 -0700628 return result;
629 }
630 }
Kyle Pieferb1027b02017-02-10 13:58:58 -0800631
Kyle Piefer950f5922017-07-11 15:11:28 -0700632 /* Tell the GMU we are sending no more HFIs until the next boot */
633 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
634 result = hfi_send_test(gmu);
635 if (result)
636 return result;
637 }
638
Kyle Pieferb1027b02017-02-10 13:58:58 -0800639 set_bit(GMU_HFI_ON, &gmu->flags);
640 return 0;
641}
642
643void hfi_stop(struct gmu_device *gmu)
644{
645 struct gmu_memdesc *mem_addr = gmu->hfi_mem;
646 struct hfi_queue_table *tbl = mem_addr->hostptr;
647 struct hfi_queue_header *hdr;
648 unsigned int i;
649
650
651 if (!test_bit(GMU_HFI_ON, &gmu->flags))
652 return;
653
654 /* Flush HFI queues */
655 for (i = 0; i < HFI_QUEUE_MAX; i++) {
656 hdr = &tbl->qhdr[i];
657
658 if (hdr->read_index != hdr->write_index)
659 dev_err(&gmu->pdev->dev,
660 "HFI queue at idx %d is not empty before close: rd=%d,wt=%d",
661 i, hdr->read_index, hdr->write_index);
662
663 hdr->read_index = 0x0;
664 hdr->write_index = 0x0;
665 }
666
667 clear_bit(GMU_HFI_ON, &gmu->flags);
668}