// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */

#include "kgsl_device.h"
#include "kgsl_hfi.h"
#include "kgsl_gmu.h"
#include "adreno.h"
#include "kgsl_trace.h"
#include "kgsl_pwrctrl.h"

#define HFI_QUEUE_OFFSET(i) \
	(ALIGN(sizeof(struct hfi_queue_table), SZ_16) + \
	 ((i) * HFI_QUEUE_SIZE))
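
/*
 * Layout of the shared HFI memory region, per the macro above: the queue
 * table (table header plus one hfi_queue_header per queue) comes first,
 * aligned to 16 bytes, followed by HFI_QUEUE_MAX fixed-size queues placed
 * back to back. HFI_QUEUE_OFFSET(i) is the byte offset of queue i.
 */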

#define HOST_QUEUE_START_ADDR(hfi_mem, i) \
	((hfi_mem)->hostptr + HFI_QUEUE_OFFSET(i))

#define GMU_QUEUE_START_ADDR(hfi_mem, i) \
	((hfi_mem)->gmuaddr + HFI_QUEUE_OFFSET(i))

#define MSG_HDR_GET_ID(hdr) ((hdr) & 0xFF)
#define MSG_HDR_GET_SIZE(hdr) (((hdr) >> 8) & 0xFF)
#define MSG_HDR_GET_TYPE(hdr) (((hdr) >> 16) & 0xF)
#define MSG_HDR_GET_SEQNUM(hdr) (((hdr) >> 20) & 0xFFF)

/* Size is converted from bytes to dwords */
#define CREATE_MSG_HDR(id, size, type) \
	(((type) << 16) | ((((size) >> 2) & 0xFF) << 8) | ((id) & 0xFF))
#define CMD_MSG_HDR(id, size) CREATE_MSG_HDR(id, size, HFI_MSG_CMD)
#define ACK_MSG_HDR(id, size) CREATE_MSG_HDR(id, size, HFI_MSG_ACK)
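
/*
 * For reference, the 32-bit HFI message header layout implied by the
 * accessors above:
 *   bits  0..7   message ID
 *   bits  8..15  message size in dwords
 *   bits 16..19  message type
 *   bits 20..31  sequence number (filled in later by hfi_send_cmd())
 * e.g. CMD_MSG_HDR(id, 16) describes a 16-byte (4-dword) command packet.
 */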

static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx,
	struct pending_cmd *ret_cmd);

/* Sizes in the functions below are in units of dwords unless noted */
static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx,
		unsigned int *output, unsigned int max_size)
{
	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
	struct hfi_queue_table *tbl = mem_addr->hostptr;
	struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx];
	uint32_t *queue;
	uint32_t msg_hdr;
	uint32_t i, read;
	uint32_t size;
	int result = 0;

	if (hdr->status == HFI_QUEUE_STATUS_DISABLED)
		return -EINVAL;

	if (hdr->read_index == hdr->write_index)
		return -ENODATA;

	/* Clear the output data before populating */
	memset(output, 0, max_size);

	queue = HOST_QUEUE_START_ADDR(mem_addr, queue_idx);
	msg_hdr = queue[hdr->read_index];
	size = MSG_HDR_GET_SIZE(msg_hdr);

	/* max_size is in bytes, so convert it to dwords for the check */
	if (size > (max_size >> 2)) {
		dev_err(&gmu->pdev->dev,
			"HFI message too big: hdr:0x%x rd idx=%d\n",
			msg_hdr, hdr->read_index);
		result = -EMSGSIZE;
		goto done;
	}

	read = hdr->read_index;

	if (read < hdr->queue_size) {
		for (i = 0; i < size && i < (max_size >> 2); i++) {
			output[i] = queue[read];
			read = (read + 1) % hdr->queue_size;
		}
		result = size;
	} else {
		/* In case the firmware messed up the read index */
		dev_err(&gmu->pdev->dev,
			"Read index %d greater than queue size %d\n",
			hdr->read_index, hdr->queue_size);
		result = -ENODATA;
	}

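	/*
	 * HFI v2 pads each packet out to a 4-dword boundary (see the
	 * cookie fill in hfi_queue_write()), so advance the read index
	 * past the padding as well
	 */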
	if (GMU_VER_MAJOR(gmu->ver.hfi) >= 2)
		read = ALIGN(read, SZ_4) % hdr->queue_size;

	hdr->read_index = read;

done:
	return result;
}

/* Sizes in the function below are in units of dwords */
static int hfi_queue_write(struct gmu_device *gmu, uint32_t queue_idx,
		uint32_t *msg)
{
	struct hfi_queue_table *tbl = gmu->hfi_mem->hostptr;
	struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx];
	uint32_t *queue;
	struct kgsl_hfi *hfi = &gmu->hfi;
	uint32_t i, write, empty_space;
	uint32_t size = MSG_HDR_GET_SIZE(*msg);
	uint32_t id = MSG_HDR_GET_ID(*msg);

	if (hdr->status == HFI_QUEUE_STATUS_DISABLED)
		return -EINVAL;

	if (size > HFI_MAX_MSG_SIZE) {
		dev_err(&gmu->pdev->dev,
			"Message too big to send: sz=%d, id=%d\n",
			size, id);
		return -EINVAL;
	}

	queue = HOST_QUEUE_START_ADDR(gmu->hfi_mem, queue_idx);

	trace_kgsl_hfi_send(id, size, MSG_HDR_GET_SEQNUM(*msg));

	mutex_lock(&hfi->cmdq_mutex);

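	/*
	 * Free space in the ring, in dwords. Illustrative example: with
	 * queue_size = 64, read_index = 10 and write_index = 60, the
	 * writer may use 64 - (60 - 10) = 14 dwords; once write_index
	 * wraps to 4, only 10 - 4 = 6 dwords remain.
	 */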
	empty_space = (hdr->write_index >= hdr->read_index) ?
		(hdr->queue_size - (hdr->write_index - hdr->read_index))
		: (hdr->read_index - hdr->write_index);

	if (empty_space < size) {
		dev_err(&gmu->pdev->dev,
			"Insufficient bufsize %d for msg id=%d of size %d\n",
			empty_space, id, size);

		mutex_unlock(&hfi->cmdq_mutex);
		return -ENOSPC;
	}

	write = hdr->write_index;

	for (i = 0; i < size; i++) {
		queue[write] = msg[i];
		write = (write + 1) % hdr->queue_size;
	}

	/* For HFI v2, pad the packet to a 4-dword boundary with cookies */
	if (GMU_VER_MAJOR(gmu->ver.hfi) >= 2) {
		for (; write % 4; write = (write + 1) % hdr->queue_size)
			queue[write] = 0xFAFAFAFA;
	}

	hdr->write_index = write;

	mutex_unlock(&hfi->cmdq_mutex);

	/*
	 * Memory barrier to make sure the packet and the write index are
	 * written before an interrupt is raised
	 */
	wmb();

	/* Send an interrupt to the GMU so it picks up the message */
	adreno_write_gmureg(ADRENO_DEVICE(hfi->kgsldev),
		ADRENO_REG_GMU_HOST2GMU_INTR_SET, 0x1);

	return 0;
}

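/*
 * Queue header 'type' field: id, prio, rtype and stype each occupy one
 * byte, packed low byte first.
 */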
#define QUEUE_HDR_TYPE(id, prio, rtype, stype) \
	(((id) & 0xFF) | (((prio) & 0xFF) << 8) | \
	 (((rtype) & 0xFF) << 16) | (((stype) & 0xFF) << 24))

/* Sizes of the queue and message are in units of dwords */
void hfi_init(struct gmu_device *gmu)
{
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(hfi->kgsldev);
	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
	int i;
	struct hfi_queue_table *tbl;
	struct hfi_queue_header *hdr;
	struct {
		unsigned int idx;
		unsigned int pri;
		unsigned int status;
	} queue[HFI_QUEUE_MAX] = {
		{ HFI_CMD_IDX, HFI_CMD_PRI, HFI_QUEUE_STATUS_ENABLED },
		{ HFI_MSG_IDX, HFI_MSG_PRI, HFI_QUEUE_STATUS_ENABLED },
		{ HFI_DBG_IDX, HFI_DBG_PRI, HFI_QUEUE_STATUS_ENABLED },
		{ HFI_DSP_IDX_0, HFI_DSP_PRI_0, HFI_QUEUE_STATUS_DISABLED },
	};

	/*
	 * Overwrite the queue IDs for A630, A615 and A616 as they use
	 * legacy firmware. Legacy firmware has different queue IDs for
	 * the message, debug and dispatch queues.
	 */
	if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) {
		queue[HFI_MSG_ID].idx = HFI_MSG_IDX_LEGACY;
		queue[HFI_DBG_ID].idx = HFI_DBG_IDX_LEGACY;
		queue[HFI_DSP_ID_0].idx = HFI_DSP_IDX_0_LEGACY;
	}

	/* Fill the table header */
	tbl = mem_addr->hostptr;
	tbl->qtbl_hdr.version = 0;
	tbl->qtbl_hdr.size = sizeof(struct hfi_queue_table) >> 2;
	tbl->qtbl_hdr.qhdr0_offset = sizeof(struct hfi_queue_table_header) >> 2;
	tbl->qtbl_hdr.qhdr_size = sizeof(struct hfi_queue_header) >> 2;
	tbl->qtbl_hdr.num_q = HFI_QUEUE_MAX;
	tbl->qtbl_hdr.num_active_q = HFI_QUEUE_MAX;

	memset(&tbl->qhdr[0], 0, sizeof(tbl->qhdr));

	/* Fill the individual queue headers */
	for (i = 0; i < HFI_QUEUE_MAX; i++) {
		hdr = &tbl->qhdr[i];
		hdr->start_addr = GMU_QUEUE_START_ADDR(mem_addr, i);
		hdr->type = QUEUE_HDR_TYPE(queue[i].idx, queue[i].pri, 0, 0);
		hdr->status = queue[i].status;
		hdr->queue_size = HFI_QUEUE_SIZE >> 2; /* convert to dwords */
	}

	mutex_init(&hfi->cmdq_mutex);
}

#define HDR_CMP_SEQNUM(out_hdr, in_hdr) \
	(MSG_HDR_GET_SEQNUM(out_hdr) == MSG_HDR_GET_SEQNUM(in_hdr))

static void receive_ack_cmd(struct gmu_device *gmu, void *rcvd,
		struct pending_cmd *ret_cmd)
{
	uint32_t *ack = rcvd;
	uint32_t hdr = ack[0];
	uint32_t req_hdr = ack[1];
	struct kgsl_hfi *hfi = &gmu->hfi;

	if (ret_cmd == NULL)
		return;

	trace_kgsl_hfi_receive(MSG_HDR_GET_ID(req_hdr),
		MSG_HDR_GET_SIZE(req_hdr),
		MSG_HDR_GET_SEQNUM(req_hdr));

	if (HDR_CMP_SEQNUM(ret_cmd->sent_hdr, req_hdr)) {
		memcpy(&ret_cmd->results, ack, MSG_HDR_GET_SIZE(hdr) << 2);
		return;
	}

	/* The ACK doesn't match the waiting command; log both headers */
	dev_err_ratelimited(&gmu->pdev->dev,
		"HFI ACK: Cannot find sender for 0x%8.8x Waiter: 0x%8.8x\n",
		req_hdr, ret_cmd->sent_hdr);

	adreno_set_gpu_fault(ADRENO_DEVICE(hfi->kgsldev), ADRENO_GMU_FAULT);
	adreno_dispatcher_schedule(hfi->kgsldev);
}

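/*
 * Preserve bits 0..19 (ID, size and type) and install a new 12-bit
 * sequence number in bits 20..31; higher-order bits of num fall out of
 * the 32-bit header
 */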
#define MSG_HDR_SET_SEQNUM(hdr, num) \
	(((hdr) & 0xFFFFF) | ((num) << 20))

static int poll_adreno_gmu_reg(struct adreno_device *adreno_dev,
	enum adreno_regs offset_name, unsigned int expected_val,
	unsigned int mask, unsigned int timeout_ms)
{
	unsigned int val;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (time_is_after_jiffies(timeout)) {
		adreno_read_gmureg(adreno_dev, offset_name, &val);
		if ((val & mask) == expected_val)
			return 0;
		usleep_range(10, 100);
	}

	/* Check one last time */
	adreno_read_gmureg(adreno_dev, offset_name, &val);
	if ((val & mask) == expected_val)
		return 0;

	return -ETIMEDOUT;
}

static int hfi_send_cmd(struct gmu_device *gmu, uint32_t queue_idx,
		void *data, struct pending_cmd *ret_cmd)
{
	int rc;
	uint32_t *cmd = data;
	struct kgsl_hfi *hfi = &gmu->hfi;
	unsigned int seqnum = atomic_inc_return(&hfi->seqnum);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(hfi->kgsldev);

	*cmd = MSG_HDR_SET_SEQNUM(*cmd, seqnum);
	if (ret_cmd == NULL)
		return hfi_queue_write(gmu, queue_idx, cmd);

	ret_cmd->sent_hdr = cmd[0];

	rc = hfi_queue_write(gmu, queue_idx, cmd);
	if (rc)
		return rc;

	rc = poll_adreno_gmu_reg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
		HFI_IRQ_MSGQ_MASK, HFI_IRQ_MSGQ_MASK, HFI_RSP_TIMEOUT);

	if (rc) {
		dev_err(&gmu->pdev->dev,
			"Timed out waiting on ack for 0x%8.8x (id %d, sequence %d)\n",
			cmd[0], MSG_HDR_GET_ID(*cmd), MSG_HDR_GET_SEQNUM(*cmd));
		return rc;
	}

	/* Clear the interrupt */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
		HFI_IRQ_MSGQ_MASK);

	hfi_process_queue(gmu, HFI_MSG_ID, ret_cmd);

	return rc;
}

static int hfi_send_generic_req(struct gmu_device *gmu, uint32_t queue,
	void *cmd)
{
	struct pending_cmd ret_cmd;
	int rc;

	memset(&ret_cmd, 0, sizeof(ret_cmd));

	rc = hfi_send_cmd(gmu, queue, cmd, &ret_cmd);
	if (rc)
		return rc;

	if (ret_cmd.results[2])
		dev_err(&gmu->pdev->dev,
			"HFI ACK failure: Req 0x%8.8X Error 0x%X\n",
			ret_cmd.results[1],
			ret_cmd.results[2]);

	return ret_cmd.results[2] ? -EINVAL : 0;
}
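
/*
 * Note on the results[] indices used above and below: per
 * receive_ack_cmd(), results[0] is the ACK header, results[1] echoes the
 * request header, and results[2] onward carry the firmware's return
 * payload, error code first.
 */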

static int hfi_send_gmu_init(struct gmu_device *gmu, uint32_t boot_state)
{
	struct hfi_gmu_init_cmd cmd = {
		.hdr = CMD_MSG_HDR(H2F_MSG_INIT, sizeof(cmd)),
		.seg_id = 0,
		.dbg_buffer_addr = (unsigned int) gmu->dump_mem->gmuaddr,
		.dbg_buffer_size = (unsigned int) gmu->dump_mem->size,
		.boot_state = boot_state,
	};

	return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd);
}

static int hfi_get_fw_version(struct gmu_device *gmu,
		uint32_t expected_ver, uint32_t *ver)
{
	struct hfi_fw_version_cmd cmd = {
		.hdr = CMD_MSG_HDR(H2F_MSG_FW_VER, sizeof(cmd)),
		.supported_ver = expected_ver,
	};
	int rc;
	struct pending_cmd ret_cmd;

	memset(&ret_cmd, 0, sizeof(ret_cmd));

	rc = hfi_send_cmd(gmu, HFI_CMD_ID, &cmd, &ret_cmd);
	if (rc)
		return rc;

	rc = ret_cmd.results[2];
	if (!rc)
		*ver = ret_cmd.results[3];
	else
		dev_err(&gmu->pdev->dev,
			"gmu get fw ver failed with error=%d\n", rc);

	return rc;
}

static int hfi_send_core_fw_start(struct gmu_device *gmu)
{
	struct hfi_core_fw_start_cmd cmd = {
		.hdr = CMD_MSG_HDR(H2F_MSG_CORE_FW_START, sizeof(cmd)),
		.handle = 0x0,
	};

	return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd);
}

static const char * const hfi_features[] = {
	[HFI_FEATURE_ECP] = "ECP",
	[HFI_FEATURE_ACD] = "ACD",
	[HFI_FEATURE_LM] = "LM",
};

static const char *feature_to_string(uint32_t feature)
{
	if (feature < ARRAY_SIZE(hfi_features) && hfi_features[feature])
		return hfi_features[feature];

	return "unknown";
}

static int hfi_send_feature_ctrl(struct gmu_device *gmu,
		uint32_t feature, uint32_t enable, uint32_t data)
{
	struct hfi_feature_ctrl_cmd cmd = {
		.hdr = CMD_MSG_HDR(H2F_MSG_FEATURE_CTRL, sizeof(cmd)),
		.feature = feature,
		.enable = enable,
		.data = data,
	};
	int ret;

	ret = hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd);
	if (ret)
		dev_err(&gmu->pdev->dev,
			"Unable to %s feature %s (%d)\n",
			enable ? "enable" : "disable",
			feature_to_string(feature),
			feature);
	return ret;
}

static int hfi_send_dcvstbl_v1(struct gmu_device *gmu)
{
	struct hfi_dcvstable_v1_cmd cmd = {
		.hdr = CMD_MSG_HDR(H2F_MSG_PERF_TBL, sizeof(cmd)),
		.gpu_level_num = gmu->num_gpupwrlevels,
		.gmu_level_num = gmu->num_gmupwrlevels,
	};
	int i;

	for (i = 0; i < gmu->num_gpupwrlevels; i++) {
		cmd.gx_votes[i].vote = gmu->rpmh_votes.gx_votes[i];
		/* Divide by 1000 to convert to kHz */
		cmd.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->num_gmupwrlevels; i++) {
		cmd.cx_votes[i].vote = gmu->rpmh_votes.cx_votes[i];
		cmd.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd);
}

static int hfi_send_get_value(struct gmu_device *gmu,
	struct hfi_get_value_req *req)
{
	struct hfi_get_value_cmd *cmd = &req->cmd;
	struct pending_cmd ret_cmd;
	struct hfi_get_value_reply_cmd *reply =
		(struct hfi_get_value_reply_cmd *)ret_cmd.results;
	int rc;

	cmd->hdr = CMD_MSG_HDR(H2F_MSG_GET_VALUE, sizeof(*cmd));

	rc = hfi_send_cmd(gmu, HFI_CMD_ID, cmd, &ret_cmd);
	if (rc)
		return rc;

	memset(&req->data, 0, sizeof(req->data));
	/* The reply payload is the message minus the two header dwords */
	memcpy(&req->data, &reply->data,
		(MSG_HDR_GET_SIZE(reply->hdr) - 2) << 2);
	return 0;
}

static int hfi_send_dcvstbl(struct gmu_device *gmu)
{
	struct hfi_dcvstable_cmd cmd = {
		.hdr = CMD_MSG_HDR(H2F_MSG_PERF_TBL, sizeof(cmd)),
		.gpu_level_num = gmu->num_gpupwrlevels,
		.gmu_level_num = gmu->num_gmupwrlevels,
	};
	int i;

	for (i = 0; i < gmu->num_gpupwrlevels; i++) {
		cmd.gx_votes[i].vote = gmu->rpmh_votes.gx_votes[i];
		/* Hardcode this to the max threshold since it is not used */
		cmd.gx_votes[i].acd = 0xFFFFFFFF;
		/* Divide by 1000 to convert to kHz */
		cmd.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->num_gmupwrlevels; i++) {
		cmd.cx_votes[i].vote = gmu->rpmh_votes.cx_votes[i];
		cmd.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd);
}

static int hfi_send_bwtbl(struct gmu_device *gmu)
{
	struct hfi_bwtable_cmd *cmd = &gmu->hfi.bwtbl_cmd;

	cmd->hdr = CMD_MSG_HDR(H2F_MSG_BW_VOTE_TBL, sizeof(*cmd));

	return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd);
}

static int hfi_send_acd_tbl(struct gmu_device *gmu)
{
	struct hfi_acd_table_cmd *cmd = &gmu->hfi.acd_tbl_cmd;

	cmd->hdr = CMD_MSG_HDR(H2F_MSG_ACD_TBL, sizeof(*cmd));

	return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd);
}

static int hfi_send_test(struct gmu_device *gmu)
{
	struct hfi_test_cmd cmd = {
		.hdr = CMD_MSG_HDR(H2F_MSG_TEST, sizeof(cmd)),
	};

	return hfi_send_generic_req(gmu, HFI_CMD_ID, &cmd);
}

static void receive_err_req(struct gmu_device *gmu, void *rcvd)
{
	struct hfi_err_cmd *cmd = rcvd;

	dev_err(&gmu->pdev->dev, "HFI Error Received: %d %d %s\n",
		((cmd->error_code >> 16) & 0xFFFF),
		(cmd->error_code & 0xFFFF),
		(char *) cmd->data);
}

static void receive_debug_req(struct gmu_device *gmu, void *rcvd)
{
	struct hfi_debug_cmd *cmd = rcvd;

	dev_dbg(&gmu->pdev->dev, "HFI Debug Received: %d %d %d\n",
		cmd->type, cmd->timestamp, cmd->data);
}

static void hfi_v1_receiver(struct gmu_device *gmu, uint32_t *rcvd,
		struct pending_cmd *ret_cmd)
{
	/* V1 ACK handler */
	if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_V1_MSG_ACK) {
		receive_ack_cmd(gmu, rcvd, ret_cmd);
		return;
	}

	/* V1 request handler */
	switch (MSG_HDR_GET_ID(rcvd[0])) {
	case F2H_MSG_ERR: /* No reply */
		receive_err_req(gmu, rcvd);
		break;
	case F2H_MSG_DEBUG: /* No reply */
		receive_debug_req(gmu, rcvd);
		break;
	default: /* No reply */
		dev_err(&gmu->pdev->dev,
			"HFI V1 request %d not supported\n",
			MSG_HDR_GET_ID(rcvd[0]));
		break;
	}
}

static void hfi_process_queue(struct gmu_device *gmu, uint32_t queue_idx,
		struct pending_cmd *ret_cmd)
{
	uint32_t rcvd[MAX_RCVD_SIZE];

	while (hfi_queue_read(gmu, queue_idx, rcvd, sizeof(rcvd)) > 0) {
		/* Special case if the firmware speaks HFI v1 */
		if (GMU_VER_MAJOR(gmu->ver.hfi) < 2) {
			hfi_v1_receiver(gmu, rcvd, ret_cmd);
			continue;
		}

		/* V2 ACK handler */
		if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_MSG_ACK) {
			receive_ack_cmd(gmu, rcvd, ret_cmd);
			continue;
		}

		/* V2 request handler */
		switch (MSG_HDR_GET_ID(rcvd[0])) {
		case F2H_MSG_ERR: /* No reply */
			receive_err_req(gmu, rcvd);
			break;
		case F2H_MSG_DEBUG: /* No reply */
			receive_debug_req(gmu, rcvd);
			break;
		default: /* No reply */
			dev_err(&gmu->pdev->dev,
				"HFI request %d not supported\n",
				MSG_HDR_GET_ID(rcvd[0]));
			break;
		}
	}
}

void hfi_receiver(unsigned long data)
{
	/* Process all asynchronous read (firmware to host) queues */
	hfi_process_queue((struct gmu_device *) data, HFI_DBG_ID, NULL);
}

static int hfi_verify_fw_version(struct kgsl_device *device,
		struct gmu_device *gmu)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int result;
	unsigned int ver, major, minor;

	/* The GMU version is already known, so don't waste time finding it again */
	if (gmu->ver.core != 0)
		return 0;

	major = adreno_dev->gpucore->gpmu_major;
	minor = adreno_dev->gpucore->gpmu_minor;

	result = hfi_get_fw_version(gmu, GMU_VERSION(major, minor), &ver);
	if (result) {
		dev_err_once(&gmu->pdev->dev,
			"Failed to get FW version via HFI\n");
		return result;
	}

	/* For now, warn once. Could return an error later if needed */
	if (major != GMU_VER_MAJOR(ver))
		dev_err_once(&gmu->pdev->dev,
			"FW Major Error: Wanted %d, got %d\n",
			major, GMU_VER_MAJOR(ver));

	if (minor > GMU_VER_MINOR(ver))
		dev_err_once(&gmu->pdev->dev,
			"FW Minor Error: Wanted at least %d, got %d\n",
			minor, GMU_VER_MINOR(ver));

	/* Save the GMU version information */
	gmu->ver.core = ver;

	return 0;
}

static int hfi_send_acd_feature_ctrl(struct gmu_device *gmu,
		struct adreno_device *adreno_dev)
{
	int ret = 0;

	if (test_bit(ADRENO_ACD_CTRL, &adreno_dev->pwrctrl_flag)) {
		ret = hfi_send_acd_tbl(gmu);
		if (!ret)
			ret = hfi_send_feature_ctrl(gmu, HFI_FEATURE_ACD, 1, 0);
	}

	return ret;
}

int hfi_start(struct kgsl_device *device,
		struct gmu_device *gmu, uint32_t boot_state)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
	struct hfi_queue_table *tbl = mem_addr->hostptr;
	struct hfi_queue_header *hdr;
	int result, i;

	if (test_bit(GMU_HFI_ON, &device->gmu_core.flags))
		return 0;

	/* Force read_index to the write_index no matter what */
	for (i = 0; i < HFI_QUEUE_MAX; i++) {
		hdr = &tbl->qhdr[i];
		if (hdr->status == HFI_QUEUE_STATUS_DISABLED)
			continue;

		if (hdr->read_index != hdr->write_index) {
			dev_err(&gmu->pdev->dev,
				"HFI Q[%d] Index Error: read:0x%X write:0x%X\n",
				i, hdr->read_index, hdr->write_index);
			hdr->read_index = hdr->write_index;
		}
	}

	if (!adreno_is_a640(adreno_dev) && !adreno_is_a680(adreno_dev)) {
		result = hfi_send_gmu_init(gmu, boot_state);
		if (result)
			return result;
	}

	result = hfi_verify_fw_version(device, gmu);
	if (result)
		return result;

	if (GMU_VER_MAJOR(gmu->ver.hfi) < 2)
		result = hfi_send_dcvstbl_v1(gmu);
	else
		result = hfi_send_dcvstbl(gmu);
	if (result)
		return result;

	result = hfi_send_bwtbl(gmu);
	if (result)
		return result;

	/*
	 * If the quirk is enabled, send H2F_MSG_TEST to tell the GMU
	 * we are sending no more HFIs until the next boot; otherwise
	 * send H2F_MSG_CORE_FW_START along with the feature messages
	 */
	if (GMU_VER_MAJOR(gmu->ver.hfi) >= 2) {
		if (ADRENO_FEATURE(adreno_dev, ADRENO_ECP)) {
			result = hfi_send_feature_ctrl(gmu,
				HFI_FEATURE_ECP, 1, 0);
			if (result)
				return result;
		}

		result = hfi_send_acd_feature_ctrl(gmu, adreno_dev);
		if (result)
			return result;

		if (test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
			result = hfi_send_feature_ctrl(gmu, HFI_FEATURE_LM, 1,
				device->pwrctrl.throttle_mask);
			if (result)
				return result;
		}

		result = hfi_send_core_fw_start(gmu);
		if (result)
			return result;
	} else {
		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
			result = hfi_send_test(gmu);
			if (result)
				return result;
		}
	}

	set_bit(GMU_HFI_ON, &device->gmu_core.flags);
	return 0;
}

void hfi_stop(struct gmu_device *gmu)
{
	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
	struct hfi_queue_table *tbl = mem_addr->hostptr;
	struct hfi_queue_header *hdr;
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct kgsl_device *device = hfi->kgsldev;
	unsigned int i;

	if (!test_bit(GMU_HFI_ON, &device->gmu_core.flags))
		return;

	/* Flush the HFI queues */
	for (i = 0; i < HFI_QUEUE_MAX; i++) {
		hdr = &tbl->qhdr[i];
		if (hdr->status == HFI_QUEUE_STATUS_DISABLED)
			continue;

		if (hdr->read_index != hdr->write_index)
			dev_err(&gmu->pdev->dev,
				"HFI queue[%d] is not empty before close: rd=%d,wt=%d\n",
				i, hdr->read_index, hdr->write_index);
	}

	clear_bit(GMU_HFI_ON, &device->gmu_core.flags);
}

/* Entry point for external HFI requests */
int hfi_send_req(struct gmu_device *gmu, unsigned int id, void *data)
{
	switch (id) {
	case H2F_MSG_LM_CFG: {
		struct hfi_lmconfig_cmd *cmd = data;

		cmd->hdr = CMD_MSG_HDR(H2F_MSG_LM_CFG, sizeof(*cmd));

		return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd);
	}
	case H2F_MSG_GX_BW_PERF_VOTE: {
		struct hfi_gx_bw_perf_vote_cmd *cmd = data;

		cmd->hdr = CMD_MSG_HDR(id, sizeof(*cmd));

		return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd);
	}
	case H2F_MSG_PREPARE_SLUMBER: {
		struct hfi_prep_slumber_cmd *cmd = data;

		if (cmd->freq >= MAX_GX_LEVELS || cmd->bw >= MAX_GX_LEVELS)
			return -EINVAL;

		cmd->hdr = CMD_MSG_HDR(id, sizeof(*cmd));

		return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd);
	}
	case H2F_MSG_START: {
		struct hfi_start_cmd *cmd = data;

		cmd->hdr = CMD_MSG_HDR(id, sizeof(*cmd));

		return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd);
	}
	case H2F_MSG_GET_VALUE: {
		return hfi_send_get_value(gmu, data);
	}
	case H2F_MSG_SET_VALUE: {
		struct hfi_set_value_cmd *cmd = data;

		cmd->hdr = CMD_MSG_HDR(id, sizeof(*cmd));

		return hfi_send_generic_req(gmu, HFI_CMD_ID, cmd);
	}
	default:
		break;
	}

	return -EINVAL;
}

/* HFI interrupt handler */
irqreturn_t hfi_irq_handler(int irq, void *data)
{
	struct kgsl_device *device = data;
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int status = 0;

	adreno_read_gmureg(adreno_dev,
			ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);

	if (status & HFI_IRQ_DBGQ_MASK)
		tasklet_hi_schedule(&hfi->tasklet);
	if (status & HFI_IRQ_CM3_FAULT_MASK) {
		dev_err_ratelimited(&gmu->pdev->dev,
			"GMU CM3 fault interrupt received\n");
		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
	}
	if (status & ~HFI_IRQ_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
			"Unhandled HFI interrupts 0x%x\n",
			status & ~HFI_IRQ_MASK);

	return IRQ_HANDLED;
}