/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

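/* Reset BMI state ahead of a firmware download: clear the done_sent flag
 * and, when the hardware provides an enable_pll_clk op, switch the PLL
 * clock on to speed up the following BMI transfers. A failure there is
 * only logged; the download proceeds either way.
 */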
void ath10k_bmi_start(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;

	/* Enable hardware clock to speed up firmware download */
	if (ar->hw_params.hw_ops->enable_pll_clk) {
		ret = ar->hw_params.hw_ops->enable_pll_clk(ar);
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret);
	}
}

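/* Send BMI_DONE to signal the target that the BMI phase is over. The
 * helper is effectively one-shot: once done_sent is set it returns 0
 * without touching the device, and every other BMI command starts
 * failing with -EBUSY.
 */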
int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

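/* Fetch the target version and type words with a single
 * BMI_GET_TARGET_INFO exchange. The SDIO-specific variant below exists
 * for buses that deliver the same information piecewise.
 */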
int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

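/* Marker some SDIO targets send ahead of the real target info response */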
#define TARGET_VERSION_SENTINAL 0xffffffffu

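/* SDIO flavour of the target info query. Going by the steps below, the
 * response may be prefixed with the sentinel word, after which the first
 * real word carries the response length and the remaining words carry
 * the version and type, so the exchange is performed as several reads.
 */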
int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel word before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

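/* Read a span of target memory, issuing one BMI_READ_MEMORY exchange per
 * chunk of at most BMI_MAX_DATA_SIZE bytes and copying each response
 * payload into the caller's buffer.
 */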
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer += rxlen;
		length -= rxlen;
	}

	return 0;
}

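/* Write a 32-bit SoC register on the target via BMI_WRITE_SOC_REGISTER;
 * address and value are converted to little-endian wire format first.
 */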
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

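/* Read a 32-bit SoC register via BMI_READ_SOC_REGISTER and return its
 * CPU-endian value through reg_val.
 */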
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}

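/* Write a buffer into target memory in BMI_WRITE_MEMORY chunks. The copy
 * into the command payload happens before the 4-byte roundup so nothing
 * past the caller's buffer is read, and txlen is clamped back to the
 * remaining length afterwards so the loop terminates on a final,
 * possibly unaligned, chunk.
 */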
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

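/* Ask the target to execute code at the given address with param as its
 * argument (BMI_EXECUTE) and pass the 32-bit result back to the caller.
 */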
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

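/* Feed image data into the LZ stream opened by
 * ath10k_bmi_lz_stream_start(), chunked to BMI_MAX_DATA_SIZE via
 * BMI_LZ_DATA commands. The WARN_ON_ONCE flags chunks whose length is
 * not a multiple of 4.
 */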
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

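/* Start (or restart) the target's LZ stream at the given load address;
 * subsequent BMI_LZ_DATA transfers are consumed by that stream.
 */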
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to start LZ stream to the device\n");
		return ret;
	}

	return 0;
}

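/* Download an image through the LZ stream path: open a stream at the
 * requested address, send the 4-byte-aligned head of the buffer, send
 * any 1-3 trailing bytes from a zero-padded scratch word, and finally
 * restart the stream at address 0, which per the in-body comment mainly
 * flushes the target caches.
 */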
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	ret = ath10k_bmi_lz_data(ar, buffer, head_len);
	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}