/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"

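/*
 * BMI (Bootloader Messaging Interface) is the command protocol spoken to
 * the target's boot-time firmware, before the full firmware image runs.
 * Each helper below builds a struct bmi_cmd and exchanges it with the
 * device through ath10k_hif_exchange_bmi_msg().
 */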
void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;
}

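/*
 * Tell the target that the BMI phase is over.  Only the first call sends
 * BMI_DONE; done_sent turns later calls into no-ops, and every other BMI
 * command checks the same flag and fails with -EBUSY once it is set.
 */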
int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

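/*
 * Fetch the target's version and type words, typically used by callers to
 * select a matching firmware image.  The response length is validated
 * before either field is read.
 */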
int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

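/*
 * Read target memory in chunks of at most BMI_MAX_DATA_SIZE bytes.  Each
 * chunk is one BMI_READ_MEMORY exchange whose response payload is copied
 * back into the caller's buffer.
 */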
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer += rxlen;
		length -= rxlen;
	}

	return 0;
}

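/*
 * Write target memory.  Each chunk plus the command header must fit in a
 * single BMI message, and the wire length is rounded up to a multiple of
 * four; the payload is copied before the roundup so the read never runs
 * past the caller's buffer.
 */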
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

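/*
 * Have the target jump to the code at @address with a single u32 @param;
 * the target's u32 return value comes back through @result.
 */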
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

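/*
 * Feed a block of compressed ("LZ") image data into the stream opened by
 * ath10k_bmi_lz_stream_start().  Each chunk must be 4-byte aligned, which
 * the WARN_ON_ONCE() below asserts.
 */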
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

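/*
 * Open a compressed download stream that decompresses to @address on the
 * target.  Starting a new stream (see ath10k_bmi_fast_download()) also
 * closes the previous one.
 */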
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to start LZ stream to the device\n");
		return ret;
	}

	return 0;
}

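/*
 * Download a compressed firmware image.  The 4-byte-aligned head of the
 * image is streamed directly; the final 1-3 bytes, if any, go out in a
 * zero-padded trailer word, and a dummy stream start at address 0 then
 * flushes the target's caches.
 */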
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%p length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	ret = ath10k_bmi_lz_data(ar, buffer, head_len);
	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}