/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define QCA988X_2_0_DEVICE_ID	(0x003c)
#define QCA6174_2_1_DEVICE_ID	(0x003e)

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);

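/* Host-side Copy Engine configuration, one struct ce_attr per CE pipe. */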
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

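/*
 * The chip can put its PCIe core to sleep, and MMIO access is only safe
 * while it is awake. Wakeups are therefore refcounted (ps_wake_refcount,
 * protected by ps_lock): ath10k_pci_wake() keeps the chip awake and
 * ath10k_pci_sleep() arms a grace-period timer that lets it doze again once
 * the last user is done.
 */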
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}

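/*
 * Poll the RTC state until the SoC reports it is awake. The delay starts at
 * 5us and backs off up to 50us per iteration, bounded overall by
 * PCIE_WAKE_TIMEOUT.
 */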
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar))
			return 0;

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_ps_timer(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}

u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
	 * properly cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs > 1)
		return "msi-x";

	if (ar_pci->num_msi_intrs == 1)
		return "msi";

	return "legacy";
}

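/* Allocate, DMA-map and post a single rx skb to a copy engine dest ring.
 * Caller must hold ce_lock.
 */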
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	lockdep_assert_held(&ar_pci->ce_lock);

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	if (ret) {
		ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	lockdep_assert_held(&ar_pci->ce_lock);

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	while (num--) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
	}
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	__ath10k_pci_rx_post_pipe(pipe);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	spin_lock_bh(&ar_pci->ce_lock);
	for (i = 0; i < CE_COUNT; i++)
		__ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);

		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
					    0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32)address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0)
		memcpy(data, data_buf, orig_nbytes);
	else
		ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
			    address, ret);

	if (data_buf)
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)	\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data; /* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff_head list;
	struct sk_buff *skb;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
					     &nbytes, &transfer_id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		cb->tx_completion(ar, skb);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		cb->rx_completion(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

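/*
 * Queue a scatter-gather list on a CE source ring: every fragment but the
 * last is enqueued with CE_SEND_FLAG_GATHER, and any descriptors already
 * queued are reverted if an enqueue fails.
 */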
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items -1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_pci->ce_lock);
	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}

static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				    size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

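/* Read the firmware crash register dump (hi_failure_state) through the
 * diagnostic window, log it and, if crash_data is provided, preserve it.
 */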
static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	del_timer_sync(&ar_pci->rx_post_retry);
}

static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return 0;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

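/* Mask/unmask the firmware interrupt line (used among other things for crash
 * indications) by toggling CORE_CTRL_PCIE_REG_31_MASK in the SoC core
 * control register.
 */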
static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
	val &= ~CORE_CTRL_PCIE_REG_31_MASK;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}

static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
	val |= CORE_CTRL_PCIE_REG_31_MASK;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		synchronize_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}

Michal Kazior099ac7c2014-10-28 10:32:05 +01001387static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001388{
1389 struct ath10k *ar;
Michal Kazior099ac7c2014-10-28 10:32:05 +01001390 struct ath10k_ce_pipe *ce_pipe;
1391 struct ath10k_ce_ring *ce_ring;
1392 struct sk_buff *skb;
1393 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001394
Michal Kazior099ac7c2014-10-28 10:32:05 +01001395 ar = pci_pipe->hif_ce_state;
1396 ce_pipe = pci_pipe->ce_hdl;
1397 ce_ring = ce_pipe->dest_ring;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001398
Michal Kazior099ac7c2014-10-28 10:32:05 +01001399 if (!ce_ring)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001400 return;
1401
Michal Kazior099ac7c2014-10-28 10:32:05 +01001402 if (!pci_pipe->buf_sz)
1403 return;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001404
Michal Kazior099ac7c2014-10-28 10:32:05 +01001405 for (i = 0; i < ce_ring->nentries; i++) {
1406 skb = ce_ring->per_transfer_context[i];
1407 if (!skb)
1408 continue;
1409
1410 ce_ring->per_transfer_context[i] = NULL;
1411
Michal Kazior8582bf32015-01-24 12:14:47 +02001412 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
Michal Kazior099ac7c2014-10-28 10:32:05 +01001413 skb->len + skb_tailroom(skb),
Kalle Valo5e3dd152013-06-12 20:52:10 +03001414 DMA_FROM_DEVICE);
Michal Kazior099ac7c2014-10-28 10:32:05 +01001415 dev_kfree_skb_any(skb);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001416 }
1417}
1418
Michal Kazior099ac7c2014-10-28 10:32:05 +01001419static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001420{
1421 struct ath10k *ar;
1422 struct ath10k_pci *ar_pci;
Michal Kazior099ac7c2014-10-28 10:32:05 +01001423 struct ath10k_ce_pipe *ce_pipe;
1424 struct ath10k_ce_ring *ce_ring;
1425 struct ce_desc *ce_desc;
1426 struct sk_buff *skb;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001427 unsigned int id;
Michal Kazior099ac7c2014-10-28 10:32:05 +01001428 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001429
Michal Kazior099ac7c2014-10-28 10:32:05 +01001430 ar = pci_pipe->hif_ce_state;
1431 ar_pci = ath10k_pci_priv(ar);
1432 ce_pipe = pci_pipe->ce_hdl;
1433 ce_ring = ce_pipe->src_ring;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001434
Michal Kazior099ac7c2014-10-28 10:32:05 +01001435 if (!ce_ring)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001436 return;
1437
Michal Kazior099ac7c2014-10-28 10:32:05 +01001438 if (!pci_pipe->buf_sz)
1439 return;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001440
Michal Kazior099ac7c2014-10-28 10:32:05 +01001441 ce_desc = ce_ring->shadow_base;
1442 if (WARN_ON(!ce_desc))
1443 return;
1444
1445 for (i = 0; i < ce_ring->nentries; i++) {
1446 skb = ce_ring->per_transfer_context[i];
1447 if (!skb)
Michal Kazior2415fc12013-11-08 08:01:32 +01001448 continue;
Michal Kazior2415fc12013-11-08 08:01:32 +01001449
Michal Kazior099ac7c2014-10-28 10:32:05 +01001450 ce_ring->per_transfer_context[i] = NULL;
1451 id = MS(__le16_to_cpu(ce_desc[i].flags),
1452 CE_DESC_FLAGS_META_DATA);
1453
Michal Kaziord84a5122014-11-27 11:09:37 +01001454 ar_pci->msg_callbacks_current.tx_completion(ar, skb);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001455 }
1456}
1457
1458/*
 1459 * Cleanup residual buffers for device shutdown:
 1460 *   - buffers that were enqueued for receive
 1461 *   - buffers that were to be sent
 1462 * Note: buffers that had completed but were not yet processed are
 1463 * on a completion queue. They are handled when the completion
 1464 * thread shuts down.
1465 */
1466static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1467{
1468 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1469 int pipe_num;
1470
Michal Kaziorfad6ed72013-11-08 08:01:23 +01001471 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
Michal Kazior87263e52013-08-27 13:08:01 +02001472 struct ath10k_pci_pipe *pipe_info;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001473
1474 pipe_info = &ar_pci->pipe_info[pipe_num];
1475 ath10k_pci_rx_pipe_cleanup(pipe_info);
1476 ath10k_pci_tx_pipe_cleanup(pipe_info);
1477 }
1478}
1479
1480static void ath10k_pci_ce_deinit(struct ath10k *ar)
1481{
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001482 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001483
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001484 for (i = 0; i < CE_COUNT; i++)
1485 ath10k_ce_deinit_pipe(ar, i);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001486}
1487
Michal Kazior728f95e2014-08-22 14:33:14 +02001488static void ath10k_pci_flush(struct ath10k *ar)
1489{
1490 ath10k_pci_kill_tasklet(ar);
1491 ath10k_pci_buffer_cleanup(ar);
1492}
1493
Kalle Valo5e3dd152013-06-12 20:52:10 +03001494static void ath10k_pci_hif_stop(struct ath10k *ar)
1495{
Michal Kazior77258d42015-05-18 09:38:18 +00001496 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1497 unsigned long flags;
1498
Michal Kazior7aa7a722014-08-25 12:09:38 +02001499 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
Michal Kazior32270b62013-08-02 09:15:47 +02001500
Michal Kazior10d23db2014-08-22 14:33:15 +02001501 /* Most likely the device has HTT Rx ring configured. The only way to
 1502 * prevent the device from accessing (and possibly corrupting) host
1503 * memory is to reset the chip now.
Michal Kaziore75db4e2014-08-28 22:14:16 +03001504 *
1505 * There's also no known way of masking MSI interrupts on the device.
 1506 * For ranged MSI the CE-related interrupts can be masked. However,
 1507 * regardless of how many MSI interrupts are assigned, the first one
 1508 * is always used for firmware indications (crashes) and cannot be
 1509 * masked. To prevent the device from asserting the interrupt, reset it
1510 * before proceeding with cleanup.
Michal Kazior10d23db2014-08-22 14:33:15 +02001511 */
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001512 ath10k_pci_warm_reset(ar);
Michal Kaziore75db4e2014-08-28 22:14:16 +03001513
1514 ath10k_pci_irq_disable(ar);
Michal Kazior7c0f0e32014-10-20 14:14:38 +02001515 ath10k_pci_irq_sync(ar);
Michal Kaziore75db4e2014-08-28 22:14:16 +03001516 ath10k_pci_flush(ar);
Michal Kazior77258d42015-05-18 09:38:18 +00001517
1518 spin_lock_irqsave(&ar_pci->ps_lock, flags);
1519 WARN_ON(ar_pci->ps_wake_refcount > 0);
1520 spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001521}
1522
1523static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1524 void *req, u32 req_len,
1525 void *resp, u32 *resp_len)
1526{
1527 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior2aa39112013-08-27 13:08:02 +02001528 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1529 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1530 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1531 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001532 dma_addr_t req_paddr = 0;
1533 dma_addr_t resp_paddr = 0;
1534 struct bmi_xfer xfer = {};
1535 void *treq, *tresp = NULL;
1536 int ret = 0;
1537
Michal Kazior85622cd2013-11-25 14:06:22 +01001538 might_sleep();
1539
Kalle Valo5e3dd152013-06-12 20:52:10 +03001540 if (resp && !resp_len)
1541 return -EINVAL;
1542
1543 if (resp && resp_len && *resp_len == 0)
1544 return -EINVAL;
1545
1546 treq = kmemdup(req, req_len, GFP_KERNEL);
1547 if (!treq)
1548 return -ENOMEM;
1549
1550 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1551 ret = dma_mapping_error(ar->dev, req_paddr);
1552 if (ret)
1553 goto err_dma;
1554
1555 if (resp && resp_len) {
1556 tresp = kzalloc(*resp_len, GFP_KERNEL);
1557 if (!tresp) {
1558 ret = -ENOMEM;
1559 goto err_req;
1560 }
1561
1562 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1563 DMA_FROM_DEVICE);
1564 ret = dma_mapping_error(ar->dev, resp_paddr);
1565 if (ret)
1566 goto err_req;
1567
1568 xfer.wait_for_resp = true;
1569 xfer.resp_len = 0;
1570
Michal Kazior728f95e2014-08-22 14:33:14 +02001571 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001572 }
1573
Kalle Valo5e3dd152013-06-12 20:52:10 +03001574 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1575 if (ret)
1576 goto err_resp;
1577
Michal Kazior85622cd2013-11-25 14:06:22 +01001578 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1579 if (ret) {
Kalle Valo5e3dd152013-06-12 20:52:10 +03001580 u32 unused_buffer;
1581 unsigned int unused_nbytes;
1582 unsigned int unused_id;
1583
Kalle Valo5e3dd152013-06-12 20:52:10 +03001584 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1585 &unused_nbytes, &unused_id);
1586 } else {
 1587 /* the wait returned zero, i.e. the transfer did not time out */
1588 ret = 0;
1589 }
1590
1591err_resp:
1592 if (resp) {
1593 u32 unused_buffer;
1594
1595 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1596 dma_unmap_single(ar->dev, resp_paddr,
1597 *resp_len, DMA_FROM_DEVICE);
1598 }
1599err_req:
1600 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1601
1602 if (ret == 0 && resp_len) {
1603 *resp_len = min(*resp_len, xfer.resp_len);
 1604 memcpy(resp, tresp, *resp_len);
1605 }
1606err_dma:
1607 kfree(treq);
1608 kfree(tresp);
1609
1610 return ret;
1611}
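
/* A minimal caller sketch (hypothetical request/response buffers; the
 * real callers live in bmi.c and go through the
 * ath10k_hif_exchange_bmi_msg() wrapper):
 *
 *	u8 req[4], resp[64];
 *	u32 resp_len = sizeof(resp);
 *	int ret;
 *
 *	ret = ath10k_hif_exchange_bmi_msg(ar, req, sizeof(req),
 *					  resp, &resp_len);
 *	if (!ret)
 *		;	// first resp_len bytes of resp are valid
 */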
1612
Michal Kazior5440ce22013-09-03 15:09:58 +02001613static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001614{
Michal Kazior5440ce22013-09-03 15:09:58 +02001615 struct bmi_xfer *xfer;
1616 u32 ce_data;
1617 unsigned int nbytes;
1618 unsigned int transfer_id;
1619
1620 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1621 &nbytes, &transfer_id))
1622 return;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001623
Michal Kazior2374b182014-07-14 16:25:25 +03001624 xfer->tx_done = true;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001625}
1626
Michal Kazior5440ce22013-09-03 15:09:58 +02001627static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001628{
Michal Kazior7aa7a722014-08-25 12:09:38 +02001629 struct ath10k *ar = ce_state->ar;
Michal Kazior5440ce22013-09-03 15:09:58 +02001630 struct bmi_xfer *xfer;
1631 u32 ce_data;
1632 unsigned int nbytes;
1633 unsigned int transfer_id;
1634 unsigned int flags;
1635
1636 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1637 &nbytes, &transfer_id, &flags))
1638 return;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001639
Michal Kazior04ed9df2014-10-28 10:34:36 +01001640 if (WARN_ON_ONCE(!xfer))
1641 return;
1642
Kalle Valo5e3dd152013-06-12 20:52:10 +03001643 if (!xfer->wait_for_resp) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001644 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001645 return;
1646 }
1647
1648 xfer->resp_len = nbytes;
Michal Kazior2374b182014-07-14 16:25:25 +03001649 xfer->rx_done = true;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001650}
1651
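/* Busy-poll both BMI pipes until the request has been sent (and, if a
 * response is expected, received) or BMI_COMMUNICATION_TIMEOUT_HZ
 * expires. schedule() is called between polls, so this must not be
 * used from atomic context (hence the might_sleep() in the caller).
 */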
Michal Kazior85622cd2013-11-25 14:06:22 +01001652static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1653 struct ath10k_ce_pipe *rx_pipe,
1654 struct bmi_xfer *xfer)
1655{
1656 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1657
1658 while (time_before_eq(jiffies, timeout)) {
1659 ath10k_pci_bmi_send_done(tx_pipe);
1660 ath10k_pci_bmi_recv_data(rx_pipe);
1661
Michal Kazior2374b182014-07-14 16:25:25 +03001662 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
Michal Kazior85622cd2013-11-25 14:06:22 +01001663 return 0;
1664
1665 schedule();
1666 }
1667
1668 return -ETIMEDOUT;
1669}
1670
Kalle Valo5e3dd152013-06-12 20:52:10 +03001671/*
Kalle Valo5e3dd152013-06-12 20:52:10 +03001672 * Send an interrupt to the device to wake up the Target CPU
1673 * so it has an opportunity to notice any changed state.
1674 */
1675static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1676{
Michal Kazior9e264942014-09-02 11:00:21 +03001677 u32 addr, val;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001678
Michal Kazior9e264942014-09-02 11:00:21 +03001679 addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1680 val = ath10k_pci_read32(ar, addr);
1681 val |= CORE_CTRL_CPU_INTR_MASK;
1682 ath10k_pci_write32(ar, addr, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001683
Michal Kazior1d2b48d2013-11-08 08:01:34 +01001684 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001685}
1686
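/* Number of IRAM banks the target may switch to early allocation.
 * This is chip and revision specific and is fed into the
 * hi_early_alloc setup in ath10k_pci_init_config() below.
 */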
Michal Kaziord63955b2015-01-24 12:14:49 +02001687static int ath10k_pci_get_num_banks(struct ath10k *ar)
1688{
1689 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1690
1691 switch (ar_pci->pdev->device) {
1692 case QCA988X_2_0_DEVICE_ID:
1693 return 1;
1694 case QCA6174_2_1_DEVICE_ID:
1695 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1696 case QCA6174_HW_1_0_CHIP_ID_REV:
1697 case QCA6174_HW_1_1_CHIP_ID_REV:
Michal Kazior11a002e2015-04-20 09:20:41 +00001698 case QCA6174_HW_2_1_CHIP_ID_REV:
1699 case QCA6174_HW_2_2_CHIP_ID_REV:
Michal Kaziord63955b2015-01-24 12:14:49 +02001700 return 3;
1701 case QCA6174_HW_1_3_CHIP_ID_REV:
1702 return 2;
Michal Kaziord63955b2015-01-24 12:14:49 +02001703 case QCA6174_HW_3_0_CHIP_ID_REV:
1704 case QCA6174_HW_3_1_CHIP_ID_REV:
1705 case QCA6174_HW_3_2_CHIP_ID_REV:
1706 return 9;
1707 }
1708 break;
1709 }
1710
1711 ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1712 return 1;
1713}
1714
Kalle Valo5e3dd152013-06-12 20:52:10 +03001715static int ath10k_pci_init_config(struct ath10k *ar)
1716{
1717 u32 interconnect_targ_addr;
1718 u32 pcie_state_targ_addr = 0;
1719 u32 pipe_cfg_targ_addr = 0;
1720 u32 svc_to_pipe_map = 0;
1721 u32 pcie_config_flags = 0;
1722 u32 ealloc_value;
1723 u32 ealloc_targ_addr;
1724 u32 flag2_value;
1725 u32 flag2_targ_addr;
1726 int ret = 0;
1727
1728 /* Download to Target the CE Config and the service-to-CE map */
1729 interconnect_targ_addr =
1730 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1731
1732 /* Supply Target-side CE configuration */
Michal Kazior9e264942014-09-02 11:00:21 +03001733 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
1734 &pcie_state_targ_addr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001735 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001736 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001737 return ret;
1738 }
1739
1740 if (pcie_state_targ_addr == 0) {
1741 ret = -EIO;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001742 ath10k_err(ar, "Invalid pcie state addr\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001743 return ret;
1744 }
1745
Michal Kazior9e264942014-09-02 11:00:21 +03001746 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
Kalle Valo5e3dd152013-06-12 20:52:10 +03001747 offsetof(struct pcie_state,
Michal Kazior9e264942014-09-02 11:00:21 +03001748 pipe_cfg_addr)),
1749 &pipe_cfg_targ_addr);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001750 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001751 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001752 return ret;
1753 }
1754
1755 if (pipe_cfg_targ_addr == 0) {
1756 ret = -EIO;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001757 ath10k_err(ar, "Invalid pipe cfg addr\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001758 return ret;
1759 }
1760
1761 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
Kalle Valo5b07e072014-09-14 12:50:06 +03001762 target_ce_config_wlan,
1763 sizeof(target_ce_config_wlan));
Kalle Valo5e3dd152013-06-12 20:52:10 +03001764
1765 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001766 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001767 return ret;
1768 }
1769
Michal Kazior9e264942014-09-02 11:00:21 +03001770 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
Kalle Valo5e3dd152013-06-12 20:52:10 +03001771 offsetof(struct pcie_state,
Michal Kazior9e264942014-09-02 11:00:21 +03001772 svc_to_pipe_map)),
1773 &svc_to_pipe_map);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001774 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001775 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001776 return ret;
1777 }
1778
1779 if (svc_to_pipe_map == 0) {
1780 ret = -EIO;
Michal Kazior7aa7a722014-08-25 12:09:38 +02001781 ath10k_err(ar, "Invalid svc_to_pipe map\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03001782 return ret;
1783 }
1784
1785 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
Kalle Valo5b07e072014-09-14 12:50:06 +03001786 target_service_to_ce_map_wlan,
1787 sizeof(target_service_to_ce_map_wlan));
Kalle Valo5e3dd152013-06-12 20:52:10 +03001788 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001789 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001790 return ret;
1791 }
1792
Michal Kazior9e264942014-09-02 11:00:21 +03001793 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
Kalle Valo5e3dd152013-06-12 20:52:10 +03001794 offsetof(struct pcie_state,
Michal Kazior9e264942014-09-02 11:00:21 +03001795 config_flags)),
1796 &pcie_config_flags);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001797 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001798 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001799 return ret;
1800 }
1801
1802 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1803
Michal Kazior9e264942014-09-02 11:00:21 +03001804 ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
1805 offsetof(struct pcie_state,
1806 config_flags)),
1807 pcie_config_flags);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001808 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001809 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001810 return ret;
1811 }
1812
1813 /* configure early allocation */
1814 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1815
Michal Kazior9e264942014-09-02 11:00:21 +03001816 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001817 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001818 ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001819 return ret;
1820 }
1821
1822 /* first bank is switched to IRAM */
1823 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1824 HI_EARLY_ALLOC_MAGIC_MASK);
Michal Kaziord63955b2015-01-24 12:14:49 +02001825 ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
1826 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03001827 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1828
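/* For example, on QCA988X (one IRAM bank) this encodes the magic
 * cookie in the magic field and 1 in the banks field of
 * hi_early_alloc; the HI_EARLY_ALLOC_* shifts and masks come from
 * targaddrs.h.
 */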
Michal Kazior9e264942014-09-02 11:00:21 +03001829 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001830 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001831 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001832 return ret;
1833 }
1834
1835 /* Tell Target to proceed with initialization */
1836 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1837
Michal Kazior9e264942014-09-02 11:00:21 +03001838 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001839 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001840 ath10k_err(ar, "Failed to get option val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001841 return ret;
1842 }
1843
1844 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1845
Michal Kazior9e264942014-09-02 11:00:21 +03001846 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001847 if (ret != 0) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001848 ath10k_err(ar, "Failed to set option val: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001849 return ret;
1850 }
1851
1852 return 0;
1853}
1854
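/* Allocate one pipe per copy engine and register the send/recv
 * completion handlers. The last CE is set aside as the diagnostic
 * window used for target memory access and carries no buffers.
 */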
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001855static int ath10k_pci_alloc_pipes(struct ath10k *ar)
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001856{
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001857 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1858 struct ath10k_pci_pipe *pipe;
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001859 int i, ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001860
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001861 for (i = 0; i < CE_COUNT; i++) {
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001862 pipe = &ar_pci->pipe_info[i];
1863 pipe->ce_hdl = &ar_pci->ce_states[i];
1864 pipe->pipe_num = i;
1865 pipe->hif_ce_state = ar;
1866
1867 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
1868 ath10k_pci_ce_send_done,
1869 ath10k_pci_ce_recv_data);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001870 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001871 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001872 i, ret);
1873 return ret;
1874 }
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001875
1876 /* Last CE is Diagnostic Window */
1877 if (i == CE_COUNT - 1) {
1878 ar_pci->ce_diag = pipe->ce_hdl;
1879 continue;
1880 }
1881
1882 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001883 }
1884
1885 return 0;
1886}
1887
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001888static void ath10k_pci_free_pipes(struct ath10k *ar)
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001889{
1890 int i;
1891
1892 for (i = 0; i < CE_COUNT; i++)
1893 ath10k_ce_free_pipe(ar, i);
1894}
Kalle Valo5e3dd152013-06-12 20:52:10 +03001895
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001896static int ath10k_pci_init_pipes(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001897{
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001898 int i, ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001899
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001900 for (i = 0; i < CE_COUNT; i++) {
1901 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001902 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02001903 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
Michal Kazior84cbf3a2014-10-20 14:14:39 +02001904 i, ret);
Michal Kazior25d0dbc2014-03-28 10:02:38 +02001905 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001906 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03001907 }
1908
Kalle Valo5e3dd152013-06-12 20:52:10 +03001909 return 0;
1910}
1911
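/* Firmware signals a crash by setting FW_IND_EVENT_PENDING in the
 * FW_INDICATOR register; the two helpers below test and acknowledge
 * that bit.
 */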
Michal Kazior5c771e72014-08-22 14:23:34 +02001912static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001913{
Michal Kazior5c771e72014-08-22 14:23:34 +02001914 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
1915 FW_IND_EVENT_PENDING;
1916}
Kalle Valo5e3dd152013-06-12 20:52:10 +03001917
Michal Kazior5c771e72014-08-22 14:23:34 +02001918static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
1919{
1920 u32 val;
Kalle Valo5e3dd152013-06-12 20:52:10 +03001921
Michal Kazior5c771e72014-08-22 14:23:34 +02001922 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1923 val &= ~FW_IND_EVENT_PENDING;
1924 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001925}
1926
Michal Kaziorde013572014-05-14 16:56:16 +03001927/* this function effectively clears target memory controller assert line */
1928static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1929{
1930 u32 val;
1931
1932 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1933 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1934 val | SOC_RESET_CONTROL_SI0_RST_MASK);
1935 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1936
1937 msleep(10);
1938
1939 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1940 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1941 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1942 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1943
1944 msleep(10);
1945}
1946
Michal Kazior61c16482014-10-28 10:32:06 +01001947static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001948{
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001949 u32 val;
1950
Kalle Valob39712c2014-03-28 09:32:46 +02001951 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001952
Michal Kazior61c16482014-10-28 10:32:06 +01001953 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1954 SOC_RESET_CONTROL_ADDRESS);
1955 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1956 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1957}
1958
1959static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
1960{
1961 u32 val;
1962
1963 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1964 SOC_RESET_CONTROL_ADDRESS);
1965
1966 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1967 val | SOC_RESET_CONTROL_CE_RST_MASK);
1968 msleep(10);
1969 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1970 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1971}
1972
1973static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
1974{
1975 u32 val;
1976
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001977 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1978 SOC_LF_TIMER_CONTROL0_ADDRESS);
1979 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1980 SOC_LF_TIMER_CONTROL0_ADDRESS,
1981 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
Michal Kazior61c16482014-10-28 10:32:06 +01001982}
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001983
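/* Full warm reset sequence: quiesce the target CPU and memory
 * controller, bring the copy engine up so the wait-for-init poll can
 * run, then reset the CE and CPU once more before the final wait for
 * the firmware indicator.
 */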
Michal Kazior61c16482014-10-28 10:32:06 +01001984static int ath10k_pci_warm_reset(struct ath10k *ar)
1985{
1986 int ret;
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001987
Michal Kazior61c16482014-10-28 10:32:06 +01001988 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01001989
Michal Kazior61c16482014-10-28 10:32:06 +01001990 spin_lock_bh(&ar->data_lock);
1991 ar->stats.fw_warm_reset_counter++;
1992 spin_unlock_bh(&ar->data_lock);
1993
1994 ath10k_pci_irq_disable(ar);
1995
1996 /* Make sure the target CPU is not doing anything dangerous, e.g. if it
1997 * were to access copy engine while host performs copy engine reset
1998 * then it is possible for the device to confuse pci-e controller to
1999 * the point of bringing host system to a complete stop (i.e. hang).
2000 */
Michal Kaziorde013572014-05-14 16:56:16 +03002001 ath10k_pci_warm_reset_si0(ar);
Michal Kazior61c16482014-10-28 10:32:06 +01002002 ath10k_pci_warm_reset_cpu(ar);
2003 ath10k_pci_init_pipes(ar);
2004 ath10k_pci_wait_for_target_init(ar);
Michal Kaziorde013572014-05-14 16:56:16 +03002005
Michal Kazior61c16482014-10-28 10:32:06 +01002006 ath10k_pci_warm_reset_clear_lf(ar);
2007 ath10k_pci_warm_reset_ce(ar);
2008 ath10k_pci_warm_reset_cpu(ar);
2009 ath10k_pci_init_pipes(ar);
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002010
Michal Kazior61c16482014-10-28 10:32:06 +01002011 ret = ath10k_pci_wait_for_target_init(ar);
2012 if (ret) {
2013 ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2014 return ret;
2015 }
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002016
Michal Kazior7aa7a722014-08-25 12:09:38 +02002017 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002018
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002019 return 0;
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002020}
2021
Michal Kaziord63955b2015-01-24 12:14:49 +02002022static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
Michal Kazior0bc14d02014-10-28 10:32:07 +01002023{
2024 int i, ret;
2025 u32 val;
2026
Michal Kaziord63955b2015-01-24 12:14:49 +02002027 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
Michal Kazior0bc14d02014-10-28 10:32:07 +01002028
 2029 /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
 2030 * It is thus preferred to use warm reset, which is safer but may not be
 2031 * able to recover the device from all possible failure scenarios.
 2032 *
 2033 * Warm reset doesn't always work on the first try, so attempt it a few
 2034 * times before giving up.
2035 */
2036 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2037 ret = ath10k_pci_warm_reset(ar);
2038 if (ret) {
2039 ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2040 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2041 ret);
2042 continue;
2043 }
2044
2045 /* FIXME: Sometimes copy engine doesn't recover after warm
2046 * reset. In most cases this needs cold reset. In some of these
2047 * cases the device is in such a state that a cold reset may
2048 * lock up the host.
2049 *
2050 * Reading any host interest register via copy engine is
2051 * sufficient to verify if device is capable of booting
2052 * firmware blob.
2053 */
2054 ret = ath10k_pci_init_pipes(ar);
2055 if (ret) {
2056 ath10k_warn(ar, "failed to init copy engine: %d\n",
2057 ret);
2058 continue;
2059 }
2060
2061 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2062 &val);
2063 if (ret) {
2064 ath10k_warn(ar, "failed to poke copy engine: %d\n",
2065 ret);
2066 continue;
2067 }
2068
2069 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2070 return 0;
2071 }
2072
2073 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2074 ath10k_warn(ar, "refusing cold reset as requested\n");
2075 return -EPERM;
2076 }
2077
2078 ret = ath10k_pci_cold_reset(ar);
2079 if (ret) {
2080 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2081 return ret;
2082 }
2083
2084 ret = ath10k_pci_wait_for_target_init(ar);
2085 if (ret) {
2086 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2087 ret);
2088 return ret;
2089 }
2090
Michal Kaziord63955b2015-01-24 12:14:49 +02002091 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
Michal Kazior0bc14d02014-10-28 10:32:07 +01002092
2093 return 0;
2094}
2095
Michal Kaziord63955b2015-01-24 12:14:49 +02002096static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2097{
2098 int ret;
2099
2100 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2101
2102 /* FIXME: QCA6174 requires cold + warm reset to work. */
2103
2104 ret = ath10k_pci_cold_reset(ar);
2105 if (ret) {
2106 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2107 return ret;
2108 }
2109
2110 ret = ath10k_pci_wait_for_target_init(ar);
2111 if (ret) {
2112 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2113 ret);
2114 return ret;
2115 }
2116
2117 ret = ath10k_pci_warm_reset(ar);
2118 if (ret) {
2119 ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2120 return ret;
2121 }
2122
2123 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2124
2125 return 0;
2126}
2127
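/* Dispatch to the chip specific reset sequence based on the detected
 * hw revision.
 */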
2128static int ath10k_pci_chip_reset(struct ath10k *ar)
2129{
2130 if (QCA_REV_988X(ar))
2131 return ath10k_pci_qca988x_chip_reset(ar);
2132 else if (QCA_REV_6174(ar))
2133 return ath10k_pci_qca6174_chip_reset(ar);
2134 else
2135 return -ENOTSUPP;
2136}
2137
Michal Kazior0bc14d02014-10-28 10:32:07 +01002138static int ath10k_pci_hif_power_up(struct ath10k *ar)
Michal Kazior8c5c5362013-07-16 09:38:50 +02002139{
Janusz Dziedzic76d870e2015-05-18 09:38:16 +00002140 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02002141 int ret;
2142
Michal Kazior0bc14d02014-10-28 10:32:07 +01002143 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2144
Janusz Dziedzic76d870e2015-05-18 09:38:16 +00002145 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2146 &ar_pci->link_ctl);
2147 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2148 ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2149
Michal Kazior8c5c5362013-07-16 09:38:50 +02002150 /*
2151 * Bring the target up cleanly.
2152 *
2153 * The target may be in an undefined state with an AUX-powered Target
2154 * and a Host in WoW mode. If the Host crashes, loses power, or is
2155 * restarted (without unloading the driver) then the Target is left
2156 * (aux) powered and running. On a subsequent driver load, the Target
2157 * is in an unexpected state. We try to catch that here in order to
2158 * reset the Target and retry the probe.
2159 */
Michal Kazior0bc14d02014-10-28 10:32:07 +01002160 ret = ath10k_pci_chip_reset(ar);
Michal Kazior5b2589f2013-11-08 08:01:30 +01002161 if (ret) {
Michal Kaziora2fa8802015-01-12 15:29:37 +01002162 if (ath10k_pci_has_fw_crashed(ar)) {
2163 ath10k_warn(ar, "firmware crashed during chip reset\n");
2164 ath10k_pci_fw_crashed_clear(ar);
2165 ath10k_pci_fw_crashed_dump(ar);
2166 }
2167
Michal Kazior0bc14d02014-10-28 10:32:07 +01002168 ath10k_err(ar, "failed to reset chip: %d\n", ret);
Bartosz Markowski707b1bbd2014-10-31 09:03:43 +01002169 goto err_sleep;
Michal Kazior5b2589f2013-11-08 08:01:30 +01002170 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02002171
Michal Kazior84cbf3a2014-10-20 14:14:39 +02002172 ret = ath10k_pci_init_pipes(ar);
Michal Kazior8c5c5362013-07-16 09:38:50 +02002173 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002174 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
Bartosz Markowski707b1bbd2014-10-31 09:03:43 +01002175 goto err_sleep;
Michal Kaziorab977bd2013-11-25 14:06:26 +01002176 }
2177
Michal Kazior98563d52013-11-08 08:01:33 +01002178 ret = ath10k_pci_init_config(ar);
2179 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002180 ath10k_err(ar, "failed to setup init config: %d\n", ret);
Michal Kazior5c771e72014-08-22 14:23:34 +02002181 goto err_ce;
Michal Kazior98563d52013-11-08 08:01:33 +01002182 }
Michal Kazior8c5c5362013-07-16 09:38:50 +02002183
2184 ret = ath10k_pci_wake_target_cpu(ar);
2185 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002186 ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
Michal Kazior5c771e72014-08-22 14:23:34 +02002187 goto err_ce;
Michal Kazior8c5c5362013-07-16 09:38:50 +02002188 }
2189
2190 return 0;
2191
2192err_ce:
2193 ath10k_pci_ce_deinit(ar);
Michal Kazior0bc14d02014-10-28 10:32:07 +01002194
Bartosz Markowski707b1bbd2014-10-31 09:03:43 +01002195err_sleep:
Michal Kazior8c5c5362013-07-16 09:38:50 +02002196 return ret;
2197}
2198
2199static void ath10k_pci_hif_power_down(struct ath10k *ar)
2200{
Michal Kazior7aa7a722014-08-25 12:09:38 +02002201 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
Bartosz Markowski8cc8df92013-08-02 09:58:49 +02002202
Michal Kaziorc011b282014-10-28 10:32:08 +01002203 /* Currently hif_power_up effectively performs a reset and hif_stop
2204 * resets the chip as well so there's no point in resetting here.
2205 */
Michal Kazior8c5c5362013-07-16 09:38:50 +02002206}
2207
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002208#ifdef CONFIG_PM
2209
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002210static int ath10k_pci_hif_suspend(struct ath10k *ar)
2211{
Michal Kazior77258d42015-05-18 09:38:18 +00002212 /* The grace timer can still be counting down and ar->ps_awake may still be true.
2213 * It is known that the device may be asleep after resuming regardless
2214 * of the SoC powersave state before suspending. Hence make sure the
2215 * device is asleep before proceeding.
2216 */
2217 ath10k_pci_sleep_sync(ar);
Michal Kazior320e14b2015-03-02 13:22:13 +01002218
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002219 return 0;
2220}
2221
2222static int ath10k_pci_hif_resume(struct ath10k *ar)
2223{
2224 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2225 struct pci_dev *pdev = ar_pci->pdev;
2226 u32 val;
2227
Michal Kazior9ff4be92015-03-02 13:22:14 +01002228 /* Suspend/Resume resets the PCI configuration space, so we have to
2229 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2230 * from interfering with C3 CPU state. pci_restore_state won't help
 2231 * here since it only restores the first 64 bytes of the PCI config header.
2232 */
2233 pci_read_config_dword(pdev, 0x40, &val);
2234 if ((val & 0x0000ff00) != 0)
2235 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002236
Michal Kazior77258d42015-05-18 09:38:18 +00002237 return 0;
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002238}
2239#endif
2240
Kalle Valo5e3dd152013-06-12 20:52:10 +03002241static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
Michal Kazior726346f2014-02-27 18:50:04 +02002242 .tx_sg = ath10k_pci_hif_tx_sg,
Kalle Valoeef25402014-09-24 14:16:52 +03002243 .diag_read = ath10k_pci_hif_diag_read,
Yanbo Li9f65ad22014-11-25 12:24:48 +02002244 .diag_write = ath10k_pci_diag_write_mem,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002245 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2246 .start = ath10k_pci_hif_start,
2247 .stop = ath10k_pci_hif_stop,
2248 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2249 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2250 .send_complete_check = ath10k_pci_hif_send_complete_check,
Michal Kaziore799bbf2013-07-05 16:15:12 +03002251 .set_callbacks = ath10k_pci_hif_set_callbacks,
Kalle Valo5e3dd152013-06-12 20:52:10 +03002252 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
Michal Kazior8c5c5362013-07-16 09:38:50 +02002253 .power_up = ath10k_pci_hif_power_up,
2254 .power_down = ath10k_pci_hif_power_down,
Yanbo Li077a3802014-11-25 12:24:33 +02002255 .read32 = ath10k_pci_read32,
2256 .write32 = ath10k_pci_write32,
Michal Kazior8cd13ca2013-07-16 09:38:54 +02002257#ifdef CONFIG_PM
2258 .suspend = ath10k_pci_hif_suspend,
2259 .resume = ath10k_pci_hif_resume,
2260#endif
Kalle Valo5e3dd152013-06-12 20:52:10 +03002261};
2262
2263static void ath10k_pci_ce_tasklet(unsigned long ptr)
2264{
Michal Kazior87263e52013-08-27 13:08:01 +02002265 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002266 struct ath10k_pci *ar_pci = pipe->ar_pci;
2267
2268 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2269}
2270
2271static void ath10k_msi_err_tasklet(unsigned long data)
2272{
2273 struct ath10k *ar = (struct ath10k *)data;
2274
Michal Kazior5c771e72014-08-22 14:23:34 +02002275 if (!ath10k_pci_has_fw_crashed(ar)) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002276 ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
Michal Kazior5c771e72014-08-22 14:23:34 +02002277 return;
2278 }
2279
Michal Kazior6f3b7ff2015-01-24 12:14:52 +02002280 ath10k_pci_irq_disable(ar);
Michal Kazior5c771e72014-08-22 14:23:34 +02002281 ath10k_pci_fw_crashed_clear(ar);
2282 ath10k_pci_fw_crashed_dump(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002283}
2284
2285/*
2286 * Handler for a per-engine interrupt on a PARTICULAR CE.
2287 * This is used in cases where each CE has a private MSI interrupt.
2288 */
2289static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2290{
2291 struct ath10k *ar = arg;
2292 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2293 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2294
Dan Carpentere5742672013-06-18 10:28:46 +03002295 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002296 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
2297 ce_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002298 return IRQ_HANDLED;
2299 }
2300
2301 /*
2302 * NOTE: We are able to derive ce_id from irq because we
2303 * use a one-to-one mapping for CE's 0..5.
2304 * CE's 6 & 7 do not use interrupts at all.
2305 *
2306 * This mapping must be kept in sync with the mapping
2307 * used by firmware.
2308 */
2309 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2310 return IRQ_HANDLED;
2311}
2312
2313static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2314{
2315 struct ath10k *ar = arg;
2316 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2317
2318 tasklet_schedule(&ar_pci->msi_fw_err);
2319 return IRQ_HANDLED;
2320}
2321
2322/*
2323 * Top-level interrupt handler for all PCI interrupts from a Target.
2324 * When a block of MSI interrupts is allocated, this top-level handler
2325 * is not used; instead, we directly call the correct sub-handler.
2326 */
2327static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2328{
2329 struct ath10k *ar = arg;
2330 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2331
2332 if (ar_pci->num_msi_intrs == 0) {
Michal Kaziore5398872013-11-25 14:06:20 +01002333 if (!ath10k_pci_irq_pending(ar))
2334 return IRQ_NONE;
2335
Michal Kazior26852182013-11-25 14:06:25 +01002336 ath10k_pci_disable_and_clear_legacy_irq(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002337 }
2338
2339 tasklet_schedule(&ar_pci->intr_tq);
2340
2341 return IRQ_HANDLED;
2342}
2343
2344static void ath10k_pci_tasklet(unsigned long data)
2345{
2346 struct ath10k *ar = (struct ath10k *)data;
2347 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2348
Michal Kazior5c771e72014-08-22 14:23:34 +02002349 if (ath10k_pci_has_fw_crashed(ar)) {
Michal Kazior6f3b7ff2015-01-24 12:14:52 +02002350 ath10k_pci_irq_disable(ar);
Michal Kazior5c771e72014-08-22 14:23:34 +02002351 ath10k_pci_fw_crashed_clear(ar);
2352 ath10k_pci_fw_crashed_dump(ar);
2353 return;
2354 }
2355
Kalle Valo5e3dd152013-06-12 20:52:10 +03002356 ath10k_ce_per_engine_service_any(ar);
2357
Michal Kazior26852182013-11-25 14:06:25 +01002358 /* Re-enable legacy irq that was disabled in the irq handler */
2359 if (ar_pci->num_msi_intrs == 0)
2360 ath10k_pci_enable_legacy_irq(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002361}
2362
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002363static int ath10k_pci_request_irq_msix(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002364{
2365 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002366 int ret, i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002367
2368 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2369 ath10k_pci_msi_fw_handler,
2370 IRQF_SHARED, "ath10k_pci", ar);
Michal Kazior591ecdb2013-07-31 10:55:15 +02002371 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002372 ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
Michal Kazior591ecdb2013-07-31 10:55:15 +02002373 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002374 return ret;
Michal Kazior591ecdb2013-07-31 10:55:15 +02002375 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002376
2377 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2378 ret = request_irq(ar_pci->pdev->irq + i,
2379 ath10k_pci_per_engine_handler,
2380 IRQF_SHARED, "ath10k_pci", ar);
2381 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002382 ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
Kalle Valo5e3dd152013-06-12 20:52:10 +03002383 ar_pci->pdev->irq + i, ret);
2384
Michal Kazior87b14232013-06-26 08:50:50 +02002385 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2386 free_irq(ar_pci->pdev->irq + i, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002387
Michal Kazior87b14232013-06-26 08:50:50 +02002388 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002389 return ret;
2390 }
2391 }
2392
Kalle Valo5e3dd152013-06-12 20:52:10 +03002393 return 0;
2394}
2395
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002396static int ath10k_pci_request_irq_msi(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002397{
2398 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2399 int ret;
2400
2401 ret = request_irq(ar_pci->pdev->irq,
2402 ath10k_pci_interrupt_handler,
2403 IRQF_SHARED, "ath10k_pci", ar);
Kalle Valof3782742013-10-17 11:36:15 +03002404 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002405 ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002406 ar_pci->pdev->irq, ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002407 return ret;
Kalle Valof3782742013-10-17 11:36:15 +03002408 }
Kalle Valo5e3dd152013-06-12 20:52:10 +03002409
Kalle Valo5e3dd152013-06-12 20:52:10 +03002410 return 0;
2411}
2412
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002413static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002414{
2415 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002416 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002417
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002418 ret = request_irq(ar_pci->pdev->irq,
2419 ath10k_pci_interrupt_handler,
2420 IRQF_SHARED, "ath10k_pci", ar);
Kalle Valof3782742013-10-17 11:36:15 +03002421 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002422 ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002423 ar_pci->pdev->irq, ret);
Kalle Valof3782742013-10-17 11:36:15 +03002424 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002425 }
2426
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002427 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002428}
2429
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002430static int ath10k_pci_request_irq(struct ath10k *ar)
2431{
2432 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2433
2434 switch (ar_pci->num_msi_intrs) {
2435 case 0:
2436 return ath10k_pci_request_irq_legacy(ar);
2437 case 1:
2438 return ath10k_pci_request_irq_msi(ar);
2439 case MSI_NUM_REQUEST:
2440 return ath10k_pci_request_irq_msix(ar);
2441 }
2442
Michal Kazior7aa7a722014-08-25 12:09:38 +02002443 ath10k_warn(ar, "unknown irq configuration upon request\n");
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002444 return -EINVAL;
2445}
2446
2447static void ath10k_pci_free_irq(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002448{
2449 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2450 int i;
2451
 2452 /* There's at least one interrupt regardless of whether it's a legacy INTR
 2453 * or MSI or MSI-X */
2454 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2455 free_irq(ar_pci->pdev->irq + i, ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002456}
2457
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002458static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2459{
2460 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2461 int i;
2462
2463 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2464 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2465 (unsigned long)ar);
2466
2467 for (i = 0; i < CE_COUNT; i++) {
2468 ar_pci->pipe_info[i].ar_pci = ar_pci;
2469 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2470 (unsigned long)&ar_pci->pipe_info[i]);
2471 }
2472}
2473
2474static int ath10k_pci_init_irq(struct ath10k *ar)
2475{
2476 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2477 int ret;
2478
2479 ath10k_pci_init_irq_tasklets(ar);
2480
Michal Kazior403d6272014-08-22 14:23:31 +02002481 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
Michal Kazior7aa7a722014-08-25 12:09:38 +02002482 ath10k_info(ar, "limiting irq mode to: %d\n",
2483 ath10k_pci_irq_mode);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002484
2485 /* Try MSI-X */
Michal Kazior0edf2572014-08-07 11:03:29 +02002486 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002487 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
Alexander Gordeev5ad68672014-02-13 17:50:02 +02002488 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
Kalle Valo5b07e072014-09-14 12:50:06 +03002489 ar_pci->num_msi_intrs);
Alexander Gordeev5ad68672014-02-13 17:50:02 +02002490 if (ret > 0)
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002491 return 0;
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002492
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002493 /* fall-through */
2494 }
2495
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002496 /* Try MSI */
Michal Kaziorcfe9c452013-11-25 14:06:27 +01002497 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2498 ar_pci->num_msi_intrs = 1;
2499 ret = pci_enable_msi(ar_pci->pdev);
2500 if (ret == 0)
2501 return 0;
2502
2503 /* fall-through */
2504 }
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002505
2506 /* Try legacy irq
2507 *
 2508 * A potential race occurs here: the CORE_BASE write
 2509 * depends on the target correctly decoding the AXI address, but
 2510 * the host won't know when the target writes BAR to CORE_CTRL.
 2511 * This write might get lost if the target has NOT written BAR.
 2512 * For now, fix the race by repeating the write in the
 2513 * synchronization check below. */
2514 ar_pci->num_msi_intrs = 0;
2515
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002516 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2517 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002518
2519 return 0;
2520}
2521
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002522static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002523{
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002524 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2525 0);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002526}
2527
2528static int ath10k_pci_deinit_irq(struct ath10k *ar)
2529{
2530 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2531
2532 switch (ar_pci->num_msi_intrs) {
2533 case 0:
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002534 ath10k_pci_deinit_irq_legacy(ar);
2535 return 0;
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002536 case 1:
2537 /* fall-through */
2538 case MSI_NUM_REQUEST:
2539 pci_disable_msi(ar_pci->pdev);
2540 return 0;
Alexander Gordeevbb8b6212014-02-13 17:50:01 +02002541 default:
2542 pci_disable_msi(ar_pci->pdev);
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002543 }
2544
Michal Kazior7aa7a722014-08-25 12:09:38 +02002545 ath10k_warn(ar, "unknown irq configuration upon deinit\n");
Michal Kaziorfc15ca12013-11-25 14:06:21 +01002546 return -EINVAL;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002547}
2548
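/* Poll the FW_INDICATOR register for up to ATH10K_PCI_TARGET_WAIT ms
 * waiting for the firmware to set FW_IND_INITIALIZED, re-arming legacy
 * interrupts on each pass to work around the CORE_BASE write race
 * described in ath10k_pci_init_irq().
 */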
Michal Kaziord7fb47f2013-11-08 08:01:26 +01002549static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002550{
2551 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
Kalle Valo0399eca2014-03-28 09:32:21 +02002552 unsigned long timeout;
Kalle Valo0399eca2014-03-28 09:32:21 +02002553 u32 val;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002554
Michal Kazior7aa7a722014-08-25 12:09:38 +02002555 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002556
Kalle Valo0399eca2014-03-28 09:32:21 +02002557 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2558
2559 do {
2560 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2561
Michal Kazior7aa7a722014-08-25 12:09:38 +02002562 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2563 val);
Kalle Valo50f87a62014-03-28 09:32:52 +02002564
Kalle Valo0399eca2014-03-28 09:32:21 +02002565 /* target should never return this */
2566 if (val == 0xffffffff)
2567 continue;
2568
Michal Kazior7710cd22014-04-23 19:30:04 +03002569 /* the device has crashed so don't bother trying anymore */
2570 if (val & FW_IND_EVENT_PENDING)
2571 break;
2572
Kalle Valo0399eca2014-03-28 09:32:21 +02002573 if (val & FW_IND_INITIALIZED)
2574 break;
2575
Kalle Valo5e3dd152013-06-12 20:52:10 +03002576 if (ar_pci->num_msi_intrs == 0)
2577 /* Fix potential race by repeating CORE_BASE writes */
Michal Kaziora4282492014-10-20 14:14:37 +02002578 ath10k_pci_enable_legacy_irq(ar);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002579
Kalle Valo0399eca2014-03-28 09:32:21 +02002580 mdelay(10);
2581 } while (time_before(jiffies, timeout));
2582
Michal Kaziora4282492014-10-20 14:14:37 +02002583 ath10k_pci_disable_and_clear_legacy_irq(ar);
Michal Kazior7c0f0e32014-10-20 14:14:38 +02002584 ath10k_pci_irq_msi_fw_mask(ar);
Michal Kaziora4282492014-10-20 14:14:37 +02002585
Michal Kazior6a4f6e12014-04-23 19:30:03 +03002586 if (val == 0xffffffff) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002587 ath10k_err(ar, "failed to read device register, device is gone\n");
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002588 return -EIO;
Michal Kazior6a4f6e12014-04-23 19:30:03 +03002589 }
2590
Michal Kazior7710cd22014-04-23 19:30:04 +03002591 if (val & FW_IND_EVENT_PENDING) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002592 ath10k_warn(ar, "device has crashed during init\n");
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002593 return -ECOMM;
Michal Kazior7710cd22014-04-23 19:30:04 +03002594 }
2595
Michal Kazior6a4f6e12014-04-23 19:30:03 +03002596 if (!(val & FW_IND_INITIALIZED)) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002597 ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
Kalle Valo0399eca2014-03-28 09:32:21 +02002598 val);
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002599 return -ETIMEDOUT;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002600 }
2601
Michal Kazior7aa7a722014-08-25 12:09:38 +02002602 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002603 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002604}
2605
Michal Kaziorfc36e3f2014-02-10 17:14:22 +01002606static int ath10k_pci_cold_reset(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002607{
Michal Kaziorc0c378f2014-08-07 11:03:28 +02002608 int i;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002609 u32 val;
2610
Michal Kazior7aa7a722014-08-25 12:09:38 +02002611 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
Kalle Valo5e3dd152013-06-12 20:52:10 +03002612
Ben Greearf51dbe72014-09-29 14:41:46 +03002613 spin_lock_bh(&ar->data_lock);
2614
2615 ar->stats.fw_cold_reset_counter++;
2616
2617 spin_unlock_bh(&ar->data_lock);
2618
Kalle Valo5e3dd152013-06-12 20:52:10 +03002619 /* Put Target, including PCIe, into RESET. */
Kalle Valoe479ed42013-09-01 10:01:53 +03002620 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002621 val |= 1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002622 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002623
2624 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
Kalle Valoe479ed42013-09-01 10:01:53 +03002625 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03002626 RTC_STATE_COLD_RESET_MASK)
2627 break;
2628 msleep(1);
2629 }
2630
2631 /* Pull Target, including PCIe, out of RESET. */
2632 val &= ~1;
Kalle Valoe479ed42013-09-01 10:01:53 +03002633 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002634
2635 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
Kalle Valoe479ed42013-09-01 10:01:53 +03002636 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
Kalle Valo5e3dd152013-06-12 20:52:10 +03002637 RTC_STATE_COLD_RESET_MASK))
2638 break;
2639 msleep(1);
2640 }
2641
Michal Kazior7aa7a722014-08-25 12:09:38 +02002642 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
Kalle Valo50f87a62014-03-28 09:32:52 +02002643
Michal Kazior5b2589f2013-11-08 08:01:30 +01002644 return 0;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002645}
2646
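/* Standard PCI bring-up: enable the device, request the register BAR,
 * enforce 32-bit DMA masks (the target can't address more), enable bus
 * mastering and iomap the register space.
 */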
Michal Kazior2986e3e2014-08-07 11:03:30 +02002647static int ath10k_pci_claim(struct ath10k *ar)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002648{
Michal Kazior2986e3e2014-08-07 11:03:30 +02002649 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2650 struct pci_dev *pdev = ar_pci->pdev;
Michal Kazior2986e3e2014-08-07 11:03:30 +02002651 int ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002652
2653 pci_set_drvdata(pdev, ar);
2654
Kalle Valo5e3dd152013-06-12 20:52:10 +03002655 ret = pci_enable_device(pdev);
2656 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002657 ath10k_err(ar, "failed to enable pci device: %d\n", ret);
Michal Kazior2986e3e2014-08-07 11:03:30 +02002658 return ret;
Kalle Valo5e3dd152013-06-12 20:52:10 +03002659 }
2660
Kalle Valo5e3dd152013-06-12 20:52:10 +03002661 ret = pci_request_region(pdev, BAR_NUM, "ath");
2662 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002663 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
Michal Kazior2986e3e2014-08-07 11:03:30 +02002664 ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002665 goto err_device;
2666 }
2667
Michal Kazior2986e3e2014-08-07 11:03:30 +02002668 /* Target expects 32 bit DMA. Enforce it. */
Kalle Valo5e3dd152013-06-12 20:52:10 +03002669 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2670 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002671 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002672 goto err_region;
2673 }
2674
2675 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2676 if (ret) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002677 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
Michal Kazior2986e3e2014-08-07 11:03:30 +02002678 ret);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002679 goto err_region;
2680 }
2681
Kalle Valo5e3dd152013-06-12 20:52:10 +03002682 pci_set_master(pdev);
2683
Kalle Valo5e3dd152013-06-12 20:52:10 +03002684 /* Arrange for access to Target SoC registers. */
Michal Kazior2986e3e2014-08-07 11:03:30 +02002685 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2686 if (!ar_pci->mem) {
Michal Kazior7aa7a722014-08-25 12:09:38 +02002687 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002688 ret = -EIO;
2689 goto err_master;
2690 }
2691
Michal Kazior7aa7a722014-08-25 12:09:38 +02002692 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
Michal Kazior2986e3e2014-08-07 11:03:30 +02002693 return 0;
2694
2695err_master:
2696 pci_clear_master(pdev);
2697
2698err_region:
2699 pci_release_region(pdev, BAR_NUM);
2700
2701err_device:
2702 pci_disable_device(pdev);
2703
2704 return ret;
2705}
2706
2707static void ath10k_pci_release(struct ath10k *ar)
2708{
2709 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2710 struct pci_dev *pdev = ar_pci->pdev;
2711
2712 pci_iounmap(pdev, ar_pci->mem);
2713 pci_release_region(pdev, BAR_NUM);
2714 pci_clear_master(pdev);
2715 pci_disable_device(pdev);
2716}
2717
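/* Check the (device id, chip revision) pair against the
 * ath10k_pci_supp_chips[] table to filter out known-bad silicon.
 */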
Michal Kazior7505f7c2014-12-02 10:55:54 +02002718static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
2719{
2720 const struct ath10k_pci_supp_chip *supp_chip;
2721 int i;
2722 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
2723
2724 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
2725 supp_chip = &ath10k_pci_supp_chips[i];
2726
2727 if (supp_chip->dev_id == dev_id &&
2728 supp_chip->rev_id == rev_id)
2729 return true;
2730 }
2731
2732 return false;
2733}
2734
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	enum ath10k_hw_rev hw_rev;
	u32 chip_id;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA988X;
		break;
	case QCA6174_2_1_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA6174;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
				hw_rev, &ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;

	if (pdev->subsystem_vendor || pdev->subsystem_device)
		scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
			  "%04x:%04x:%04x:%04x",
			  pdev->vendor, pdev->device,
			  pdev->subsystem_vendor, pdev->subsystem_device);

	spin_lock_init(&ar_pci->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);

	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
		    (unsigned long)ar);
	setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
		    (unsigned long)ar);

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		goto err_sleep;
	}

	/* Quiesce the copy engine and interrupts before IRQ setup. */
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_free_pipes;
	}

	ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_free_irq;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
			   pdev->device, chip_id);
		ret = -ENODEV;
		goto err_free_irq;
	}

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_sleep:
	ath10k_pci_release(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}

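/* Tear down a device in roughly the reverse order of probe: unregister
 * from the driver core, then free interrupts, pipes and PCI resources.
 */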
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	/* Log only after the NULL checks so a missing drvdata pointer
	 * cannot be dereferenced.
	 */
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}

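/* Exporting the id table lets userspace (udev/modprobe) autoload this
 * module when a matching PCIe device is discovered.
 */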
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

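/* pci_register_driver() only registers the driver; ath10k_pci_probe() runs
 * later, once for each matching device found on the bus.
 */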
static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X and QCA6174 PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");

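/* Firmware and board data are fetched with request_firmware(), so the
 * files below are typically expected under /lib/firmware, e.g.
 * /lib/firmware/ath10k/QCA988X/hw2.0/firmware-5.bin.
 */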
/* QCA988X 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);

/* QCA6174 3.1 firmware files (these live in the hw3.0 firmware directory) */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);