/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

static unsigned int ath10k_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;

module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
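
/* Usage note (added; a hedged sketch that assumes this file is built into
 * the usual ath10k_pci module): the interrupt mode can be forced at module
 * load time, e.g.
 *
 *	modprobe ath10k_pci irq_mode=2
 *
 * With irq_mode=0 (auto) the driver selects the best available interrupt
 * mode itself; see the ath10k_pci_init_irq() declaration later in this
 * file.
 */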

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static void ath10k_pci_cleanup_ce(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = 0,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = 1,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 512,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE2: target->host WMI */
	{
		.pipenum = 2,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE3: host->target WMI */
	{
		.pipenum = 3,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE4: host->target HTT */
	{
		.pipenum = 4,
		.pipedir = PIPEDIR_OUT,
		.nentries = 256,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = 5,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = 6,
		.pipedir = PIPEDIR_INOUT,
		.nentries = 32,
		.nbytes_max = 4096,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE7 used only by Host */
};
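
/* Note (added for clarity): host_ce_config_wlan above sizes the host-side CE
 * rings, while target_ce_config_wlan describes the same pipes from the
 * firmware's point of view and is pushed into target memory during device
 * initialization (in code beyond this excerpt). The two tables are expected
 * to stay consistent pipe-for-pipe: e.g. both declare CE2 as a 2048-byte
 * target->host WMI pipe, and CE4 as the host->target HTT pipe that runs
 * without per-send interrupts (CE_ATTR_DIS_INTR on the host side).
 */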

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}

static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->early_irq_tasklet);

	return IRQ_HANDLED;
}

static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
	 * first interrupt from the irq vector is triggered in all cases for
	 * FW indication/errors */
	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
			  IRQF_SHARED, "ath10k_pci (early)", ar);
	if (ret) {
		ath10k_warn("failed to request early irq: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
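/* A minimal usage sketch (hypothetical caller, for illustration only; the
 * `offset` below is made up):
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = ath10k_pci_diag_read_mem(ar, DRAM_BASE_ADDRESS + offset,
 *				       &val, sizeof(val));
 *	if (ret)
 *		ath10k_warn("diag read failed: %d\n", ret);
 *
 * ath10k_pci_hif_dump_area() later in this file is a real in-file caller
 * that uses this path to fetch the firmware crash dump.
 */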
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);
	}

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;
	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return 0;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			return 0;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
				    PCIE_WAKE_TIMEOUT,
				    atomic_read(&ar_pci->keep_awake_count));
			return -ETIMEDOUT;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}
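
/* Note (added for clarity): ath10k_do_pci_wake() and ath10k_do_pci_sleep()
 * are a refcounted pair: only the first wake forces the SoC out of sleep,
 * and only the last sleep lets it doze again. Callers simply bracket
 * register access, as the diag helpers above do:
 *
 *	ath10k_pci_wake(ar);
 *	val = ath10k_pci_read32(ar, address);
 *	ath10k_pci_sleep(ar);
 */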

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		if (transfer_context == NULL)
			continue;

		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = 0;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	int err, i;

	spin_lock_bh(&ar_pci->ce_lock);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto unlock;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto unlock;
	}

	/* `i` is equal to `n_items - 1` after the loop above */

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto unlock;

	err = 0;
unlock:
	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}
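
/* Note (added for clarity) on the capacity check in ath10k_pci_hif_tx_sg():
 * CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) yields the number
 * of free source-ring slots, so the whole scatter-gather list is either
 * enqueued atomically under ce_lock (every fragment but the last carries
 * CE_SEND_FLAG_GATHER) or rejected up front with -ENOBUFS, never committed
 * halfway through.
 */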

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	ret = ath10k_pci_diag_read_mem(ar, host_addr,
				       &reg_dump_area, sizeof(u32));
	if (ret) {
		ath10k_err("failed to read FW dump area address: %d\n", ret);
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("failed to read FW dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_alloc_compl(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries)
			completions += attr->src_nentries;

		if (attr->dest_nentries)
			completions += attr->dest_nentries;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_cleanup_ce(ar);
				return -ENOMEM;
			}

			compl->state = ATH10K_PCI_COMPL_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}

static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num, disable_interrupts;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
		}

		if (attr->dest_nentries)
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
	}

	return 0;
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);
	tasklet_kill(&ar_pci->early_irq_tasklet);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct ath10k_pci_pipe *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = compl->skb;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		switch (compl->state) {
		case ATH10K_PCI_COMPL_SEND:
			cb->tx_completion(ar,
					  compl->skb,
					  compl->transfer_id);
			send_done = 1;
			break;
		case ATH10K_PCI_COMPL_RECV:
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
					    compl->pipe_info->pipe_num, ret);
				break;
			}

			skb = compl->skb;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
			break;
		case ATH10K_PCI_COMPL_FREE:
			ath10k_warn("free completion cannot be processed\n");
			break;
		default:
			ath10k_warn("invalid completion state (%d)\n",
				    compl->state);
			break;
		}

		compl->state = ATH10K_PCI_COMPL_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		/* *ul_pipe = 3; */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

	/* pipe 5 unused */
	/* pipe 6 reserved */
	/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}
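
/* Summary (added for clarity) of the service-to-pipe mapping above:
 *
 *	service				ul pipe	dl pipe	ul polled?
 *	HTT data			4	1	yes (CE_ATTR_DIS_INTR)
 *	HTC ctrl / raw streams		0	1	no
 *	WMI (BK/BE/VI/VO/control)	3	2	no
 */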

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("failed to allocate skbuff for pipe %d\n",
				    pipe_info->pipe_num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("failed to DMA map sk_buff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("failed to enqueue to pipe %d: %d\n",
				    pipe_info->pipe_num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
				    pipe_num, ret);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, ret_early;

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);

	ret = ath10k_pci_alloc_compl(ar);
	if (ret) {
		ath10k_warn("failed to allocate CE completions: %d\n", ret);
		goto err_early_irq;
	}

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn("failed to request irqs: %d\n", ret);
		goto err_free_compl;
	}
| 1331 | |
Michal Kazior | c80de12 | 2013-11-25 14:06:23 +0100 | [diff] [blame] | 1332 | ret = ath10k_pci_setup_ce_irq(ar); |
| 1333 | if (ret) { |
| 1334 | ath10k_warn("failed to setup CE interrupts: %d\n", ret); |
Michal Kazior | 5d1aa94 | 2013-11-25 14:06:24 +0100 | [diff] [blame] | 1335 | goto err_stop; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1336 | } |
| 1337 | |
| 1338 | /* Post buffers once to start things off. */ |
| 1339 | ret = ath10k_pci_post_rx(ar); |
| 1340 | if (ret) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 1341 | ath10k_warn("failed to post RX buffers for all pipes: %d\n", |
| 1342 | ret); |
Michal Kazior | 5d1aa94 | 2013-11-25 14:06:24 +0100 | [diff] [blame] | 1343 | goto err_stop; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1344 | } |
| 1345 | |
| 1346 | ar_pci->started = 1; |
| 1347 | return 0; |
Michal Kazior | c80de12 | 2013-11-25 14:06:23 +0100 | [diff] [blame] | 1348 | |
Michal Kazior | 5d1aa94 | 2013-11-25 14:06:24 +0100 | [diff] [blame] | 1349 | err_stop: |
| 1350 | ath10k_ce_disable_interrupts(ar); |
| 1351 | ath10k_pci_free_irq(ar); |
| 1352 | ath10k_pci_kill_tasklet(ar); |
Michal Kazior | c80de12 | 2013-11-25 14:06:23 +0100 | [diff] [blame] | 1353 | ath10k_pci_process_ce(ar); |
| 1354 | err_free_compl: |
| 1355 | ath10k_pci_cleanup_ce(ar); |
Michal Kazior | ab977bd | 2013-11-25 14:06:26 +0100 | [diff] [blame] | 1356 | err_early_irq: |
|      | 1357 | 	/* Though there should be no interrupts (device was reset),
|      | 1358 | 	 * power_down() expects the early IRQ to be installed as per the
|      | 1359 | 	 * driver lifecycle. */
| 1360 | ret_early = ath10k_pci_request_early_irq(ar); |
| 1361 | if (ret_early) |
| 1362 | ath10k_warn("failed to re-enable early irq: %d\n", ret_early); |
| 1363 | |
Michal Kazior | c80de12 | 2013-11-25 14:06:23 +0100 | [diff] [blame] | 1364 | return ret; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1365 | } |
| 1366 | |
Michal Kazior | 87263e5 | 2013-08-27 13:08:01 +0200 | [diff] [blame] | 1367 | static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1368 | { |
| 1369 | struct ath10k *ar; |
| 1370 | struct ath10k_pci *ar_pci; |
Michal Kazior | 2aa3911 | 2013-08-27 13:08:02 +0200 | [diff] [blame] | 1371 | struct ath10k_ce_pipe *ce_hdl; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1372 | u32 buf_sz; |
| 1373 | struct sk_buff *netbuf; |
| 1374 | u32 ce_data; |
| 1375 | |
| 1376 | buf_sz = pipe_info->buf_sz; |
| 1377 | |
| 1378 | /* Unused Copy Engine */ |
| 1379 | if (buf_sz == 0) |
| 1380 | return; |
| 1381 | |
| 1382 | ar = pipe_info->hif_ce_state; |
| 1383 | ar_pci = ath10k_pci_priv(ar); |
| 1384 | |
| 1385 | if (!ar_pci->started) |
| 1386 | return; |
| 1387 | |
| 1388 | ce_hdl = pipe_info->ce_hdl; |
| 1389 | |
| 1390 | while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf, |
| 1391 | &ce_data) == 0) { |
| 1392 | dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr, |
| 1393 | netbuf->len + skb_tailroom(netbuf), |
| 1394 | DMA_FROM_DEVICE); |
| 1395 | dev_kfree_skb_any(netbuf); |
| 1396 | } |
| 1397 | } |
| 1398 | |
Michal Kazior | 87263e5 | 2013-08-27 13:08:01 +0200 | [diff] [blame] | 1399 | static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1400 | { |
| 1401 | struct ath10k *ar; |
| 1402 | struct ath10k_pci *ar_pci; |
Michal Kazior | 2aa3911 | 2013-08-27 13:08:02 +0200 | [diff] [blame] | 1403 | struct ath10k_ce_pipe *ce_hdl; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1404 | struct sk_buff *netbuf; |
| 1405 | u32 ce_data; |
| 1406 | unsigned int nbytes; |
| 1407 | unsigned int id; |
| 1408 | u32 buf_sz; |
| 1409 | |
| 1410 | buf_sz = pipe_info->buf_sz; |
| 1411 | |
| 1412 | /* Unused Copy Engine */ |
| 1413 | if (buf_sz == 0) |
| 1414 | return; |
| 1415 | |
| 1416 | ar = pipe_info->hif_ce_state; |
| 1417 | ar_pci = ath10k_pci_priv(ar); |
| 1418 | |
| 1419 | if (!ar_pci->started) |
| 1420 | return; |
| 1421 | |
| 1422 | ce_hdl = pipe_info->ce_hdl; |
| 1423 | |
| 1424 | while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, |
| 1425 | &ce_data, &nbytes, &id) == 0) { |
Kalle Valo | e9bb0aa | 2013-09-08 18:36:11 +0300 | [diff] [blame] | 1426 | 		/*
|      | 1427 | 		 * Indicate the completion to higher layers so they
|      | 1428 | 		 * can free the buffer.
|      | 1429 | 		 */
Michal Kazior | 2415fc1 | 2013-11-08 08:01:32 +0100 | [diff] [blame] | 1430 | |
| 1431 | if (!netbuf) { |
| 1432 | ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n", |
| 1433 | ce_hdl->id); |
| 1434 | continue; |
| 1435 | } |
| 1436 | |
Kalle Valo | e9bb0aa | 2013-09-08 18:36:11 +0300 | [diff] [blame] | 1437 | ar_pci->msg_callbacks_current.tx_completion(ar, |
| 1438 | netbuf, |
| 1439 | id); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1440 | } |
| 1441 | } |
| 1442 | |
|      | 1443 | /*
|      | 1444 |  * Clean up residual buffers for device shutdown:
|      | 1445 |  *  - buffers that were enqueued for receive
|      | 1446 |  *  - buffers that were to be sent
|      | 1447 |  * Note: buffers that had completed but which were
|      | 1448 |  * not yet processed are on a completion queue. They
|      | 1449 |  * are handled when the completion thread shuts down.
|      | 1450 |  */
| 1451 | static void ath10k_pci_buffer_cleanup(struct ath10k *ar) |
| 1452 | { |
| 1453 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 1454 | int pipe_num; |
| 1455 | |
Michal Kazior | fad6ed7 | 2013-11-08 08:01:23 +0100 | [diff] [blame] | 1456 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
Michal Kazior | 87263e5 | 2013-08-27 13:08:01 +0200 | [diff] [blame] | 1457 | struct ath10k_pci_pipe *pipe_info; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1458 | |
| 1459 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
| 1460 | ath10k_pci_rx_pipe_cleanup(pipe_info); |
| 1461 | ath10k_pci_tx_pipe_cleanup(pipe_info); |
| 1462 | } |
| 1463 | } |
| 1464 | |
| 1465 | static void ath10k_pci_ce_deinit(struct ath10k *ar) |
| 1466 | { |
| 1467 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
Michal Kazior | 87263e5 | 2013-08-27 13:08:01 +0200 | [diff] [blame] | 1468 | struct ath10k_pci_pipe *pipe_info; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1469 | int pipe_num; |
| 1470 | |
Michal Kazior | fad6ed7 | 2013-11-08 08:01:23 +0100 | [diff] [blame] | 1471 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1472 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
| 1473 | if (pipe_info->ce_hdl) { |
| 1474 | ath10k_ce_deinit(pipe_info->ce_hdl); |
| 1475 | pipe_info->ce_hdl = NULL; |
| 1476 | pipe_info->buf_sz = 0; |
| 1477 | } |
| 1478 | } |
| 1479 | } |
| 1480 | |
| 1481 | static void ath10k_pci_hif_stop(struct ath10k *ar) |
| 1482 | { |
Michal Kazior | 32270b6 | 2013-08-02 09:15:47 +0200 | [diff] [blame] | 1483 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
Michal Kazior | 5d1aa94 | 2013-11-25 14:06:24 +0100 | [diff] [blame] | 1484 | int ret; |
Michal Kazior | 32270b6 | 2013-08-02 09:15:47 +0200 | [diff] [blame] | 1485 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1486 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); |
| 1487 | |
Michal Kazior | 5d1aa94 | 2013-11-25 14:06:24 +0100 | [diff] [blame] | 1488 | ret = ath10k_ce_disable_interrupts(ar); |
| 1489 | if (ret) |
| 1490 | ath10k_warn("failed to disable CE interrupts: %d\n", ret); |
Michal Kazior | 32270b6 | 2013-08-02 09:15:47 +0200 | [diff] [blame] | 1491 | |
Michal Kazior | 5d1aa94 | 2013-11-25 14:06:24 +0100 | [diff] [blame] | 1492 | ath10k_pci_free_irq(ar); |
| 1493 | ath10k_pci_kill_tasklet(ar); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1494 | |
Michal Kazior | ab977bd | 2013-11-25 14:06:26 +0100 | [diff] [blame] | 1495 | ret = ath10k_pci_request_early_irq(ar); |
| 1496 | if (ret) |
| 1497 | ath10k_warn("failed to re-enable early irq: %d\n", ret); |
| 1498 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1499 | 	/* At this point the asynchronous threads are stopped and the
|      | 1500 | 	 * target should neither DMA nor interrupt. We process the
|      | 1501 | 	 * leftovers and then free everything else up. */
| 1502 | |
| 1503 | ath10k_pci_process_ce(ar); |
| 1504 | ath10k_pci_cleanup_ce(ar); |
| 1505 | ath10k_pci_buffer_cleanup(ar); |
Michal Kazior | 32270b6 | 2013-08-02 09:15:47 +0200 | [diff] [blame] | 1506 | |
Michal Kazior | 6a42a47 | 2013-11-08 08:01:35 +0100 | [diff] [blame] | 1507 | 	/* Make sure the device won't access any structures on the host by
|      | 1508 | 	 * resetting it. The device was given the PCI CE ringbuffer
|      | 1509 | 	 * configuration during init. If the ringbuffers are freed and the
|      | 1510 | 	 * device were to access them, this could lead to memory corruption
|      | 1511 | 	 * on the host. */
Michal Kazior | fc36e3f | 2014-02-10 17:14:22 +0100 | [diff] [blame] | 1512 | ath10k_pci_warm_reset(ar); |
Michal Kazior | 6a42a47 | 2013-11-08 08:01:35 +0100 | [diff] [blame] | 1513 | |
Michal Kazior | 32270b6 | 2013-08-02 09:15:47 +0200 | [diff] [blame] | 1514 | ar_pci->started = 0; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1515 | } |
| 1516 | |
| 1517 | static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, |
| 1518 | void *req, u32 req_len, |
| 1519 | void *resp, u32 *resp_len) |
| 1520 | { |
| 1521 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
Michal Kazior | 2aa3911 | 2013-08-27 13:08:02 +0200 | [diff] [blame] | 1522 | struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; |
| 1523 | struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; |
| 1524 | struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; |
| 1525 | struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1526 | dma_addr_t req_paddr = 0; |
| 1527 | dma_addr_t resp_paddr = 0; |
| 1528 | struct bmi_xfer xfer = {}; |
| 1529 | void *treq, *tresp = NULL; |
| 1530 | int ret = 0; |
| 1531 | |
Michal Kazior | 85622cd | 2013-11-25 14:06:22 +0100 | [diff] [blame] | 1532 | might_sleep(); |
| 1533 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1534 | if (resp && !resp_len) |
| 1535 | return -EINVAL; |
| 1536 | |
| 1537 | if (resp && resp_len && *resp_len == 0) |
| 1538 | return -EINVAL; |
| 1539 | |
| 1540 | treq = kmemdup(req, req_len, GFP_KERNEL); |
| 1541 | if (!treq) |
| 1542 | return -ENOMEM; |
| 1543 | |
| 1544 | req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); |
| 1545 | ret = dma_mapping_error(ar->dev, req_paddr); |
| 1546 | if (ret) |
| 1547 | goto err_dma; |
| 1548 | |
| 1549 | if (resp && resp_len) { |
| 1550 | tresp = kzalloc(*resp_len, GFP_KERNEL); |
| 1551 | if (!tresp) { |
| 1552 | ret = -ENOMEM; |
| 1553 | goto err_req; |
| 1554 | } |
| 1555 | |
| 1556 | resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, |
| 1557 | DMA_FROM_DEVICE); |
| 1558 | ret = dma_mapping_error(ar->dev, resp_paddr); |
| 1559 | if (ret) |
| 1560 | goto err_req; |
| 1561 | |
| 1562 | xfer.wait_for_resp = true; |
| 1563 | xfer.resp_len = 0; |
| 1564 | |
| 1565 | ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr); |
| 1566 | } |
| 1567 | |
| 1568 | init_completion(&xfer.done); |
| 1569 | |
| 1570 | ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); |
| 1571 | if (ret) |
| 1572 | goto err_resp; |
| 1573 | |
Michal Kazior | 85622cd | 2013-11-25 14:06:22 +0100 | [diff] [blame] | 1574 | ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer); |
| 1575 | if (ret) { |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1576 | u32 unused_buffer; |
| 1577 | unsigned int unused_nbytes; |
| 1578 | unsigned int unused_id; |
| 1579 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1580 | ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, |
| 1581 | &unused_nbytes, &unused_id); |
|      | 1582 | 	}
| 1586 | |
| 1587 | err_resp: |
| 1588 | if (resp) { |
| 1589 | u32 unused_buffer; |
| 1590 | |
| 1591 | ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); |
| 1592 | dma_unmap_single(ar->dev, resp_paddr, |
| 1593 | *resp_len, DMA_FROM_DEVICE); |
| 1594 | } |
| 1595 | err_req: |
| 1596 | dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); |
| 1597 | |
| 1598 | if (ret == 0 && resp_len) { |
| 1599 | *resp_len = min(*resp_len, xfer.resp_len); |
| 1600 | memcpy(resp, tresp, xfer.resp_len); |
| 1601 | } |
| 1602 | err_dma: |
| 1603 | kfree(treq); |
| 1604 | kfree(tresp); |
| 1605 | |
| 1606 | return ret; |
| 1607 | } |
| 1608 | |
Michal Kazior | 5440ce2 | 2013-09-03 15:09:58 +0200 | [diff] [blame] | 1609 | static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1610 | { |
Michal Kazior | 5440ce2 | 2013-09-03 15:09:58 +0200 | [diff] [blame] | 1611 | struct bmi_xfer *xfer; |
| 1612 | u32 ce_data; |
| 1613 | unsigned int nbytes; |
| 1614 | unsigned int transfer_id; |
| 1615 | |
| 1616 | if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data, |
| 1617 | &nbytes, &transfer_id)) |
| 1618 | return; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1619 | |
| 1620 | if (xfer->wait_for_resp) |
| 1621 | return; |
| 1622 | |
| 1623 | complete(&xfer->done); |
| 1624 | } |
| 1625 | |
Michal Kazior | 5440ce2 | 2013-09-03 15:09:58 +0200 | [diff] [blame] | 1626 | static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1627 | { |
Michal Kazior | 5440ce2 | 2013-09-03 15:09:58 +0200 | [diff] [blame] | 1628 | struct bmi_xfer *xfer; |
| 1629 | u32 ce_data; |
| 1630 | unsigned int nbytes; |
| 1631 | unsigned int transfer_id; |
| 1632 | unsigned int flags; |
| 1633 | |
| 1634 | if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data, |
| 1635 | &nbytes, &transfer_id, &flags)) |
| 1636 | return; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1637 | |
| 1638 | if (!xfer->wait_for_resp) { |
| 1639 | ath10k_warn("unexpected: BMI data received; ignoring\n"); |
| 1640 | return; |
| 1641 | } |
| 1642 | |
| 1643 | xfer->resp_len = nbytes; |
| 1644 | complete(&xfer->done); |
| 1645 | } |
| 1646 | |
Michal Kazior | 85622cd | 2013-11-25 14:06:22 +0100 | [diff] [blame] | 1647 | static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, |
| 1648 | struct ath10k_ce_pipe *rx_pipe, |
| 1649 | struct bmi_xfer *xfer) |
| 1650 | { |
| 1651 | unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; |
| 1652 | |
| 1653 | while (time_before_eq(jiffies, timeout)) { |
| 1654 | ath10k_pci_bmi_send_done(tx_pipe); |
| 1655 | ath10k_pci_bmi_recv_data(rx_pipe); |
| 1656 | |
| 1657 | if (completion_done(&xfer->done)) |
| 1658 | return 0; |
| 1659 | |
| 1660 | schedule(); |
| 1661 | } |
| 1662 | |
| 1663 | return -ETIMEDOUT; |
| 1664 | } |
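
/*
 * Editor's note (illustrative sketch, not part of the driver): the BMI
 * exchange above is synchronous. A caller supplies a request buffer and
 * optionally a response buffer, and the helper polls both CE pipes until
 * the firmware answers or BMI_COMMUNICATION_TIMEOUT_HZ elapses. A minimal
 * caller might look as follows, with the req/resp types left abstract as
 * an assumption:
 *
 *	u32 resp_len = sizeof(resp);
 *	int ret;
 *
 *	ret = ath10k_pci_hif_exchange_bmi_msg(ar, &req, sizeof(req),
 *					      &resp, &resp_len);
 *	if (ret)
 *		return ret;
 *
 * On success, resp_len holds the actual length of the firmware response.
 */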
| 1665 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1666 | /* |
| 1667 | * Map from service/endpoint to Copy Engine. |
| 1668 | * This table is derived from the CE_PCI TABLE, above. |
| 1669 | * It is passed to the Target at startup for use by firmware. |
| 1670 | */ |
| 1671 | static const struct service_to_pipe target_service_to_ce_map_wlan[] = { |
| 1672 | { |
| 1673 | ATH10K_HTC_SVC_ID_WMI_DATA_VO, |
| 1674 | PIPEDIR_OUT, /* out = UL = host -> target */ |
| 1675 | 3, |
| 1676 | }, |
| 1677 | { |
| 1678 | ATH10K_HTC_SVC_ID_WMI_DATA_VO, |
| 1679 | PIPEDIR_IN, /* in = DL = target -> host */ |
| 1680 | 2, |
| 1681 | }, |
| 1682 | { |
| 1683 | ATH10K_HTC_SVC_ID_WMI_DATA_BK, |
| 1684 | PIPEDIR_OUT, /* out = UL = host -> target */ |
| 1685 | 3, |
| 1686 | }, |
| 1687 | { |
| 1688 | ATH10K_HTC_SVC_ID_WMI_DATA_BK, |
| 1689 | PIPEDIR_IN, /* in = DL = target -> host */ |
| 1690 | 2, |
| 1691 | }, |
| 1692 | { |
| 1693 | ATH10K_HTC_SVC_ID_WMI_DATA_BE, |
| 1694 | PIPEDIR_OUT, /* out = UL = host -> target */ |
| 1695 | 3, |
| 1696 | }, |
| 1697 | { |
| 1698 | ATH10K_HTC_SVC_ID_WMI_DATA_BE, |
| 1699 | PIPEDIR_IN, /* in = DL = target -> host */ |
| 1700 | 2, |
| 1701 | }, |
| 1702 | { |
| 1703 | ATH10K_HTC_SVC_ID_WMI_DATA_VI, |
| 1704 | PIPEDIR_OUT, /* out = UL = host -> target */ |
| 1705 | 3, |
| 1706 | }, |
| 1707 | { |
| 1708 | ATH10K_HTC_SVC_ID_WMI_DATA_VI, |
| 1709 | PIPEDIR_IN, /* in = DL = target -> host */ |
| 1710 | 2, |
| 1711 | }, |
| 1712 | { |
| 1713 | ATH10K_HTC_SVC_ID_WMI_CONTROL, |
| 1714 | PIPEDIR_OUT, /* out = UL = host -> target */ |
| 1715 | 3, |
| 1716 | }, |
| 1717 | { |
| 1718 | ATH10K_HTC_SVC_ID_WMI_CONTROL, |
| 1719 | PIPEDIR_IN, /* in = DL = target -> host */ |
| 1720 | 2, |
| 1721 | }, |
| 1722 | { |
| 1723 | ATH10K_HTC_SVC_ID_RSVD_CTRL, |
| 1724 | PIPEDIR_OUT, /* out = UL = host -> target */ |
| 1725 | 0, /* could be moved to 3 (share with WMI) */ |
| 1726 | }, |
| 1727 | { |
| 1728 | ATH10K_HTC_SVC_ID_RSVD_CTRL, |
| 1729 | PIPEDIR_IN, /* in = DL = target -> host */ |
| 1730 | 1, |
| 1731 | }, |
| 1732 | { |
| 1733 | ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ |
| 1734 | PIPEDIR_OUT, /* out = UL = host -> target */ |
| 1735 | 0, |
| 1736 | }, |
| 1737 | { |
| 1738 | ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ |
| 1739 | PIPEDIR_IN, /* in = DL = target -> host */ |
| 1740 | 1, |
| 1741 | }, |
| 1742 | { |
| 1743 | ATH10K_HTC_SVC_ID_HTT_DATA_MSG, |
| 1744 | PIPEDIR_OUT, /* out = UL = host -> target */ |
| 1745 | 4, |
| 1746 | }, |
| 1747 | { |
| 1748 | ATH10K_HTC_SVC_ID_HTT_DATA_MSG, |
| 1749 | PIPEDIR_IN, /* in = DL = target -> host */ |
| 1750 | 1, |
| 1751 | }, |
| 1752 | |
| 1753 | /* (Additions here) */ |
| 1754 | |
| 1755 | { /* Must be last */ |
| 1756 | 0, |
| 1757 | 0, |
| 1758 | 0, |
| 1759 | }, |
| 1760 | }; |
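
/*
 * Editor's note (illustrative sketch, not part of the driver): resolving
 * a service id to its UL/DL pipe numbers is a linear walk of the table
 * above, roughly what the map_service_to_pipe hif op does elsewhere in
 * this file. The field names follow the positional initializers used
 * here and are otherwise an assumption:
 *
 *	static void example_svc_to_pipes(u32 service_id, u8 *ul_pipe,
 *					 u8 *dl_pipe)
 *	{
 *		const struct service_to_pipe *e;
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
 *			e = &target_service_to_ce_map_wlan[i];
 *			if (e->service_id != service_id)
 *				continue;
 *
 *			if (e->pipedir == PIPEDIR_OUT)
 *				*ul_pipe = e->pipenum;
 *			else if (e->pipedir == PIPEDIR_IN)
 *				*dl_pipe = e->pipenum;
 *		}
 *	}
 */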
| 1761 | |
| 1762 | /* |
| 1763 | * Send an interrupt to the device to wake up the Target CPU |
| 1764 | * so it has an opportunity to notice any changed state. |
| 1765 | */ |
| 1766 | static int ath10k_pci_wake_target_cpu(struct ath10k *ar) |
| 1767 | { |
| 1768 | int ret; |
| 1769 | u32 core_ctrl; |
| 1770 | |
| 1771 | ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS | |
| 1772 | CORE_CTRL_ADDRESS, |
| 1773 | &core_ctrl); |
| 1774 | if (ret) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 1775 | ath10k_warn("failed to read core_ctrl: %d\n", ret); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1776 | return ret; |
| 1777 | } |
| 1778 | |
| 1779 | /* A_INUM_FIRMWARE interrupt to Target CPU */ |
| 1780 | core_ctrl |= CORE_CTRL_CPU_INTR_MASK; |
| 1781 | |
| 1782 | ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS | |
| 1783 | CORE_CTRL_ADDRESS, |
| 1784 | core_ctrl); |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 1785 | if (ret) { |
| 1786 | ath10k_warn("failed to set target CPU interrupt mask: %d\n", |
| 1787 | ret); |
| 1788 | return ret; |
| 1789 | } |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1790 | |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 1791 | return 0; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1792 | } |
| 1793 | |
| 1794 | static int ath10k_pci_init_config(struct ath10k *ar) |
| 1795 | { |
| 1796 | u32 interconnect_targ_addr; |
| 1797 | u32 pcie_state_targ_addr = 0; |
| 1798 | u32 pipe_cfg_targ_addr = 0; |
| 1799 | u32 svc_to_pipe_map = 0; |
| 1800 | u32 pcie_config_flags = 0; |
| 1801 | u32 ealloc_value; |
| 1802 | u32 ealloc_targ_addr; |
| 1803 | u32 flag2_value; |
| 1804 | u32 flag2_targ_addr; |
| 1805 | int ret = 0; |
| 1806 | |
| 1807 | /* Download to Target the CE Config and the service-to-CE map */ |
| 1808 | interconnect_targ_addr = |
| 1809 | host_interest_item_address(HI_ITEM(hi_interconnect_state)); |
| 1810 | |
| 1811 | /* Supply Target-side CE configuration */ |
| 1812 | ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr, |
| 1813 | &pcie_state_targ_addr); |
| 1814 | if (ret != 0) { |
| 1815 | ath10k_err("Failed to get pcie state addr: %d\n", ret); |
| 1816 | return ret; |
| 1817 | } |
| 1818 | |
| 1819 | if (pcie_state_targ_addr == 0) { |
| 1820 | ret = -EIO; |
| 1821 | ath10k_err("Invalid pcie state addr\n"); |
| 1822 | return ret; |
| 1823 | } |
| 1824 | |
| 1825 | ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + |
| 1826 | offsetof(struct pcie_state, |
| 1827 | pipe_cfg_addr), |
| 1828 | &pipe_cfg_targ_addr); |
| 1829 | if (ret != 0) { |
| 1830 | ath10k_err("Failed to get pipe cfg addr: %d\n", ret); |
| 1831 | return ret; |
| 1832 | } |
| 1833 | |
| 1834 | if (pipe_cfg_targ_addr == 0) { |
| 1835 | ret = -EIO; |
| 1836 | ath10k_err("Invalid pipe cfg addr\n"); |
| 1837 | return ret; |
| 1838 | } |
| 1839 | |
| 1840 | ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, |
| 1841 | target_ce_config_wlan, |
| 1842 | sizeof(target_ce_config_wlan)); |
| 1843 | |
| 1844 | if (ret != 0) { |
| 1845 | ath10k_err("Failed to write pipe cfg: %d\n", ret); |
| 1846 | return ret; |
| 1847 | } |
| 1848 | |
| 1849 | ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + |
| 1850 | offsetof(struct pcie_state, |
| 1851 | svc_to_pipe_map), |
| 1852 | &svc_to_pipe_map); |
| 1853 | if (ret != 0) { |
| 1854 | ath10k_err("Failed to get svc/pipe map: %d\n", ret); |
| 1855 | return ret; |
| 1856 | } |
| 1857 | |
| 1858 | if (svc_to_pipe_map == 0) { |
| 1859 | ret = -EIO; |
| 1860 | ath10k_err("Invalid svc_to_pipe map\n"); |
| 1861 | return ret; |
| 1862 | } |
| 1863 | |
| 1864 | ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, |
| 1865 | target_service_to_ce_map_wlan, |
| 1866 | sizeof(target_service_to_ce_map_wlan)); |
| 1867 | if (ret != 0) { |
| 1868 | ath10k_err("Failed to write svc/pipe map: %d\n", ret); |
| 1869 | return ret; |
| 1870 | } |
| 1871 | |
| 1872 | ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr + |
| 1873 | offsetof(struct pcie_state, |
| 1874 | config_flags), |
| 1875 | &pcie_config_flags); |
| 1876 | if (ret != 0) { |
| 1877 | ath10k_err("Failed to get pcie config_flags: %d\n", ret); |
| 1878 | return ret; |
| 1879 | } |
| 1880 | |
| 1881 | pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; |
| 1882 | |
| 1883 | ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr + |
| 1884 | offsetof(struct pcie_state, config_flags), |
| 1885 | &pcie_config_flags, |
| 1886 | sizeof(pcie_config_flags)); |
| 1887 | if (ret != 0) { |
| 1888 | ath10k_err("Failed to write pcie config_flags: %d\n", ret); |
| 1889 | return ret; |
| 1890 | } |
| 1891 | |
| 1892 | /* configure early allocation */ |
| 1893 | ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc)); |
| 1894 | |
| 1895 | ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value); |
| 1896 | if (ret != 0) { |
|      | 1897 | 		ath10k_err("Failed to get early alloc val: %d\n", ret);
| 1898 | return ret; |
| 1899 | } |
| 1900 | |
| 1901 | /* first bank is switched to IRAM */ |
| 1902 | ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & |
| 1903 | HI_EARLY_ALLOC_MAGIC_MASK); |
| 1904 | ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & |
| 1905 | HI_EARLY_ALLOC_IRAM_BANKS_MASK); |
| 1906 | |
| 1907 | ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value); |
| 1908 | if (ret != 0) { |
| 1909 | ath10k_err("Failed to set early alloc val: %d\n", ret); |
| 1910 | return ret; |
| 1911 | } |
| 1912 | |
| 1913 | /* Tell Target to proceed with initialization */ |
| 1914 | flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); |
| 1915 | |
| 1916 | ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value); |
| 1917 | if (ret != 0) { |
| 1918 | ath10k_err("Failed to get option val: %d\n", ret); |
| 1919 | return ret; |
| 1920 | } |
| 1921 | |
| 1922 | flag2_value |= HI_OPTION_EARLY_CFG_DONE; |
| 1923 | |
| 1924 | ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value); |
| 1925 | if (ret != 0) { |
| 1926 | ath10k_err("Failed to set option val: %d\n", ret); |
| 1927 | return ret; |
| 1928 | } |
| 1929 | |
| 1930 | return 0; |
| 1931 | } |
| 1932 | |
| 1935 | static int ath10k_pci_ce_init(struct ath10k *ar) |
| 1936 | { |
| 1937 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
Michal Kazior | 87263e5 | 2013-08-27 13:08:01 +0200 | [diff] [blame] | 1938 | struct ath10k_pci_pipe *pipe_info; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1939 | const struct ce_attr *attr; |
| 1940 | int pipe_num; |
| 1941 | |
Michal Kazior | fad6ed7 | 2013-11-08 08:01:23 +0100 | [diff] [blame] | 1942 | for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1943 | pipe_info = &ar_pci->pipe_info[pipe_num]; |
| 1944 | pipe_info->pipe_num = pipe_num; |
| 1945 | pipe_info->hif_ce_state = ar; |
| 1946 | attr = &host_ce_config_wlan[pipe_num]; |
| 1947 | |
| 1948 | pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); |
| 1949 | if (pipe_info->ce_hdl == NULL) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 1950 | ath10k_err("failed to initialize CE for pipe: %d\n", |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1951 | pipe_num); |
| 1952 | |
|      | 1953 | 			/* It is safe to call this here; it checks whether
|      | 1954 | 			 * ce_hdl is valid for each pipe. */
| 1955 | ath10k_pci_ce_deinit(ar); |
|      | 1956 | 			return -ENOMEM;
| 1957 | } |
| 1958 | |
Michal Kazior | fad6ed7 | 2013-11-08 08:01:23 +0100 | [diff] [blame] | 1959 | if (pipe_num == CE_COUNT - 1) { |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1960 | /* |
| 1961 | * Reserve the ultimate CE for |
| 1962 | * diagnostic Window support |
| 1963 | */ |
Michal Kazior | fad6ed7 | 2013-11-08 08:01:23 +0100 | [diff] [blame] | 1964 | ar_pci->ce_diag = pipe_info->ce_hdl; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1965 | continue; |
| 1966 | } |
| 1967 | |
| 1968 | pipe_info->buf_sz = (size_t) (attr->src_sz_max); |
| 1969 | } |
| 1970 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 1971 | return 0; |
| 1972 | } |
| 1973 | |
| 1974 | static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) |
| 1975 | { |
| 1976 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 1977 | u32 fw_indicator_address, fw_indicator; |
| 1978 | |
| 1979 | ath10k_pci_wake(ar); |
| 1980 | |
| 1981 | fw_indicator_address = ar_pci->fw_indicator_address; |
| 1982 | fw_indicator = ath10k_pci_read32(ar, fw_indicator_address); |
| 1983 | |
| 1984 | if (fw_indicator & FW_IND_EVENT_PENDING) { |
| 1985 | /* ACK: clear Target-side pending event */ |
| 1986 | ath10k_pci_write32(ar, fw_indicator_address, |
| 1987 | fw_indicator & ~FW_IND_EVENT_PENDING); |
| 1988 | |
| 1989 | if (ar_pci->started) { |
| 1990 | ath10k_pci_hif_dump_area(ar); |
| 1991 | } else { |
| 1992 | /* |
| 1993 | * Probable Target failure before we're prepared |
| 1994 | * to handle it. Generally unexpected. |
| 1995 | */ |
| 1996 | ath10k_warn("early firmware event indicated\n"); |
| 1997 | } |
| 1998 | } |
| 1999 | |
| 2000 | ath10k_pci_sleep(ar); |
| 2001 | } |
| 2002 | |
Michal Kazior | fc36e3f | 2014-02-10 17:14:22 +0100 | [diff] [blame] | 2003 | static int ath10k_pci_warm_reset(struct ath10k *ar) |
| 2004 | { |
| 2005 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2006 | int ret = 0; |
| 2007 | u32 val; |
| 2008 | |
| 2009 | ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n"); |
| 2010 | |
| 2011 | ret = ath10k_do_pci_wake(ar); |
| 2012 | if (ret) { |
| 2013 | ath10k_err("failed to wake up target: %d\n", ret); |
| 2014 | return ret; |
| 2015 | } |
| 2016 | |
| 2017 | /* debug */ |
| 2018 | val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + |
| 2019 | PCIE_INTR_CAUSE_ADDRESS); |
| 2020 | ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val); |
| 2021 | |
| 2022 | val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + |
| 2023 | CPU_INTR_ADDRESS); |
| 2024 | ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", |
| 2025 | val); |
| 2026 | |
| 2027 | /* disable pending irqs */ |
| 2028 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + |
| 2029 | PCIE_INTR_ENABLE_ADDRESS, 0); |
| 2030 | |
| 2031 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + |
| 2032 | PCIE_INTR_CLR_ADDRESS, ~0); |
| 2033 | |
| 2034 | msleep(100); |
| 2035 | |
| 2036 | /* clear fw indicator */ |
| 2037 | ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0); |
| 2038 | |
| 2039 | /* clear target LF timer interrupts */ |
| 2040 | val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + |
| 2041 | SOC_LF_TIMER_CONTROL0_ADDRESS); |
| 2042 | ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + |
| 2043 | SOC_LF_TIMER_CONTROL0_ADDRESS, |
| 2044 | val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); |
| 2045 | |
| 2046 | /* reset CE */ |
| 2047 | val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + |
| 2048 | SOC_RESET_CONTROL_ADDRESS); |
| 2049 | ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, |
| 2050 | val | SOC_RESET_CONTROL_CE_RST_MASK); |
| 2051 | val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + |
| 2052 | SOC_RESET_CONTROL_ADDRESS); |
| 2053 | msleep(10); |
| 2054 | |
| 2055 | /* unreset CE */ |
| 2056 | ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, |
| 2057 | val & ~SOC_RESET_CONTROL_CE_RST_MASK); |
| 2058 | val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + |
| 2059 | SOC_RESET_CONTROL_ADDRESS); |
| 2060 | msleep(10); |
| 2061 | |
| 2062 | /* debug */ |
| 2063 | val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + |
| 2064 | PCIE_INTR_CAUSE_ADDRESS); |
| 2065 | ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val); |
| 2066 | |
| 2067 | val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + |
| 2068 | CPU_INTR_ADDRESS); |
| 2069 | ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", |
| 2070 | val); |
| 2071 | |
| 2072 | /* CPU warm reset */ |
| 2073 | val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + |
| 2074 | SOC_RESET_CONTROL_ADDRESS); |
| 2075 | ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, |
| 2076 | val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); |
| 2077 | |
| 2078 | val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + |
| 2079 | SOC_RESET_CONTROL_ADDRESS); |
| 2080 | ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val); |
| 2081 | |
| 2082 | msleep(100); |
| 2083 | |
| 2084 | ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n"); |
| 2085 | |
| 2086 | ath10k_do_pci_sleep(ar); |
| 2087 | return ret; |
| 2088 | } |
| 2089 | |
| 2090 | static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2091 | { |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 2092 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
Kalle Valo | 95cbb6a | 2013-11-20 10:00:35 +0200 | [diff] [blame] | 2093 | const char *irq_mode; |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2094 | int ret; |
| 2095 | |
| 2096 | /* |
| 2097 | * Bring the target up cleanly. |
| 2098 | * |
| 2099 | * The target may be in an undefined state with an AUX-powered Target |
| 2100 | * and a Host in WoW mode. If the Host crashes, loses power, or is |
| 2101 | * restarted (without unloading the driver) then the Target is left |
| 2102 | * (aux) powered and running. On a subsequent driver load, the Target |
| 2103 | * is in an unexpected state. We try to catch that here in order to |
| 2104 | * reset the Target and retry the probe. |
| 2105 | */ |
Michal Kazior | fc36e3f | 2014-02-10 17:14:22 +0100 | [diff] [blame] | 2106 | if (cold_reset) |
| 2107 | ret = ath10k_pci_cold_reset(ar); |
| 2108 | else |
| 2109 | ret = ath10k_pci_warm_reset(ar); |
| 2110 | |
Michal Kazior | 5b2589f | 2013-11-08 08:01:30 +0100 | [diff] [blame] | 2111 | if (ret) { |
| 2112 | ath10k_err("failed to reset target: %d\n", ret); |
Michal Kazior | 98563d5 | 2013-11-08 08:01:33 +0100 | [diff] [blame] | 2113 | goto err; |
Michal Kazior | 5b2589f | 2013-11-08 08:01:30 +0100 | [diff] [blame] | 2114 | } |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2115 | |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 2116 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2117 | /* Force AWAKE forever */ |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2118 | ath10k_do_pci_wake(ar); |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2119 | |
| 2120 | ret = ath10k_pci_ce_init(ar); |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2121 | if (ret) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2122 | ath10k_err("failed to initialize CE: %d\n", ret); |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2123 | goto err_ps; |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2124 | } |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2125 | |
Michal Kazior | 98563d5 | 2013-11-08 08:01:33 +0100 | [diff] [blame] | 2126 | ret = ath10k_ce_disable_interrupts(ar); |
| 2127 | if (ret) { |
| 2128 | ath10k_err("failed to disable CE interrupts: %d\n", ret); |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2129 | goto err_ce; |
| 2130 | } |
| 2131 | |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2132 | ret = ath10k_pci_init_irq(ar); |
Michal Kazior | 98563d5 | 2013-11-08 08:01:33 +0100 | [diff] [blame] | 2133 | if (ret) { |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2134 | ath10k_err("failed to init irqs: %d\n", ret); |
Michal Kazior | 98563d5 | 2013-11-08 08:01:33 +0100 | [diff] [blame] | 2135 | goto err_ce; |
| 2136 | } |
| 2137 | |
Michal Kazior | ab977bd | 2013-11-25 14:06:26 +0100 | [diff] [blame] | 2138 | ret = ath10k_pci_request_early_irq(ar); |
| 2139 | if (ret) { |
| 2140 | ath10k_err("failed to request early irq: %d\n", ret); |
| 2141 | goto err_deinit_irq; |
| 2142 | } |
| 2143 | |
Michal Kazior | 98563d5 | 2013-11-08 08:01:33 +0100 | [diff] [blame] | 2144 | ret = ath10k_pci_wait_for_target_init(ar); |
| 2145 | if (ret) { |
| 2146 | ath10k_err("failed to wait for target to init: %d\n", ret); |
Michal Kazior | ab977bd | 2013-11-25 14:06:26 +0100 | [diff] [blame] | 2147 | goto err_free_early_irq; |
Michal Kazior | 98563d5 | 2013-11-08 08:01:33 +0100 | [diff] [blame] | 2148 | } |
| 2149 | |
| 2150 | ret = ath10k_pci_init_config(ar); |
| 2151 | if (ret) { |
| 2152 | ath10k_err("failed to setup init config: %d\n", ret); |
Michal Kazior | ab977bd | 2013-11-25 14:06:26 +0100 | [diff] [blame] | 2153 | goto err_free_early_irq; |
Michal Kazior | 98563d5 | 2013-11-08 08:01:33 +0100 | [diff] [blame] | 2154 | } |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2155 | |
| 2156 | ret = ath10k_pci_wake_target_cpu(ar); |
| 2157 | if (ret) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2158 | ath10k_err("could not wake up target CPU: %d\n", ret); |
Michal Kazior | ab977bd | 2013-11-25 14:06:26 +0100 | [diff] [blame] | 2159 | goto err_free_early_irq; |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2160 | } |
| 2161 | |
Kalle Valo | 95cbb6a | 2013-11-20 10:00:35 +0200 | [diff] [blame] | 2162 | if (ar_pci->num_msi_intrs > 1) |
| 2163 | irq_mode = "MSI-X"; |
| 2164 | else if (ar_pci->num_msi_intrs == 1) |
| 2165 | irq_mode = "MSI"; |
| 2166 | else |
| 2167 | irq_mode = "legacy"; |
| 2168 | |
Kalle Valo | 650b91f | 2013-11-20 10:00:49 +0200 | [diff] [blame] | 2169 | if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) |
| 2170 | ath10k_info("pci irq %s\n", irq_mode); |
Kalle Valo | 95cbb6a | 2013-11-20 10:00:35 +0200 | [diff] [blame] | 2171 | |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2172 | return 0; |
| 2173 | |
Michal Kazior | ab977bd | 2013-11-25 14:06:26 +0100 | [diff] [blame] | 2174 | err_free_early_irq: |
| 2175 | ath10k_pci_free_early_irq(ar); |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2176 | err_deinit_irq: |
| 2177 | ath10k_pci_deinit_irq(ar); |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2178 | err_ce: |
| 2179 | ath10k_pci_ce_deinit(ar); |
Michal Kazior | fc36e3f | 2014-02-10 17:14:22 +0100 | [diff] [blame] | 2180 | ath10k_pci_warm_reset(ar); |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2181 | err_ps: |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 2182 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2183 | ath10k_do_pci_sleep(ar); |
| 2184 | err: |
| 2185 | return ret; |
| 2186 | } |
| 2187 | |
Michal Kazior | fc36e3f | 2014-02-10 17:14:22 +0100 | [diff] [blame] | 2188 | static int ath10k_pci_hif_power_up(struct ath10k *ar) |
| 2189 | { |
| 2190 | int ret; |
| 2191 | |
| 2192 | /* |
| 2193 | * Hardware CUS232 version 2 has some issues with cold reset and the |
| 2194 | * preferred (and safer) way to perform a device reset is through a |
| 2195 | * warm reset. |
| 2196 | * |
| 2197 | * Warm reset doesn't always work though (notably after a firmware |
| 2198 | * crash) so fall back to cold reset if necessary. |
| 2199 | */ |
| 2200 | ret = __ath10k_pci_hif_power_up(ar, false); |
| 2201 | if (ret) { |
| 2202 | ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n", |
| 2203 | ret); |
| 2204 | |
| 2205 | ret = __ath10k_pci_hif_power_up(ar, true); |
| 2206 | if (ret) { |
| 2207 | ath10k_err("failed to power up target using cold reset too (%d)\n", |
| 2208 | ret); |
| 2209 | return ret; |
| 2210 | } |
| 2211 | } |
| 2212 | |
| 2213 | return 0; |
| 2214 | } |
| 2215 | |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2216 | static void ath10k_pci_hif_power_down(struct ath10k *ar) |
| 2217 | { |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 2218 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2219 | |
Michal Kazior | ab977bd | 2013-11-25 14:06:26 +0100 | [diff] [blame] | 2220 | ath10k_pci_free_early_irq(ar); |
| 2221 | ath10k_pci_kill_tasklet(ar); |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2222 | ath10k_pci_deinit_irq(ar); |
Michal Kazior | fc36e3f | 2014-02-10 17:14:22 +0100 | [diff] [blame] | 2223 | ath10k_pci_warm_reset(ar); |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 2224 | |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2225 | ath10k_pci_ce_deinit(ar); |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 2226 | if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2227 | ath10k_do_pci_sleep(ar); |
| 2228 | } |
| 2229 | |
Michal Kazior | 8cd13ca | 2013-07-16 09:38:54 +0200 | [diff] [blame] | 2230 | #ifdef CONFIG_PM |
| 2231 | |
| 2232 | #define ATH10K_PCI_PM_CONTROL 0x44 |
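
/* Editor's note: 0x44 appears to be the device's PCI power management
 * control/status register (PMCSR); per the PCI PM spec, bits [1:0]
 * encode the power state, so 0x0 is D0 and the 0x3 written below selects
 * D3hot. This interpretation is an assumption based on the values used
 * here. */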
| 2233 | |
| 2234 | static int ath10k_pci_hif_suspend(struct ath10k *ar) |
| 2235 | { |
| 2236 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2237 | struct pci_dev *pdev = ar_pci->pdev; |
| 2238 | u32 val; |
| 2239 | |
| 2240 | pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); |
| 2241 | |
| 2242 | if ((val & 0x000000ff) != 0x3) { |
| 2243 | pci_save_state(pdev); |
| 2244 | pci_disable_device(pdev); |
| 2245 | pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, |
| 2246 | (val & 0xffffff00) | 0x03); |
| 2247 | } |
| 2248 | |
| 2249 | return 0; |
| 2250 | } |
| 2251 | |
| 2252 | static int ath10k_pci_hif_resume(struct ath10k *ar) |
| 2253 | { |
| 2254 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2255 | struct pci_dev *pdev = ar_pci->pdev; |
| 2256 | u32 val; |
| 2257 | |
| 2258 | pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val); |
| 2259 | |
| 2260 | if ((val & 0x000000ff) != 0) { |
| 2261 | pci_restore_state(pdev); |
| 2262 | pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL, |
| 2263 | val & 0xffffff00); |
| 2264 | /* |
| 2265 | * Suspend/Resume resets the PCI configuration space, |
| 2266 | * so we have to re-disable the RETRY_TIMEOUT register (0x41) |
| 2267 | * to keep PCI Tx retries from interfering with C3 CPU state |
| 2268 | */ |
| 2269 | pci_read_config_dword(pdev, 0x40, &val); |
| 2270 | |
| 2271 | if ((val & 0x0000ff00) != 0) |
| 2272 | pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); |
| 2273 | } |
| 2274 | |
| 2275 | return 0; |
| 2276 | } |
| 2277 | #endif |
| 2278 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2279 | static const struct ath10k_hif_ops ath10k_pci_hif_ops = { |
Michal Kazior | 726346f | 2014-02-27 18:50:04 +0200 | [diff] [blame^] | 2280 | .tx_sg = ath10k_pci_hif_tx_sg, |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2281 | .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, |
| 2282 | .start = ath10k_pci_hif_start, |
| 2283 | .stop = ath10k_pci_hif_stop, |
| 2284 | .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, |
| 2285 | .get_default_pipe = ath10k_pci_hif_get_default_pipe, |
| 2286 | .send_complete_check = ath10k_pci_hif_send_complete_check, |
Michal Kazior | e799bbf | 2013-07-05 16:15:12 +0300 | [diff] [blame] | 2287 | .set_callbacks = ath10k_pci_hif_set_callbacks, |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2288 | .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, |
Michal Kazior | 8c5c536 | 2013-07-16 09:38:50 +0200 | [diff] [blame] | 2289 | .power_up = ath10k_pci_hif_power_up, |
| 2290 | .power_down = ath10k_pci_hif_power_down, |
Michal Kazior | 8cd13ca | 2013-07-16 09:38:54 +0200 | [diff] [blame] | 2291 | #ifdef CONFIG_PM |
| 2292 | .suspend = ath10k_pci_hif_suspend, |
| 2293 | .resume = ath10k_pci_hif_resume, |
| 2294 | #endif |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2295 | }; |
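
/*
 * Editor's note (illustrative sketch): the core does not call these ops
 * directly; it dispatches through thin inline wrappers around
 * ar->hif.ops, along the lines of the following (the wrapper lives in
 * hif.h; its exact form is an assumption):
 *
 *	static inline int ath10k_hif_power_up(struct ath10k *ar)
 *	{
 *		return ar->hif.ops->power_up(ar);
 *	}
 *
 * This keeps the PCI specifics swappable behind the hif interface.
 */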
| 2296 | |
| 2297 | static void ath10k_pci_ce_tasklet(unsigned long ptr) |
| 2298 | { |
Michal Kazior | 87263e5 | 2013-08-27 13:08:01 +0200 | [diff] [blame] | 2299 | struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2300 | struct ath10k_pci *ar_pci = pipe->ar_pci; |
| 2301 | |
| 2302 | ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); |
| 2303 | } |
| 2304 | |
| 2305 | static void ath10k_msi_err_tasklet(unsigned long data) |
| 2306 | { |
| 2307 | struct ath10k *ar = (struct ath10k *)data; |
| 2308 | |
| 2309 | ath10k_pci_fw_interrupt_handler(ar); |
| 2310 | } |
| 2311 | |
| 2312 | /* |
| 2313 | * Handler for a per-engine interrupt on a PARTICULAR CE. |
| 2314 | * This is used in cases where each CE has a private MSI interrupt. |
| 2315 | */ |
| 2316 | static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg) |
| 2317 | { |
| 2318 | struct ath10k *ar = arg; |
| 2319 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2320 | int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; |
| 2321 | |
Dan Carpenter | e574267 | 2013-06-18 10:28:46 +0300 | [diff] [blame] | 2322 | if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2323 | ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id); |
| 2324 | return IRQ_HANDLED; |
| 2325 | } |
| 2326 | |
|      | 2327 | 	/*
|      | 2328 | 	 * NOTE: We are able to derive ce_id from irq because we
|      | 2329 | 	 * use a one-to-one mapping for CEs 0..5.
|      | 2330 | 	 * CEs 6 & 7 do not use interrupts at all.
|      | 2331 | 	 *
|      | 2332 | 	 * This mapping must be kept in sync with the mapping
|      | 2333 | 	 * used by firmware.
|      | 2334 | 	 */
| 2335 | tasklet_schedule(&ar_pci->pipe_info[ce_id].intr); |
| 2336 | return IRQ_HANDLED; |
| 2337 | } |
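
/*
 * Editor's note (illustrative): with a full block of MSI_NUM_REQUEST
 * vectors, the layout set up by ath10k_pci_request_irq_msix() below is:
 *
 *	pdev->irq + MSI_ASSIGN_FW             -> firmware error interrupt
 *	pdev->irq + MSI_ASSIGN_CE_INITIAL + n -> per-engine interrupt, CE n
 */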
| 2338 | |
| 2339 | static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg) |
| 2340 | { |
| 2341 | struct ath10k *ar = arg; |
| 2342 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2343 | |
| 2344 | tasklet_schedule(&ar_pci->msi_fw_err); |
| 2345 | return IRQ_HANDLED; |
| 2346 | } |
| 2347 | |
| 2348 | /* |
| 2349 | * Top-level interrupt handler for all PCI interrupts from a Target. |
| 2350 | * When a block of MSI interrupts is allocated, this top-level handler |
| 2351 | * is not used; instead, we directly call the correct sub-handler. |
| 2352 | */ |
| 2353 | static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) |
| 2354 | { |
| 2355 | struct ath10k *ar = arg; |
| 2356 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2357 | |
| 2358 | if (ar_pci->num_msi_intrs == 0) { |
Michal Kazior | e539887 | 2013-11-25 14:06:20 +0100 | [diff] [blame] | 2359 | if (!ath10k_pci_irq_pending(ar)) |
| 2360 | return IRQ_NONE; |
| 2361 | |
Michal Kazior | 2685218 | 2013-11-25 14:06:25 +0100 | [diff] [blame] | 2362 | ath10k_pci_disable_and_clear_legacy_irq(ar); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2363 | } |
| 2364 | |
| 2365 | tasklet_schedule(&ar_pci->intr_tq); |
| 2366 | |
| 2367 | return IRQ_HANDLED; |
| 2368 | } |
| 2369 | |
Michal Kazior | ab977bd | 2013-11-25 14:06:26 +0100 | [diff] [blame] | 2370 | static void ath10k_pci_early_irq_tasklet(unsigned long data) |
| 2371 | { |
| 2372 | struct ath10k *ar = (struct ath10k *)data; |
| 2373 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2374 | u32 fw_ind; |
| 2375 | int ret; |
| 2376 | |
| 2377 | ret = ath10k_pci_wake(ar); |
| 2378 | if (ret) { |
| 2379 | ath10k_warn("failed to wake target in early irq tasklet: %d\n", |
| 2380 | ret); |
| 2381 | return; |
| 2382 | } |
| 2383 | |
| 2384 | fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address); |
| 2385 | if (fw_ind & FW_IND_EVENT_PENDING) { |
| 2386 | ath10k_pci_write32(ar, ar_pci->fw_indicator_address, |
| 2387 | fw_ind & ~FW_IND_EVENT_PENDING); |
| 2388 | |
| 2389 | /* Some structures are unavailable during early boot or at |
| 2390 | * driver teardown so just print that the device has crashed. */ |
| 2391 | ath10k_warn("device crashed - no diagnostics available\n"); |
| 2392 | } |
| 2393 | |
| 2394 | ath10k_pci_sleep(ar); |
| 2395 | ath10k_pci_enable_legacy_irq(ar); |
| 2396 | } |
| 2397 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2398 | static void ath10k_pci_tasklet(unsigned long data) |
| 2399 | { |
| 2400 | struct ath10k *ar = (struct ath10k *)data; |
| 2401 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2402 | |
| 2403 | ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */ |
| 2404 | ath10k_ce_per_engine_service_any(ar); |
| 2405 | |
Michal Kazior | 2685218 | 2013-11-25 14:06:25 +0100 | [diff] [blame] | 2406 | /* Re-enable legacy irq that was disabled in the irq handler */ |
| 2407 | if (ar_pci->num_msi_intrs == 0) |
| 2408 | ath10k_pci_enable_legacy_irq(ar); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2409 | } |
| 2410 | |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2411 | static int ath10k_pci_request_irq_msix(struct ath10k *ar) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2412 | { |
| 2413 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2414 | int ret, i; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2415 | |
| 2416 | ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, |
| 2417 | ath10k_pci_msi_fw_handler, |
| 2418 | IRQF_SHARED, "ath10k_pci", ar); |
Michal Kazior | 591ecdb | 2013-07-31 10:55:15 +0200 | [diff] [blame] | 2419 | if (ret) { |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2420 | ath10k_warn("failed to request MSI-X fw irq %d: %d\n", |
Michal Kazior | 591ecdb | 2013-07-31 10:55:15 +0200 | [diff] [blame] | 2421 | ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2422 | return ret; |
Michal Kazior | 591ecdb | 2013-07-31 10:55:15 +0200 | [diff] [blame] | 2423 | } |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2424 | |
| 2425 | for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) { |
| 2426 | ret = request_irq(ar_pci->pdev->irq + i, |
| 2427 | ath10k_pci_per_engine_handler, |
| 2428 | IRQF_SHARED, "ath10k_pci", ar); |
| 2429 | if (ret) { |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2430 | ath10k_warn("failed to request MSI-X ce irq %d: %d\n", |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2431 | ar_pci->pdev->irq + i, ret); |
| 2432 | |
Michal Kazior | 87b1423 | 2013-06-26 08:50:50 +0200 | [diff] [blame] | 2433 | for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--) |
| 2434 | free_irq(ar_pci->pdev->irq + i, ar); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2435 | |
Michal Kazior | 87b1423 | 2013-06-26 08:50:50 +0200 | [diff] [blame] | 2436 | free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2437 | return ret; |
| 2438 | } |
| 2439 | } |
| 2440 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2441 | return 0; |
| 2442 | } |
| 2443 | |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2444 | static int ath10k_pci_request_irq_msi(struct ath10k *ar) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2445 | { |
| 2446 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2447 | int ret; |
| 2448 | |
| 2449 | ret = request_irq(ar_pci->pdev->irq, |
| 2450 | ath10k_pci_interrupt_handler, |
| 2451 | IRQF_SHARED, "ath10k_pci", ar); |
Kalle Valo | f378274 | 2013-10-17 11:36:15 +0300 | [diff] [blame] | 2452 | if (ret) { |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2453 | ath10k_warn("failed to request MSI irq %d: %d\n", |
| 2454 | ar_pci->pdev->irq, ret); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2455 | return ret; |
Kalle Valo | f378274 | 2013-10-17 11:36:15 +0300 | [diff] [blame] | 2456 | } |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2457 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2458 | return 0; |
| 2459 | } |
| 2460 | |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2461 | static int ath10k_pci_request_irq_legacy(struct ath10k *ar) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2462 | { |
| 2463 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2464 | int ret; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2465 | |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2466 | ret = request_irq(ar_pci->pdev->irq, |
| 2467 | ath10k_pci_interrupt_handler, |
| 2468 | IRQF_SHARED, "ath10k_pci", ar); |
Kalle Valo | f378274 | 2013-10-17 11:36:15 +0300 | [diff] [blame] | 2469 | if (ret) { |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2470 | ath10k_warn("failed to request legacy irq %d: %d\n", |
| 2471 | ar_pci->pdev->irq, ret); |
Kalle Valo | f378274 | 2013-10-17 11:36:15 +0300 | [diff] [blame] | 2472 | return ret; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2473 | } |
| 2474 | |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2475 | return 0; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2476 | } |
| 2477 | |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2478 | static int ath10k_pci_request_irq(struct ath10k *ar) |
| 2479 | { |
| 2480 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2481 | |
| 2482 | switch (ar_pci->num_msi_intrs) { |
| 2483 | case 0: |
| 2484 | return ath10k_pci_request_irq_legacy(ar); |
| 2485 | case 1: |
| 2486 | return ath10k_pci_request_irq_msi(ar); |
| 2487 | case MSI_NUM_REQUEST: |
| 2488 | return ath10k_pci_request_irq_msix(ar); |
| 2489 | } |
| 2490 | |
| 2491 | ath10k_warn("unknown irq configuration upon request\n"); |
| 2492 | return -EINVAL; |
| 2493 | } |
| 2494 | |
| 2495 | static void ath10k_pci_free_irq(struct ath10k *ar) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2496 | { |
| 2497 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2498 | int i; |
| 2499 | |
|      | 2500 | 	/* There's at least one interrupt regardless of whether it's
|      | 2501 | 	 * legacy INTR, MSI or MSI-X */
| 2502 | for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) |
| 2503 | free_irq(ar_pci->pdev->irq + i, ar); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2504 | } |
| 2505 | |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2506 | static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) |
| 2507 | { |
| 2508 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2509 | int i; |
| 2510 | |
| 2511 | tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); |
| 2512 | tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, |
| 2513 | (unsigned long)ar); |
Michal Kazior | ab977bd | 2013-11-25 14:06:26 +0100 | [diff] [blame] | 2514 | tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet, |
| 2515 | (unsigned long)ar); |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2516 | |
| 2517 | for (i = 0; i < CE_COUNT; i++) { |
| 2518 | ar_pci->pipe_info[i].ar_pci = ar_pci; |
| 2519 | tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet, |
| 2520 | (unsigned long)&ar_pci->pipe_info[i]); |
| 2521 | } |
| 2522 | } |
| 2523 | |
| 2524 | static int ath10k_pci_init_irq(struct ath10k *ar) |
| 2525 | { |
| 2526 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
Michal Kazior | cfe9c45 | 2013-11-25 14:06:27 +0100 | [diff] [blame] | 2527 | bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X, |
| 2528 | ar_pci->features); |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2529 | int ret; |
| 2530 | |
| 2531 | ath10k_pci_init_irq_tasklets(ar); |
| 2532 | |
Michal Kazior | cfe9c45 | 2013-11-25 14:06:27 +0100 | [diff] [blame] | 2533 | if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO && |
| 2534 | !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) |
| 2535 | ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode); |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2536 | |
| 2537 | /* Try MSI-X */ |
Michal Kazior | cfe9c45 | 2013-11-25 14:06:27 +0100 | [diff] [blame] | 2538 | if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) { |
| 2539 | ar_pci->num_msi_intrs = MSI_NUM_REQUEST; |
Alexander Gordeev | 5ad6867 | 2014-02-13 17:50:02 +0200 | [diff] [blame] | 2540 | ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs, |
| 2541 | ar_pci->num_msi_intrs); |
| 2542 | if (ret > 0) |
Michal Kazior | cfe9c45 | 2013-11-25 14:06:27 +0100 | [diff] [blame] | 2543 | return 0; |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2544 | |
Michal Kazior | cfe9c45 | 2013-11-25 14:06:27 +0100 | [diff] [blame] | 2545 | /* fall-through */ |
| 2546 | } |
| 2547 | |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2548 | /* Try MSI */ |
Michal Kazior | cfe9c45 | 2013-11-25 14:06:27 +0100 | [diff] [blame] | 2549 | if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { |
| 2550 | ar_pci->num_msi_intrs = 1; |
| 2551 | ret = pci_enable_msi(ar_pci->pdev); |
| 2552 | if (ret == 0) |
| 2553 | return 0; |
| 2554 | |
| 2555 | /* fall-through */ |
| 2556 | } |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2557 | |
| 2558 | /* Try legacy irq |
| 2559 | * |
|      | 2560 | 	 * A potential race occurs here: the CORE_BASE write
|      | 2561 | 	 * depends on the target correctly decoding the AXI address, but
|      | 2562 | 	 * the host won't know when the target has written its BAR to
|      | 2563 | 	 * CORE_CTRL. The write might get lost if the target has not yet
|      | 2564 | 	 * written the BAR. For now, work around the race by repeating the
|      | 2565 | 	 * write in the synchronization check below. */
| 2566 | ar_pci->num_msi_intrs = 0; |
| 2567 | |
| 2568 | ret = ath10k_pci_wake(ar); |
| 2569 | if (ret) { |
| 2570 | ath10k_warn("failed to wake target: %d\n", ret); |
| 2571 | return ret; |
| 2572 | } |
| 2573 | |
| 2574 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, |
| 2575 | PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); |
| 2576 | ath10k_pci_sleep(ar); |
| 2577 | |
| 2578 | return 0; |
| 2579 | } |
| 2580 | |
| 2581 | static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar) |
| 2582 | { |
| 2583 | int ret; |
| 2584 | |
| 2585 | ret = ath10k_pci_wake(ar); |
| 2586 | if (ret) { |
| 2587 | ath10k_warn("failed to wake target: %d\n", ret); |
| 2588 | return ret; |
| 2589 | } |
| 2590 | |
| 2591 | ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, |
| 2592 | 0); |
| 2593 | ath10k_pci_sleep(ar); |
| 2594 | |
| 2595 | return 0; |
| 2596 | } |
| 2597 | |
| 2598 | static int ath10k_pci_deinit_irq(struct ath10k *ar) |
| 2599 | { |
| 2600 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2601 | |
| 2602 | switch (ar_pci->num_msi_intrs) { |
| 2603 | case 0: |
| 2604 | return ath10k_pci_deinit_irq_legacy(ar); |
| 2605 | case 1: |
| 2606 | /* fall-through */ |
| 2607 | case MSI_NUM_REQUEST: |
| 2608 | pci_disable_msi(ar_pci->pdev); |
| 2609 | return 0; |
Alexander Gordeev | bb8b621 | 2014-02-13 17:50:01 +0200 | [diff] [blame] | 2610 | default: |
| 2611 | pci_disable_msi(ar_pci->pdev); |
Michal Kazior | fc15ca1 | 2013-11-25 14:06:21 +0100 | [diff] [blame] | 2612 | } |
| 2613 | |
| 2614 | ath10k_warn("unknown irq configuration upon deinit (%d)\n", ar_pci->num_msi_intrs);
| 2615 | return -EINVAL; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2616 | } |
| 2617 | |
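| | /*
| | * Poll the firmware indicator register until the target reports
| | * FW_IND_INITIALIZED, for at most ~3 s (300 polls x 10 ms). In legacy
| | * interrupt mode (num_msi_intrs == 0) the interrupt-enable write is
| | * repeated on every poll to work around the race noted in
| | * ath10k_pci_init_irq().
| | */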
Michal Kazior | d7fb47f | 2013-11-08 08:01:26 +0100 | [diff] [blame] | 2618 | static int ath10k_pci_wait_for_target_init(struct ath10k *ar) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2619 | { |
| 2620 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
| 2621 | int wait_limit = 300; /* 3 sec */ |
Kalle Valo | f378274 | 2013-10-17 11:36:15 +0300 | [diff] [blame] | 2622 | int ret; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2623 | |
Michal Kazior | 98563d5 | 2013-11-08 08:01:33 +0100 | [diff] [blame] | 2624 | ret = ath10k_pci_wake(ar); |
Kalle Valo | f378274 | 2013-10-17 11:36:15 +0300 | [diff] [blame] | 2625 | if (ret) { |
Michal Kazior | 5b2589f | 2013-11-08 08:01:30 +0100 | [diff] [blame] | 2626 | ath10k_err("failed to wake up target: %d\n", ret); |
Kalle Valo | f378274 | 2013-10-17 11:36:15 +0300 | [diff] [blame] | 2627 | return ret; |
| 2628 | } |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2629 | |
| 2630 | while (wait_limit-- && |
| 2631 | !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) & |
| 2632 | FW_IND_INITIALIZED)) { |
| 2633 | if (ar_pci->num_msi_intrs == 0) |
| 2634 | /* Fix potential race by repeating CORE_BASE writes */ |
| 2635 | iowrite32(PCIE_INTR_FIRMWARE_MASK | |
| 2636 | PCIE_INTR_CE_MASK_ALL, |
| 2637 | ar_pci->mem + (SOC_CORE_BASE_ADDRESS +
| 2638 | PCIE_INTR_ENABLE_ADDRESS)); |
| 2639 | mdelay(10); |
| 2640 | } |
| 2641 | |
| 2642 | if (wait_limit < 0) { |
Michal Kazior | 5b2589f | 2013-11-08 08:01:30 +0100 | [diff] [blame] | 2643 | ath10k_err("target stalled\n"); |
| 2644 | ret = -EIO; |
| 2645 | goto out; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2646 | } |
| 2647 | |
Michal Kazior | 5b2589f | 2013-11-08 08:01:30 +0100 | [diff] [blame] | 2648 | out: |
Michal Kazior | 98563d5 | 2013-11-08 08:01:33 +0100 | [diff] [blame] | 2649 | ath10k_pci_sleep(ar); |
Michal Kazior | 5b2589f | 2013-11-08 08:01:30 +0100 | [diff] [blame] | 2650 | return ret; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2651 | } |
| 2652 | |
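| | /*
| | * Cold reset: assert the reset bit in SOC_GLOBAL_RESET, poll RTC_STATE
| | * until the SoC reports the cold-reset state, then deassert and poll
| | * until that state clears again. This resets the whole target,
| | * including the PCIe core.
| | */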
Michal Kazior | fc36e3f | 2014-02-10 17:14:22 +0100 | [diff] [blame] | 2653 | static int ath10k_pci_cold_reset(struct ath10k *ar) |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2654 | { |
Michal Kazior | 5b2589f | 2013-11-08 08:01:30 +0100 | [diff] [blame] | 2655 | int i, ret; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2656 | u32 val; |
| 2657 | |
Michal Kazior | 5b2589f | 2013-11-08 08:01:30 +0100 | [diff] [blame] | 2658 | ret = ath10k_do_pci_wake(ar); |
| 2659 | if (ret) { |
| 2660 | ath10k_err("failed to wake up target: %d\n", ret);
| 2662 | return ret; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2663 | } |
| 2664 | |
| 2665 | /* Put Target, including PCIe, into RESET. */ |
Kalle Valo | e479ed4 | 2013-09-01 10:01:53 +0300 | [diff] [blame] | 2666 | val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2667 | val |= 1; |
Kalle Valo | e479ed4 | 2013-09-01 10:01:53 +0300 | [diff] [blame] | 2668 | ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2669 | |
| 2670 | for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { |
Kalle Valo | e479ed4 | 2013-09-01 10:01:53 +0300 | [diff] [blame] | 2671 | if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) & |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2672 | RTC_STATE_COLD_RESET_MASK) |
| 2673 | break; |
| 2674 | msleep(1); |
| 2675 | } |
| 2676 | |
| 2677 | /* Pull Target, including PCIe, out of RESET. */ |
| 2678 | val &= ~1; |
Kalle Valo | e479ed4 | 2013-09-01 10:01:53 +0300 | [diff] [blame] | 2679 | ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2680 | |
| 2681 | for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { |
Kalle Valo | e479ed4 | 2013-09-01 10:01:53 +0300 | [diff] [blame] | 2682 | if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) & |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2683 | RTC_STATE_COLD_RESET_MASK)) |
| 2684 | break; |
| 2685 | msleep(1); |
| 2686 | } |
| 2687 | |
Michal Kazior | 5b2589f | 2013-11-08 08:01:30 +0100 | [diff] [blame] | 2688 | ath10k_do_pci_sleep(ar); |
| 2689 | return 0; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2690 | } |
| 2691 | |
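| | /* Log each optional feature bit that was detected for this device. */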
| 2692 | static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) |
| 2693 | { |
| 2694 | int i; |
| 2695 | |
| 2696 | for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) { |
| 2697 | if (!test_bit(i, ar_pci->features)) |
| 2698 | continue; |
| 2699 | |
| 2700 | switch (i) { |
| 2701 | case ATH10K_PCI_FEATURE_MSI_X: |
Kalle Valo | 24cfade | 2013-09-08 17:55:50 +0300 | [diff] [blame] | 2702 | ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n"); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2703 | break; |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 2704 | case ATH10K_PCI_FEATURE_SOC_POWER_SAVE: |
Kalle Valo | 24cfade | 2013-09-08 17:55:50 +0300 | [diff] [blame] | 2705 | ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n"); |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 2706 | break; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2707 | } |
| 2708 | } |
| 2709 | } |
| 2710 | |
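| | /*
| | * Device probe: allocate the bus private struct and the driver core
| | * instance, bring up PCI (BAR assignment, 32-bit DMA masks, bus
| | * mastering, BAR iomap), read the chip id and hand the device over to
| | * ath10k_core_register(). Errors unwind via the err_* labels in
| | * reverse order of acquisition.
| | */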
| 2711 | static int ath10k_pci_probe(struct pci_dev *pdev, |
| 2712 | const struct pci_device_id *pci_dev) |
| 2713 | { |
| 2714 | void __iomem *mem; |
| 2715 | int ret = 0; |
| 2716 | struct ath10k *ar; |
| 2717 | struct ath10k_pci *ar_pci; |
Kalle Valo | e01ae68 | 2013-09-01 11:22:14 +0300 | [diff] [blame] | 2718 | u32 lcr_val, chip_id; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2719 | |
| 2720 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); |
| 2721 | |
| 2722 | ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL); |
| 2723 | if (ar_pci == NULL) |
| 2724 | return -ENOMEM; |
| 2725 | |
| 2726 | ar_pci->pdev = pdev; |
| 2727 | ar_pci->dev = &pdev->dev; |
| 2728 | |
| 2729 | switch (pci_dev->device) { |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2730 | case QCA988X_2_0_DEVICE_ID: |
| 2731 | set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features); |
| 2732 | break; |
| 2733 | default: |
| 2734 | ret = -ENODEV; |
Masanari Iida | 6d3be30 | 2013-09-30 23:19:09 +0900 | [diff] [blame] | 2735 | ath10k_err("unknown device id: 0x%x\n", pci_dev->device);
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2736 | goto err_ar_pci; |
| 2737 | } |
| 2738 | |
Bartosz Markowski | 8cc8df9 | 2013-08-02 09:58:49 +0200 | [diff] [blame] | 2739 | if (ath10k_target_ps) |
| 2740 | set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features); |
| 2741 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2742 | ath10k_pci_dump_features(ar_pci); |
| 2743 | |
Michal Kazior | 3a0861f | 2013-07-05 16:15:06 +0300 | [diff] [blame] | 2744 | ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2745 | if (!ar) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2746 | ath10k_err("failed to create driver core\n"); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2747 | ret = -EINVAL; |
| 2748 | goto err_ar_pci; |
| 2749 | } |
| 2750 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2751 | ar_pci->ar = ar; |
| 2752 | ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS; |
| 2753 | atomic_set(&ar_pci->keep_awake_count, 0); |
| 2754 | |
| 2755 | pci_set_drvdata(pdev, ar); |
| 2756 | |
| 2757 | /* |
| 2758 | * Without any knowledge of the Host, the Target may have been reset or |
| 2759 | * power cycled and its Config Space may no longer reflect the PCI |
| 2760 | * address space that was assigned earlier by the PCI infrastructure. |
| 2761 | * Refresh it now. |
| 2762 | */ |
| 2763 | ret = pci_assign_resource(pdev, BAR_NUM); |
| 2764 | if (ret) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2765 | ath10k_err("failed to assign PCI space: %d\n", ret); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2766 | goto err_ar; |
| 2767 | } |
| 2768 | |
| 2769 | ret = pci_enable_device(pdev); |
| 2770 | if (ret) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2771 | ath10k_err("failed to enable PCI device: %d\n", ret); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2772 | goto err_ar; |
| 2773 | } |
| 2774 | |
| 2775 | /* Request MMIO resources */ |
| 2776 | ret = pci_request_region(pdev, BAR_NUM, "ath"); |
| 2777 | if (ret) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2778 | ath10k_err("failed to request MMIO region: %d\n", ret); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2779 | goto err_device; |
| 2780 | } |
| 2781 | |
| 2782 | /* |
| 2783 | * Target structures have a limit of 32 bit DMA pointers. |
| 2784 | * DMA pointers can be wider than 32 bits by default on some systems. |
| 2785 | */ |
| 2786 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
| 2787 | if (ret) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2788 | ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2789 | goto err_region; |
| 2790 | } |
| 2791 | |
| 2792 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
| 2793 | if (ret) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2794 | ath10k_err("failed to set consistent DMA mask to 32-bit\n"); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2795 | goto err_region; |
| 2796 | } |
| 2797 | |
| 2798 | /* Set bus master bit in PCI_COMMAND to enable DMA */ |
| 2799 | pci_set_master(pdev); |
| 2800 | |
| 2801 | /* |
| 2802 | * Temporary FIX: disable ASPM |
| 2803 | * Will be removed after the OTP is programmed |
| 2804 | */ |
| 2805 | pci_read_config_dword(pdev, 0x80, &lcr_val); |
| 2806 | pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00)); |
| 2807 | |
| 2808 | /* Arrange for access to Target SoC registers. */ |
| 2809 | mem = pci_iomap(pdev, BAR_NUM, 0); |
| 2810 | if (!mem) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2811 | ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2812 | ret = -EIO; |
| 2813 | goto err_master; |
| 2814 | } |
| 2815 | |
| 2816 | ar_pci->mem = mem; |
| 2817 | |
| 2818 | spin_lock_init(&ar_pci->ce_lock); |
| 2819 | |
Kalle Valo | e01ae68 | 2013-09-01 11:22:14 +0300 | [diff] [blame] | 2820 | ret = ath10k_do_pci_wake(ar); |
| 2821 | if (ret) { |
| 2822 | ath10k_err("failed to wake up target to read chip id: %d\n", ret);
Wei Yongjun | 12eb087 | 2013-10-30 13:24:39 +0800 | [diff] [blame] | 2823 | goto err_iomap; |
Kalle Valo | e01ae68 | 2013-09-01 11:22:14 +0300 | [diff] [blame] | 2824 | } |
| 2825 | |
Kalle Valo | 233eb97 | 2013-10-16 16:46:11 +0300 | [diff] [blame] | 2826 | chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); |
Kalle Valo | e01ae68 | 2013-09-01 11:22:14 +0300 | [diff] [blame] | 2827 | |
| 2828 | ath10k_do_pci_sleep(ar); |
| 2829 | |
Kalle Valo | 24cfade | 2013-09-08 17:55:50 +0300 | [diff] [blame] | 2830 | ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); |
| 2831 | |
Kalle Valo | e01ae68 | 2013-09-01 11:22:14 +0300 | [diff] [blame] | 2832 | ret = ath10k_core_register(ar, chip_id); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2833 | if (ret) { |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2834 | ath10k_err("failed to register driver core: %d\n", ret); |
Michal Kazior | 32270b6 | 2013-08-02 09:15:47 +0200 | [diff] [blame] | 2835 | goto err_iomap; |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2836 | } |
| 2837 | |
| 2838 | return 0; |
| 2839 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2840 | err_iomap: |
| 2841 | pci_iounmap(pdev, mem); |
| 2842 | err_master: |
| 2843 | pci_clear_master(pdev); |
| 2844 | err_region: |
| 2845 | pci_release_region(pdev, BAR_NUM); |
| 2846 | err_device: |
| 2847 | pci_disable_device(pdev); |
| 2848 | err_ar: |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2849 | ath10k_core_destroy(ar); |
| 2850 | err_ar_pci: |
| 2851 | /* call HIF PCI free here */ |
| 2852 | kfree(ar_pci); |
| 2853 | |
| 2854 | return ret; |
| 2855 | } |
| 2856 | |
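| | /* Inverse of ath10k_pci_probe(): unregister from the driver core,
| | * then release PCI resources in reverse order of acquisition. */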
| 2857 | static void ath10k_pci_remove(struct pci_dev *pdev) |
| 2858 | { |
| 2859 | struct ath10k *ar = pci_get_drvdata(pdev); |
| 2860 | struct ath10k_pci *ar_pci; |
| 2861 | |
| 2862 | ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); |
| 2863 | |
| 2864 | if (!ar) |
| 2865 | return; |
| 2866 | |
| 2867 | ar_pci = ath10k_pci_priv(ar); |
| 2868 | |
| 2869 | if (!ar_pci) |
| 2870 | return; |
| 2871 | |
| 2872 | tasklet_kill(&ar_pci->msi_fw_err); |
| 2873 | |
| 2874 | ath10k_core_unregister(ar); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2875 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2876 | pci_iounmap(pdev, ar_pci->mem); |
| 2877 | pci_release_region(pdev, BAR_NUM); |
| 2878 | pci_clear_master(pdev); |
| 2879 | pci_disable_device(pdev); |
| 2880 | |
| 2881 | ath10k_core_destroy(ar); |
| 2882 | kfree(ar_pci); |
| 2883 | } |
| 2884 | |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2885 | MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); |
| 2886 | |
| 2887 | static struct pci_driver ath10k_pci_driver = { |
| 2888 | .name = "ath10k_pci", |
| 2889 | .id_table = ath10k_pci_id_table, |
| 2890 | .probe = ath10k_pci_probe, |
| 2891 | .remove = ath10k_pci_remove, |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2892 | }; |
| 2893 | |
| 2894 | static int __init ath10k_pci_init(void) |
| 2895 | { |
| 2896 | int ret; |
| 2897 | |
| 2898 | ret = pci_register_driver(&ath10k_pci_driver); |
| 2899 | if (ret) |
Michal Kazior | 1d2b48d | 2013-11-08 08:01:34 +0100 | [diff] [blame] | 2900 | ath10k_err("failed to register PCI driver: %d\n", ret); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2901 | |
| 2902 | return ret; |
| 2903 | } |
| 2904 | module_init(ath10k_pci_init); |
| 2905 | |
| 2906 | static void __exit ath10k_pci_exit(void) |
| 2907 | { |
| 2908 | pci_unregister_driver(&ath10k_pci_driver); |
| 2909 | } |
| 2910 | |
| 2911 | module_exit(ath10k_pci_exit); |
| 2912 | |
| 2913 | MODULE_AUTHOR("Qualcomm Atheros"); |
| 2914 | MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); |
| 2915 | MODULE_LICENSE("Dual BSD/GPL"); |
Kalle Valo | 5e3dd15 | 2013-06-12 20:52:10 +0300 | [diff] [blame] | 2916 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); |
| 2917 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE); |
| 2918 | MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); |