/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
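
/*
 * Illustrative sketch only (not part of the driver): a hypothetical HIF-level
 * caller keeps the destination ring fed with receive buffers, posts sends
 * with a per-transfer context and later reaps completions through the
 * callbacks.  The pipe, buffer and context names below are assumptions made
 * for the example.
 *
 *	ret = ath10k_ce_recv_buf_enqueue(rx_pipe, rx_ctx, rx_paddr);
 *	if (ret)
 *		return ret;
 *
 *	ret = ath10k_ce_send(tx_pipe, tx_ctx, tx_paddr, nbytes,
 *			     transfer_id, 0);
 *	if (ret)
 *		return ret;
 */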

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_intr_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							 u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr & ~CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}

/*
 * Guts of ath10k_ce_send.  Also used directly for scatter-gather sends,
 * where each segment but the last is queued with CE_SEND_FLAG_GATHER.
 * The caller takes responsibility for any needed locking.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn("%s: attempt to send more than max allowed (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);
	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc->addr = __cpu_to_le32(buffer);
	sdesc->nbytes = __cpu_to_le16(nbytes);
	sdesc->flags = __cpu_to_le16(desc_flags);

	*desc = *sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/*
	 * WORKAROUND: only commit the hardware write index for the final,
	 * non-gather segment so that an incomplete scatter-gather transfer
	 * can still be reverted (see __ath10k_ce_send_revert()).
	 */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ar_pci->ce_lock);

	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before index register is updated)
	 * that needs to be cleaned up.
	 */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}
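
/*
 * Illustrative sketch only: a scatter-gather send queues every segment but
 * the last with CE_SEND_FLAG_GATHER, so the hardware write index is only
 * committed once, by the final segment.  The items array and its field names
 * are assumptions; the caller must hold ce_lock around the _nolock calls.
 *
 *	for (i = 0; i < n_items - 1; i++) {
 *		err = ath10k_ce_send_nolock(ce_pipe, items[i].ctx,
 *					    items[i].paddr, items[i].len,
 *					    items[i].transfer_id,
 *					    CE_SEND_FLAG_GATHER);
 *		if (err)
 *			goto unroll;
 *	}
 *
 *	err = ath10k_ce_send_nolock(ce_pipe, items[i].ctx, items[i].paddr,
 *				    items[i].len, items[i].transfer_id, 0);
 *	if (err)
 *		goto unroll;
 *
 * where the unroll path calls __ath10k_ce_send_revert() once for each segment
 * that was already queued.
 */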

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int delta;

	spin_lock_bh(&ar_pci->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ar_pci->ce_lock);

	return delta;
}

int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

		/* Update destination descriptor */
		desc->addr = __cpu_to_le32(buffer);
		desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);
	return ret;
}
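
/*
 * Illustrative sketch only: a hypothetical refill routine maps a fresh buffer
 * for DMA and hands it to the copy engine; the skb pointer doubles as the
 * per-transfer context that is echoed back on completion.  The pipe and
 * buffer-size names are assumptions.
 *
 *	skb = dev_alloc_skb(buf_sz);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	paddr = dma_map_single(ar->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(ar->dev, paddr)) {
 *		dev_kfree_skb_any(skb);
 *		return -EIO;
 *	}
 *
 *	ret = ath10k_ce_recv_buf_enqueue(ce_pipe, skb, paddr);
 */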

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp,
						unsigned int *flagsp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = __le32_to_cpu(sdesc.addr);
	*nbytesp = nbytes;
	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
		*flagsp = CE_RECV_FLAG_SWAPPED;
	else
		*flagsp = 0;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* sanity */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp, flagsp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
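
/*
 * Illustrative sketch only: a hypothetical receive-completion callback drains
 * every finished descriptor in one pass.  The function name and the delivery
 * step are assumptions.
 *
 *	static void example_ce_recv_done(struct ath10k_ce_pipe *ce_state)
 *	{
 *		void *ctx;
 *		u32 paddr;
 *		unsigned int nbytes, id, flags;
 *
 *		while (ath10k_ce_completed_recv_next(ce_state, &ctx, &paddr,
 *						     &nbytes, &id,
 *						     &flags) == 0) {
 *			... unmap paddr, deliver ctx upwards, then re-post a
 *			    fresh buffer with ath10k_ce_recv_buf_enqueue() ...
 *		}
 *	}
 */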

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(sdesc->addr);
	*nbytesp = __le16_to_cpu(sdesc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
			   CE_DESC_FLAGS_META_DATA);

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
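
/*
 * Illustrative sketch only: a hypothetical send-completion callback reaps
 * finished source descriptors and releases the associated buffers.  The
 * function name and the cleanup step are assumptions.
 *
 *	static void example_ce_send_done(struct ath10k_ce_pipe *ce_state)
 *	{
 *		void *ctx;
 *		u32 paddr;
 *		unsigned int nbytes, id;
 *
 *		while (ath10k_ce_completed_send_next(ce_state, &ctx, &paddr,
 *						     &nbytes, &id) == 0) {
 *			... unmap paddr and complete/free ctx ...
 *		}
 *	}
 */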

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ar_pci->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id;
	u32 intr_summary;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}
}
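
/*
 * Illustrative sketch only: with a single shared (legacy) interrupt the PCI
 * interrupt handler can simply let the copy engine layer work out which CEs
 * fired.  The handler name is an assumption.
 *
 *	static irqreturn_t example_shared_irq_handler(int irq, void *arg)
 *	{
 *		struct ath10k *ar = arg;
 *
 *		ath10k_ce_per_engine_service_any(ar);
 *
 *		return IRQ_HANDLED;
 *	}
 */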

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_intr_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	return 0;
}

void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
		ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
}

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->per_transfer_context, 0,
	       nentries * sizeof(*src_ring->per_transfer_context));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	memset(dest_ring->per_transfer_context, 0,
	       nentries * sizeof(*dest_ring->per_transfer_context));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %p\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	/*
	 * Also allocate a shadow src ring in regular
	 * mem to use for faster access.
	 */
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		dma_free_coherent(ar->dev,
				  (nentries * sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  src_ring->base_addr_owner_space,
				  src_ring->base_addr_ce_space);
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	return src_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
	 * Initialize the ring memory to 0 so that stale descriptor data
	 * cannot confuse the target during firmware download.
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return dest_ring;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr,
			void (*send_cb)(struct ath10k_ce_pipe *),
			void (*recv_cb)(struct ath10k_ce_pipe *))
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	int ret;

	/*
	 * Make sure there are enough CE ringbuffer entries for HTT TX to
	 * avoid additional TX locking checks.
	 *
	 * For the lack of a better place, do the check here.
	 */
	BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;
	if (attr->src_nentries)
		ce_state->send_cb = send_cb;
	if (attr->dest_nentries)
		ce_state->recv_cb = recv_cb;
	spin_unlock_bh(&ar_pci->ce_lock);

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	return 0;
}
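
/*
 * Illustrative sketch only: ring memory is allocated once with
 * ath10k_ce_alloc_pipe() and the hardware is (re)programmed with
 * ath10k_ce_init_pipe() on every power-up.  The attribute table and the
 * callback names are assumptions (see the earlier callback sketches).
 *
 *	ret = ath10k_ce_alloc_pipe(ar, ce_id, &host_ce_config[ce_id]);
 *	if (ret)
 *		return ret;
 *
 *	ret = ath10k_ce_init_pipe(ar, ce_id, &host_ce_config[ce_id],
 *				  example_ce_send_done, example_ce_recv_done);
 *	if (ret)
 *		return ret;
 */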

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	int ret;

	if (attr->src_nentries) {
		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err("failed to allocate copy engine source ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
								attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];

	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}
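
/*
 * Illustrative sketch only: teardown mirrors bring-up -- the hardware copy of
 * the ring state is cleared first, then the DMA and ring memory is released.
 *
 *	ath10k_ce_deinit_pipe(ar, ce_id);
 *	ath10k_ce_free_pipe(ar, ce_id);
 */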