/*
 * Intel Wireless Multicomm 3200 WiFi driver
 *
 * Copyright (C) 2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <ilw@linux.intel.com>
 * Samuel Ortiz <samuel.ortiz@intel.com>
 * Zhu Yi <yi.zhu@intel.com>
 *
 */

/*
 * iwm Tx theory of operation:
 *
 * 1) We receive an 802.3 frame from the stack
 * 2) We convert it to an 802.11 frame [iwm_xmit_frame]
 * 3) We queue it to its corresponding tx queue [iwm_xmit_frame]
 * 4) We schedule the tx worker. There is one worker per tx
 *    queue. [iwm_xmit_frame]
 * 5) The tx worker runs
 * 6) We go through every queued skb on the tx queue, and for each
 *    of them: [iwm_tx_worker]
 *    a) We check if we have enough Tx credits (see below for a Tx
 *       credits description) for the frame length. [iwm_tx_worker]
 *    b) If we do, we aggregate the Tx frame into a UDMA one, by
 *       concatenating one REPLY_TX command per Tx frame. [iwm_tx_worker]
 *    c) When we run out of credits, or when we reach the maximum
 *       concatenation size, we actually send the concatenated UDMA
 *       frame. [iwm_tx_worker]
 *
 * When we run out of Tx credits, the skbs fill the tx queue,
 * and eventually we will stop the netdev queue. [iwm_tx_worker]
 * The tx queue is emptied as we get new tx credits, by
 * scheduling the tx_worker. [iwm_tx_credit_inc]
 * The netdev queue is started again when we have enough tx credits,
 * and when our tx queue has some reasonable amount of space available
 * (i.e. half of the max size). [iwm_tx_worker]
 */

#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ieee80211.h>

#include "iwm.h"
#include "debug.h"
#include "commands.h"
#include "hal.h"
#include "umac.h"
#include "bus.h"

#define IWM_UMAC_PAGE_ALLOC_WRAP 0xffff

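/*
 * BYTES_TO_PAGES() rounds a byte count up to a whole number of UMAC
 * pages: e.g. with a hypothetical 512-byte IWM_UMAC_PAGE_SIZE, 512
 * bytes map to 1 page, 513 bytes to 2 pages, and 0 bytes to 0 pages.
 */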
#define BYTES_TO_PAGES(n)       (1 + ((n) >> ilog2(IWM_UMAC_PAGE_SIZE)) - \
                                 (((n) & (IWM_UMAC_PAGE_SIZE - 1)) == 0))

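/*
 * The UMAC command queue has a credit pool of its own (id
 * IWM_TX_CMD_QUEUE), so data tx queue indexes and credit pool ids
 * are offset by one above it.
 */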
#define pool_id_to_queue(id)    ((id) < IWM_TX_CMD_QUEUE ? (id) : (id) - 1)
#define queue_to_pool_id(q)     ((q) < IWM_TX_CMD_QUEUE ? (q) : (q) + 1)

/* requires iwm->tx_credit.lock to be held */
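/*
 * Worked example (illustrative numbers only): with pool->min_pages = 4,
 * pool->max_pages = 8, pool->alloc_pages = 6, spool->max_pages = 16 and
 * spool->alloc_pages = 14, the spool can still hand out 16 - 14 = 2
 * pages and the pool has no unused guaranteed pages left, so this pool
 * may take min(8 - 6, 2) = 2 more pages.
 */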
static int iwm_tx_credit_get(struct iwm_tx_credit *tx_credit, int id)
{
        struct pool_entry *pool = &tx_credit->pools[id];
        struct spool_entry *spool = &tx_credit->spools[pool->sid];
        int spool_pages;

        /* number of pages that can be taken from the spool by this pool */
        spool_pages = spool->max_pages - spool->alloc_pages +
                      max(pool->min_pages - pool->alloc_pages, 0);

        return min(pool->max_pages - pool->alloc_pages, spool_pages);
}

static bool iwm_tx_credit_ok(struct iwm_priv *iwm, int id, int nb)
{
        u32 npages = BYTES_TO_PAGES(nb);

        if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id))
                return true;

        set_bit(id, &iwm->tx_credit.full_pools_map);

        IWM_DBG_TX(iwm, DBG, "LINK: stop txq[%d], available credit: %d\n",
                   pool_id_to_queue(id),
                   iwm_tx_credit_get(&iwm->tx_credit, id));

        return false;
}

void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages)
{
        struct pool_entry *pool;
        struct spool_entry *spool;
        int freed_pages;
        int queue;

        BUG_ON(id >= IWM_MACS_OUT_GROUPS);

        pool = &iwm->tx_credit.pools[id];
        spool = &iwm->tx_credit.spools[pool->sid];

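        /*
         * total_freed_pages is a cumulative counter reported by the
         * UMAC that wraps at IWM_UMAC_PAGE_ALLOC_WRAP, so a negative
         * delta means the counter wrapped since our last update.
         */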
        freed_pages = total_freed_pages - pool->total_freed_pages;
        IWM_DBG_TX(iwm, DBG, "Free %d pages for pool[%d]\n", freed_pages, id);

        if (!freed_pages) {
                IWM_DBG_TX(iwm, DBG, "No pages are freed by UMAC\n");
                return;
        } else if (freed_pages < 0) {
                freed_pages += IWM_UMAC_PAGE_ALLOC_WRAP + 1;
        }

        if (pool->alloc_pages > pool->min_pages) {
                int spool_pages = pool->alloc_pages - pool->min_pages;
                spool_pages = min(spool_pages, freed_pages);
                spool->alloc_pages -= spool_pages;
        }

        pool->alloc_pages -= freed_pages;
        pool->total_freed_pages = total_freed_pages;

        IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
                   "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
                   pool->total_freed_pages, pool->sid, spool->alloc_pages);

        if (test_bit(id, &iwm->tx_credit.full_pools_map) &&
            (pool->alloc_pages < pool->max_pages / 2)) {
                clear_bit(id, &iwm->tx_credit.full_pools_map);

                queue = pool_id_to_queue(id);

                IWM_DBG_TX(iwm, DBG, "LINK: start txq[%d], available "
                           "credit: %d\n", queue,
                           iwm_tx_credit_get(&iwm->tx_credit, id));
                queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
        }
}
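/*
 * Worked example (illustrative numbers only): with pool->min_pages = 4
 * and pool->alloc_pages = 3, allocating 3 more pages takes the pool past
 * its guaranteed minimum, so 3 + 3 - 4 = 2 of those pages are charged to
 * the spool as well.
 */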
static void iwm_tx_credit_dec(struct iwm_priv *iwm, int id, int alloc_pages)
{
        struct pool_entry *pool;
        struct spool_entry *spool;
        int spool_pages;

        IWM_DBG_TX(iwm, DBG, "Allocate %d pages for pool[%d]\n",
                   alloc_pages, id);

        BUG_ON(id >= IWM_MACS_OUT_GROUPS);

        pool = &iwm->tx_credit.pools[id];
        spool = &iwm->tx_credit.spools[pool->sid];

        spool_pages = pool->alloc_pages + alloc_pages - pool->min_pages;

        if (pool->alloc_pages >= pool->min_pages)
                spool->alloc_pages += alloc_pages;
        else if (spool_pages > 0)
                spool->alloc_pages += spool_pages;

        pool->alloc_pages += alloc_pages;

        IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
                   "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
                   pool->total_freed_pages, pool->sid, spool->alloc_pages);
}

int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb)
{
        u32 npages = BYTES_TO_PAGES(nb);
        int ret = 0;

        spin_lock(&iwm->tx_credit.lock);

        if (!iwm_tx_credit_ok(iwm, id, nb)) {
                IWM_DBG_TX(iwm, DBG, "No credit available for pool[%d]\n", id);
                ret = -ENOSPC;
                goto out;
        }

        iwm_tx_credit_dec(iwm, id, npages);

 out:
        spin_unlock(&iwm->tx_credit.lock);
        return ret;
}

/*
 * Since we're on an SDIO or USB bus, we are not sharing memory with
 * the target for storing the frames to be transmitted. The host needs
 * to push them upstream. As a consequence there needs to be a way for
 * the target to let us know whether it can actually take more TX frames
 * or not. This is what Tx credits are for.
 *
 * For each Tx HW queue, we have a Tx pool, and then we have one
 * unique super pool (spool), which is actually a global pool of
 * all the UMAC pages.
 * Each Tx pool has min_pages, max_pages, and alloc_pages fields;
 * alloc_pages tracks the number of pages currently allocated from
 * the tx pool.
 * Here are the rules to check whether we have enough tx credits for
 * a given tx frame:
 * 1) We translate the frame length into a number of UMAC pages.
 *    Let's call them n_pages.
 * 2) For the corresponding tx pool, we check if n_pages +
 *    pool->alloc_pages is higher than pool->min_pages. min_pages
 *    represents a set of pre-allocated pages on the tx pool. If
 *    that's the case, then we need to allocate the extra pages from
 *    the spool. We can do so until we reach spool->max_pages.
 * 3) A tx pool is not allowed to have more than pool->max_pages
 *    allocated in total, so once we're over min_pages we can keep
 *    allocating pages from the spool, but never more than max_pages
 *    overall.
 *
 * When the tx code path needs to send a tx frame, it first checks
 * that it has enough tx credits, following those rules. [iwm_tx_credit_get]
 * If it does, it then updates the pool and spool counters and
 * sends the frame. [iwm_tx_credit_alloc and iwm_tx_credit_dec]
 * On the other side, when the UMAC is done transmitting frames, it
 * sends a credit update notification to the host. This is when the
 * pool and spool counters get decreased. [iwm_tx_credit_inc, called
 * from rx.c:iwm_ntf_tx_credit_update]
 */
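/*
 * Worked example (illustrative numbers only): say a tx pool has
 * min_pages = 8, max_pages = 16 and alloc_pages = 6, and its spool has
 * max_pages = 32 and alloc_pages = 30. A 3-page frame first consumes
 * the 2 remaining guaranteed pages, then needs 1 more page from the
 * spool; the spool still has 32 - 30 = 2 pages available, so the frame
 * fits and the counters become pool->alloc_pages = 9 and
 * spool->alloc_pages = 31.
 */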
void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
                              struct iwm_umac_notif_alive *alive)
{
        int i, sid, pool_pages;

        spin_lock(&iwm->tx_credit.lock);

        iwm->tx_credit.pool_nr = le16_to_cpu(alive->page_grp_count);
        iwm->tx_credit.full_pools_map = 0;
        memset(&iwm->tx_credit.spools[0], 0, sizeof(struct spool_entry));

        IWM_DBG_TX(iwm, DBG, "Pools number is %d\n", iwm->tx_credit.pool_nr);

        for (i = 0; i < iwm->tx_credit.pool_nr; i++) {
                __le32 page_grp_state = alive->page_grp_state[i];

                iwm->tx_credit.pools[i].id = GET_VAL32(page_grp_state,
                                UMAC_ALIVE_PAGE_STS_GRP_NUM);
                iwm->tx_credit.pools[i].sid = GET_VAL32(page_grp_state,
                                UMAC_ALIVE_PAGE_STS_SGRP_NUM);
                iwm->tx_credit.pools[i].min_pages = GET_VAL32(page_grp_state,
                                UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE);
                iwm->tx_credit.pools[i].max_pages = GET_VAL32(page_grp_state,
                                UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE);
                iwm->tx_credit.pools[i].alloc_pages = 0;
                iwm->tx_credit.pools[i].total_freed_pages = 0;

                sid = iwm->tx_credit.pools[i].sid;
                pool_pages = iwm->tx_credit.pools[i].min_pages;

                if (iwm->tx_credit.spools[sid].max_pages == 0) {
                        iwm->tx_credit.spools[sid].id = sid;
                        iwm->tx_credit.spools[sid].max_pages =
                                GET_VAL32(page_grp_state,
                                          UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE);
                        iwm->tx_credit.spools[sid].alloc_pages = 0;
                }

                iwm->tx_credit.spools[sid].alloc_pages += pool_pages;

                IWM_DBG_TX(iwm, DBG, "Pool idx: %d, id: %d, sid: %d, capacity "
                           "min: %d, max: %d, pool alloc: %d, total_free: %d, "
                           "super pool alloc: %d\n",
                           i, iwm->tx_credit.pools[i].id,
                           iwm->tx_credit.pools[i].sid,
                           iwm->tx_credit.pools[i].min_pages,
                           iwm->tx_credit.pools[i].max_pages,
                           iwm->tx_credit.pools[i].alloc_pages,
                           iwm->tx_credit.pools[i].total_freed_pages,
                           iwm->tx_credit.spools[sid].alloc_pages);
        }

        spin_unlock(&iwm->tx_credit.lock);
}

#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)

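/*
 * Each Tx frame placed in the concatenation buffer is laid out as a
 * UDMA + UMAC header (struct iwm_umac_wifi_out_hdr) followed by the
 * 802.11 payload, and the tx worker advances the concatenation pointer
 * by ALIGN(IWM_UDMA_HDR_LEN + skb->len, 16) for each of them.
 */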
static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
                               int pool_id, u8 *buf)
{
        struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
        struct iwm_udma_wifi_cmd udma_cmd;
        struct iwm_umac_cmd umac_cmd;
        struct iwm_tx_info *tx_info = skb_to_tx_info(skb);

        udma_cmd.count = cpu_to_le16(skb->len +
                                     sizeof(struct iwm_umac_fw_cmd_hdr));
        /* set EOP to 0 here. iwm_udma_wifi_hdr_set_eop() will be
         * called later to set EOP for the last packet. */
        udma_cmd.eop = 0;
        udma_cmd.credit_group = pool_id;
        udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
        udma_cmd.lmac_offset = 0;

        umac_cmd.id = REPLY_TX;
        umac_cmd.count = cpu_to_le16(skb->len);
        umac_cmd.color = tx_info->color;
        umac_cmd.resp = 0;
        umac_cmd.seq_num = cpu_to_le16(iwm_alloc_wifi_cmd_seq(iwm));

        iwm_build_udma_wifi_hdr(iwm, &hdr->hw_hdr, &udma_cmd);
        iwm_build_umac_hdr(iwm, &hdr->sw_hdr, &umac_cmd);

        memcpy(buf + sizeof(*hdr), skb->data, skb->len);

        return umac_cmd.seq_num;
}

static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
                                      struct iwm_tx_queue *txq)
{
        int ret;

        if (!txq->concat_count)
                return 0;

        IWM_DBG_TX(iwm, DBG, "Send concatenated Tx: queue %d, %d bytes\n",
                   txq->id, txq->concat_count);

        /* mark EOP for the last packet */
        iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);

        ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);

        txq->concat_count = 0;
        txq->concat_ptr = txq->concat_buf;

        return ret;
}

void iwm_tx_worker(struct work_struct *work)
{
        struct iwm_priv *iwm;
        struct iwm_tx_info *tx_info = NULL;
        struct sk_buff *skb;
        struct iwm_tx_queue *txq;
        struct iwm_sta_info *sta_info;
        struct iwm_tid_info *tid_info;
        int cmdlen, ret, pool_id;

        txq = container_of(work, struct iwm_tx_queue, worker);
        iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

        pool_id = queue_to_pool_id(txq->id);

        while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
               !skb_queue_empty(&txq->queue)) {

                spin_lock_bh(&txq->lock);
                skb = skb_dequeue(&txq->queue);
                spin_unlock_bh(&txq->lock);

                tx_info = skb_to_tx_info(skb);
                sta_info = &iwm->sta_table[tx_info->sta];
                if (!sta_info->valid) {
                        IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
                        kfree_skb(skb);
                        continue;
                }

                tid_info = &sta_info->tid_info[tx_info->tid];

                mutex_lock(&tid_info->mutex);

                /*
                 * If the RAxTID is stopped, we queue the skb on the
                 * stopped queue.
                 * When we get a UMAC notification to resume the tx flow
                 * for this RAxTID, we merge the stopped queue back into
                 * the regular queue. See iwm_ntf_stop_resume_tx() in rx.c.
                 */
                if (tid_info->stopped) {
                        IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
                                   tx_info->sta, tx_info->tid);
                        spin_lock_bh(&txq->lock);
                        skb_queue_tail(&txq->stopped_queue, skb);
                        spin_unlock_bh(&txq->lock);

                        mutex_unlock(&tid_info->mutex);
                        continue;
                }

                cmdlen = IWM_UDMA_HDR_LEN + skb->len;

                IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
                           "%d, color: %d\n", txq->id, skb, tx_info->sta,
                           tx_info->color);

                if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
                        iwm_tx_send_concat_packets(iwm, txq);

                ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
                if (ret) {
                        IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
                                   "%d, Tx worker stopped\n", txq->id);
                        spin_lock_bh(&txq->lock);
                        skb_queue_head(&txq->queue, skb);
                        spin_unlock_bh(&txq->lock);

                        mutex_unlock(&tid_info->mutex);
                        break;
                }

                txq->concat_ptr = txq->concat_buf + txq->concat_count;
                tid_info->last_seq_num =
                        iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
                txq->concat_count += ALIGN(cmdlen, 16);

                mutex_unlock(&tid_info->mutex);

                kfree_skb(skb);
        }

        iwm_tx_send_concat_packets(iwm, txq);

        if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
            !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
            (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
                IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]\n",
                           txq->id);
                netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
        }
}

int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct iwm_priv *iwm = ndev_to_iwm(netdev);
        struct net_device *ndev = iwm_to_ndev(iwm);
        struct wireless_dev *wdev = iwm_to_wdev(iwm);
        struct iwm_tx_info *tx_info;
        struct iwm_tx_queue *txq;
        struct iwm_sta_info *sta_info;
        u8 *dst_addr, sta_id;
        u16 queue;
        int ret;

        if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
                IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
                           "not associated\n");
                netif_tx_stop_all_queues(netdev);
                goto drop;
        }

        queue = skb_get_queue_mapping(skb);
        BUG_ON(queue >= IWM_TX_DATA_QUEUES); /* no iPAN yet */

        txq = &iwm->txq[queue];

        /* No free space for Tx, tx_worker is too slow */
        if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
            (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
                IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
                netif_stop_subqueue(netdev, queue);
                return NETDEV_TX_BUSY;
        }

        ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
                                       iwm->bssid, 0);
        if (ret) {
                IWM_ERR(iwm, "build wifi header failed\n");
                goto drop;
        }

        dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;

        for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
                sta_info = &iwm->sta_table[sta_id];
                if (sta_info->valid &&
                    !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
                        break;
        }

        if (sta_id == IWM_STA_TABLE_NUM) {
                IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
                        dst_addr);
                goto drop;
        }

        tx_info = skb_to_tx_info(skb);
        tx_info->sta = sta_id;
        tx_info->color = sta_info->color;
        /* UMAC uses TID 8 (vs. 0) for non-QoS packets */
        if (sta_info->qos)
                tx_info->tid = skb->priority;
        else
                tx_info->tid = IWM_UMAC_MGMT_TID;

        spin_lock_bh(&iwm->txq[queue].lock);
        skb_queue_tail(&iwm->txq[queue].queue, skb);
        spin_unlock_bh(&iwm->txq[queue].lock);

        queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);

        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += skb->len;
        return NETDEV_TX_OK;

 drop:
        ndev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}