/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < TFD_QUEUE_SIZE_MAX)
		max = q->n_window;
	else
		max = TFD_QUEUE_SIZE_MAX - 1;

	/*
	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
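
/*
 * Worked example (assuming TFD_QUEUE_SIZE_MAX is 256, as in this driver
 * generation): with write_ptr = 3 and read_ptr = 250 the subtraction wraps,
 * so used = (3 - 250) & 255 = 9 TFDs in flight.  If n_window is also 256,
 * max is capped at 255 and the function returns 255 - 9 = 246 free slots.
 */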

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}

static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

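/*
 * Watchdog for a TX queue that has had entries pending longer than
 * wd_timeout: dump scheduler and FH state for debugging, then force an NMI
 * so the firmware error handler can recover the device.
 */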
static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(txq->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans,
					     trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

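/*
 * Reset the byte-count entry for the frame at read_ptr (to a length of 1)
 * when it is reclaimed, mirroring the write into the duplicate area the
 * scheduler uses for the first TFD_QUEUE_SIZE_BC_DUP slots.
 */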
static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->q.id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		spin_lock_bh(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

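/*
 * Helpers for the TFD's transmit buffer (TB) entries: a TB stores the DMA
 * address split across a 32-bit 'lo' word and the low nibble of 'hi_n_len',
 * with the remaining 12 bits of 'hi_n_len' holding the buffer length.
 */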
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
			dma_unmap_page(trans->dev,
				       iwl_pcie_tfd_tb_get_addr(tfd, i),
				       iwl_pcie_tfd_tb_get_len(tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_pcie_tfd_tb_get_addr(tfd, i),
					 iwl_pcie_tfd_tb_get_len(tfd, i),
					 DMA_TO_DEVICE);
	}
	tfd->num_tbs = 0;
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

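/*
 * Append one transmit buffer (DMA address + length) to the TFD at the
 * queue's write pointer, optionally zeroing the TFD first, and return the
 * index of the TB that was just filled (or a negative errno).
 */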
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return num_tbs;
}

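/*
 * Allocate everything a single TX queue needs: the stuck-queue timer, the
 * per-entry bookkeeping array (plus command buffers for the command queue),
 * the DMA-coherent TFD ring and the per-slot first-TB scratch buffers.
 */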
static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			      struct iwl_txq *txq, int slots_num,
			      u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	txq->q.id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;

}

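/*
 * Finish per-queue setup once the buffers exist: initialize the ring
 * indices and watermarks, the lock and the overflow queue, and point the
 * device at the TFD ring's DMA address.
 */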
static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			     int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = false;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);
	__skb_queue_head_init(&txq->overflow_q);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

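/*
 * Release the page that was attached to the skb's control buffer to hold
 * TSO headers, if one was allocated for this frame.
 */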
static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
				   struct sk_buff *skb)
{
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);

	if (*page_ptr) {
		__free_page(*page_ptr);
		*page_ptr = NULL;
	}
}

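/*
 * Drop the "host command in flight" state: release the runtime-PM reference
 * taken when the command was queued and, on NICs that need the APMG wake
 * workaround, allow the MAC to go back to sleep.
 */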
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = false;
		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
		iwl_trans_unref(trans);
	}

	if (!trans->cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, q->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			struct sk_buff *skb = txq->entries[q->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);

		if (q->read_ptr == q->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      q->id);
				iwl_trans_unref(trans);
			} else {
				iwl_pcie_clear_cmd_in_flight(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	txq->active = false;

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->q.n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
			    SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo,
				trans_pcie->cmd_q_wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
				   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

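/*
 * Quiesce the flow handler (FH) side of TX: disable every TX DMA channel
 * and poll until the channels report idle, holding NIC access so the
 * registers stay reachable.
 */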
static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				     scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

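/*
 * Re-arm or cancel the per-queue stuck timer after progress on the queue:
 * delete it once the queue drains, push it out by wd_timeout otherwise, and
 * leave it alone for frozen queues (client in power save).
 */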
static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!txq->active) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
		struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_pcie_free_tso_page(trans_pcie, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
	    test_bit(txq_id, trans_pcie->queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * This is tricky: we are in the reclaim path, which is non
		 * re-entrant, so no one will try to access the txq data
		 * from that path. We stopped tx, so we can't have tx as well.
		 * Bottom line, we can unlock and re-lock later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans_pcie->dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_queue_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id);
		}
		spin_lock_bh(&txq->lock);

		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	if (q->read_ptr == q->write_ptr) {
		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
		iwl_trans_unref(trans);
	}

out:
	spin_unlock_bh(&txq->lock);
}

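/*
 * Mark a host command as in flight: take a runtime-PM reference (unless the
 * command may be sent while idle) and, on NICs that need the APMG wake
 * workaround, keep the MAC awake until the command completes.
 */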
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	unsigned long flags;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_force_nmi(trans);
		}
	}

	if (q->read_ptr == q->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(txq);
}

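/*
 * Write the RA/TID -> queue mapping into the scheduler's translation table
 * in SRAM; two 16-bit entries share each dword, so read-modify-write the
 * half that belongs to this queue.
 */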
1229static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
Emmanuel Grumbach1ce86582012-06-04 16:48:17 +03001230 u16 txq_id)
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001231{
Johannes Berg20d3b642012-05-16 22:54:29 +02001232 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001233 u32 tbl_dw_addr;
1234 u32 tbl_dw;
1235 u16 scd_q2ratid;
1236
1237 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
1238
Emmanuel Grumbach105183b2011-08-25 23:11:02 -07001239 tbl_dw_addr = trans_pcie->scd_base_addr +
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001240 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
1241
Emmanuel Grumbach4fd442d2012-12-24 14:27:11 +02001242 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001243
1244 if (txq_id & 0x1)
1245 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1246 else
1247 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1248
Emmanuel Grumbach4fd442d2012-12-24 14:27:11 +02001249 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001250
1251 return 0;
1252}
1253
Emmanuel Grumbachbd5f6a32013-04-28 14:05:22 +03001254/* Receiver address (actually, Rx station's index into station table),
1255 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
1256#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
1257
Johannes Bergfea77952014-08-01 11:58:47 +02001258void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001259 const struct iwl_trans_txq_scd_cfg *cfg,
1260 unsigned int wdg_timeout)
Johannes Berg70a18c52012-03-05 11:24:44 -08001261{
Johannes Berg9eae88f2012-03-15 13:26:52 -07001262 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001263 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
Johannes Bergd4578ea2014-08-01 12:17:40 +02001264 int fifo = -1;
Emmanuel Grumbach4beaf6c2012-05-29 11:29:10 +03001265
Johannes Berg9eae88f2012-03-15 13:26:52 -07001266 if (test_and_set_bit(txq_id, trans_pcie->queue_used))
1267 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001268
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001269 txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
1270
Johannes Bergd4578ea2014-08-01 12:17:40 +02001271 if (cfg) {
1272 fifo = cfg->fifo;
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001273
Avri Altman002a9e22014-07-24 19:25:10 +03001274		/* Disable the scheduler prior to configuring the cmd queue */
Emmanuel Grumbach3a736bc2014-09-10 11:16:41 +03001275 if (txq_id == trans_pcie->cmd_queue &&
1276 trans_pcie->scd_set_active)
Avri Altman002a9e22014-07-24 19:25:10 +03001277 iwl_scd_enable_set_active(trans, 0);
1278
Johannes Bergd4578ea2014-08-01 12:17:40 +02001279 /* Stop this Tx queue before configuring it */
1280 iwl_scd_txq_set_inactive(trans, txq_id);
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001281
Johannes Bergd4578ea2014-08-01 12:17:40 +02001282 /* Set this queue as a chain-building queue unless it is CMD */
1283 if (txq_id != trans_pcie->cmd_queue)
1284 iwl_scd_txq_set_chain(trans, txq_id);
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001285
Johannes Berg64ba8932014-08-01 13:33:46 +02001286 if (cfg->aggregate) {
Johannes Bergd4578ea2014-08-01 12:17:40 +02001287 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
Emmanuel Grumbach4beaf6c2012-05-29 11:29:10 +03001288
Johannes Bergd4578ea2014-08-01 12:17:40 +02001289 /* Map receiver-address / traffic-ID to this queue */
1290 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
Emmanuel Grumbachf4772522013-07-24 14:15:21 +03001291
Johannes Bergd4578ea2014-08-01 12:17:40 +02001292 /* enable aggregations for the queue */
1293 iwl_scd_txq_enable_agg(trans, txq_id);
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001294 txq->ampdu = true;
Johannes Bergd4578ea2014-08-01 12:17:40 +02001295 } else {
1296 /*
1297 * disable aggregations for the queue, this will also
1298 * make the ra_tid mapping configuration irrelevant
1299 * since it is now a non-AGG queue.
1300 */
1301 iwl_scd_txq_disable_agg(trans, txq_id);
1302
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001303 ssn = txq->q.read_ptr;
Johannes Bergd4578ea2014-08-01 12:17:40 +02001304 }
Emmanuel Grumbach4beaf6c2012-05-29 11:29:10 +03001305 }
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001306
1307 /* Place first TFD at index corresponding to start sequence number.
1308 * Assumes that ssn_idx is valid (!= 0xFFF) */
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001309 txq->q.read_ptr = (ssn & 0xff);
1310 txq->q.write_ptr = (ssn & 0xff);
Emmanuel Grumbach0294d9e2015-01-05 16:52:55 +02001311 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
1312 (ssn & 0xff) | (txq_id << 8));
Emmanuel Grumbach1ce86582012-06-04 16:48:17 +03001313
Johannes Bergd4578ea2014-08-01 12:17:40 +02001314 if (cfg) {
1315 u8 frame_limit = cfg->frame_limit;
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001316
Johannes Bergd4578ea2014-08-01 12:17:40 +02001317 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
1318
1319 /* Set up Tx window size and frame limit for this queue */
1320 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
1321 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
1322 iwl_trans_write_mem32(trans,
1323 trans_pcie->scd_base_addr +
Johannes Berg9eae88f2012-03-15 13:26:52 -07001324 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1325 ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
Johannes Bergd4578ea2014-08-01 12:17:40 +02001326 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
Johannes Berg9eae88f2012-03-15 13:26:52 -07001327 ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
Johannes Bergd4578ea2014-08-01 12:17:40 +02001328 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001329
Johannes Bergd4578ea2014-08-01 12:17:40 +02001330 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
1331 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
1332 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1333 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
1334 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
1335 SCD_QUEUE_STTS_REG_MSK);
Avri Altman002a9e22014-07-24 19:25:10 +03001336
1337 /* enable the scheduler for this queue (only) */
Emmanuel Grumbach3a736bc2014-09-10 11:16:41 +03001338 if (txq_id == trans_pcie->cmd_queue &&
1339 trans_pcie->scd_set_active)
Avri Altman002a9e22014-07-24 19:25:10 +03001340 iwl_scd_enable_set_active(trans, BIT(txq_id));
Emmanuel Grumbach0294d9e2015-01-05 16:52:55 +02001341
1342 IWL_DEBUG_TX_QUEUES(trans,
1343 "Activate queue %d on FIFO %d WrPtr: %d\n",
1344 txq_id, fifo, ssn & 0xff);
1345 } else {
1346 IWL_DEBUG_TX_QUEUES(trans,
1347 "Activate queue %d WrPtr: %d\n",
1348 txq_id, ssn & 0xff);
Johannes Bergd4578ea2014-08-01 12:17:40 +02001349 }
1350
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001351 txq->active = true;
Emmanuel Grumbach4beaf6c2012-05-29 11:29:10 +03001352}
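/*
 * Minimal caller sketch (illustrative only - the field values are
 * assumptions, and op modes normally reach this function through the
 * transport ops rather than calling it directly): enabling a Tx queue
 * for aggregation on a given FIFO.
 *
 *	struct iwl_trans_txq_scd_cfg cfg = {
 *		.fifo = 1,		// target HW FIFO (example value)
 *		.sta_id = 5,
 *		.tid = 2,
 *		.frame_limit = 64,	// BA window size (example value)
 *		.aggregate = true,
 *	};
 *
 *	iwl_trans_pcie_txq_enable(trans, txq_id, ssn, &cfg, 2500);
 *
 * Passing a NULL cfg skips all scheduler programming and only resets the
 * ring pointers, which is what the non-cfg branches above handle.
 */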
1353
Liad Kaufman42db09c2016-05-02 14:01:14 +03001354void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
1355 bool shared_mode)
1356{
1357 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1358 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
1359
1360 txq->ampdu = !shared_mode;
1361}
1362
Johannes Bergd4578ea2014-08-01 12:17:40 +02001363void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
1364 bool configure_scd)
Emmanuel Grumbach288712a2011-08-25 23:11:25 -07001365{
Emmanuel Grumbach8ad71be2011-08-25 23:11:32 -07001366 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach986ea6c2012-09-30 16:25:43 +02001367 u32 stts_addr = trans_pcie->scd_base_addr +
1368 SCD_TX_STTS_QUEUE_OFFSET(txq_id);
1369 static const u32 zero_val[4] = {};
Emmanuel Grumbach288712a2011-08-25 23:11:25 -07001370
Emmanuel Grumbache0b8d4052015-01-20 17:02:40 +02001371 trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
1372 trans_pcie->txq[txq_id].frozen = false;
1373
Emmanuel Grumbachfba1c622013-12-19 22:19:17 +02001374 /*
1375 * Upon HW Rfkill - we stop the device, and then stop the queues
1376 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1377 * allow the op_mode to call txq_disable after it already called
1378 * stop_device.
1379 */
Johannes Berg9eae88f2012-03-15 13:26:52 -07001380 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
Emmanuel Grumbachfba1c622013-12-19 22:19:17 +02001381 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1382 "queue %d not used", txq_id);
Johannes Berg9eae88f2012-03-15 13:26:52 -07001383 return;
Emmanuel Grumbachbc237732011-11-21 13:25:31 +02001384 }
1385
Johannes Bergd4578ea2014-08-01 12:17:40 +02001386 if (configure_scd) {
1387 iwl_scd_txq_set_inactive(trans, txq_id);
Emmanuel Grumbachac928f82012-10-14 16:36:36 +02001388
Johannes Bergd4578ea2014-08-01 12:17:40 +02001389 iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
1390 ARRAY_SIZE(zero_val));
1391 }
Emmanuel Grumbach986ea6c2012-09-30 16:25:43 +02001392
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001393 iwl_pcie_txq_unmap(trans, txq_id);
Johannes Berg68972c42013-06-11 19:05:27 +02001394 trans_pcie->txq[txq_id].ampdu = false;
Emmanuel Grumbach6c3fd3f2012-10-18 12:38:37 +02001395
Emmanuel Grumbach1ce86582012-06-04 16:48:17 +03001396 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
Emmanuel Grumbach48d42c42011-07-10 10:47:01 +03001397}
1398
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001399/*************** HOST COMMAND QUEUE FUNCTIONS *****/
1400
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001401/*
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001402 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001403 * @trans: transport layer private data
Eliad Pellere89044d2013-07-16 17:33:26 +03001404 * @cmd: a pointer to the ucode command structure
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001405 *
Eliad Pellere89044d2013-07-16 17:33:26 +03001406 * The function returns a value < 0 to indicate that the operation
 1407 * failed. On success, it returns the index (>= 0) of the command in the
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001408 * command queue.
1409 */
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001410static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1411 struct iwl_host_cmd *cmd)
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001412{
Emmanuel Grumbach8ad71be2011-08-25 23:11:32 -07001413 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001414 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001415 struct iwl_queue *q = &txq->q;
Johannes Bergc2acea82009-07-24 11:13:05 -07001416 struct iwl_device_cmd *out_cmd;
1417 struct iwl_cmd_meta *out_meta;
Emmanuel Grumbachb9439492013-12-22 15:09:40 +02001418 unsigned long flags;
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001419 void *dup_buf = NULL;
Tomas Winklerf3674222008-08-04 16:00:44 +08001420 dma_addr_t phys_addr;
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001421 int idx;
Sara Sharon8de437c2016-06-09 17:56:38 +03001422 u16 copy_size, cmd_size, tb0_size;
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001423 bool had_nocopy = false;
Aviya Erenfeldab021652015-06-09 16:45:52 +03001424 u8 group_id = iwl_cmd_groupid(cmd->id);
Emmanuel Grumbachb9439492013-12-22 15:09:40 +02001425 int i, ret;
Emmanuel Grumbach96791422012-07-24 01:58:32 +03001426 u32 cmd_pos;
Johannes Berg1afbfb62013-02-26 11:32:26 +01001427 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
1428 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001429
Johannes Berg88742c92015-06-30 15:31:22 +02001430 if (WARN(!trans_pcie->wide_cmd_header &&
1431 group_id > IWL_ALWAYS_LONG_GROUP,
Aviya Erenfeldab021652015-06-09 16:45:52 +03001432 "unsupported wide command %#x\n", cmd->id))
1433 return -EINVAL;
1434
1435 if (group_id != 0) {
1436 copy_size = sizeof(struct iwl_cmd_header_wide);
1437 cmd_size = sizeof(struct iwl_cmd_header_wide);
1438 } else {
1439 copy_size = sizeof(struct iwl_cmd_header);
1440 cmd_size = sizeof(struct iwl_cmd_header);
1441 }
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001442
1443 /* need one for the header if the first is NOCOPY */
Johannes Berg1afbfb62013-02-26 11:32:26 +01001444 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001445
Johannes Berg1afbfb62013-02-26 11:32:26 +01001446 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
Johannes Berg8a964f42013-02-25 16:01:34 +01001447 cmddata[i] = cmd->data[i];
1448 cmdlen[i] = cmd->len[i];
1449
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001450 if (!cmd->len[i])
1451 continue;
Johannes Berg8a964f42013-02-25 16:01:34 +01001452
Sara Sharon8de437c2016-06-09 17:56:38 +03001453 /* need at least IWL_FIRST_TB_SIZE copied */
1454 if (copy_size < IWL_FIRST_TB_SIZE) {
1455 int copy = IWL_FIRST_TB_SIZE - copy_size;
Johannes Berg8a964f42013-02-25 16:01:34 +01001456
1457 if (copy > cmdlen[i])
1458 copy = cmdlen[i];
1459 cmdlen[i] -= copy;
1460 cmddata[i] += copy;
1461 copy_size += copy;
1462 }
1463
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001464 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
1465 had_nocopy = true;
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001466 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
1467 idx = -EINVAL;
1468 goto free_dup_buf;
1469 }
1470 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
1471 /*
1472 * This is also a chunk that isn't copied
1473 * to the static buffer so set had_nocopy.
1474 */
1475 had_nocopy = true;
1476
1477 /* only allowed once */
1478 if (WARN_ON(dup_buf)) {
1479 idx = -EINVAL;
1480 goto free_dup_buf;
1481 }
1482
Johannes Berg8a964f42013-02-25 16:01:34 +01001483 dup_buf = kmemdup(cmddata[i], cmdlen[i],
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001484 GFP_ATOMIC);
1485 if (!dup_buf)
1486 return -ENOMEM;
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001487 } else {
1488 /* NOCOPY must not be followed by normal! */
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001489 if (WARN_ON(had_nocopy)) {
1490 idx = -EINVAL;
1491 goto free_dup_buf;
1492 }
Johannes Berg8a964f42013-02-25 16:01:34 +01001493 copy_size += cmdlen[i];
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001494 }
1495 cmd_size += cmd->len[i];
1496 }
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001497
Johannes Berg3e41ace2011-04-18 09:12:37 -07001498 /*
1499 * If any of the command structures end up being larger than
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001500 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
1501 * allocated into separate TFDs, then we will need to
1502 * increase the size of the buffers.
Johannes Berg3e41ace2011-04-18 09:12:37 -07001503 */
Johannes Berg2a79e452012-09-26 13:32:13 +02001504 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
1505 "Command %s (%#x) is too large (%d bytes)\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001506 iwl_get_cmd_string(trans, cmd->id),
1507 cmd->id, copy_size)) {
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001508 idx = -EINVAL;
1509 goto free_dup_buf;
1510 }
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001511
Johannes Berg015c15e2012-03-05 11:24:24 -08001512 spin_lock_bh(&txq->lock);
Stanislaw Gruszka3598e172011-03-31 17:36:26 +02001513
Johannes Bergc2acea82009-07-24 11:13:05 -07001514 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
Johannes Berg015c15e2012-03-05 11:24:24 -08001515 spin_unlock_bh(&txq->lock);
Stanislaw Gruszka3598e172011-03-31 17:36:26 +02001516
Emmanuel Grumbach6d8f6ee2011-08-25 23:11:06 -07001517 IWL_ERR(trans, "No space in command queue\n");
Johannes Berg0e781842012-03-06 13:30:49 -08001518 iwl_op_mode_cmd_queue_full(trans->op_mode);
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001519 idx = -ENOSPC;
1520 goto free_dup_buf;
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001521 }
1522
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001523 idx = get_cmd_index(q, q->write_ptr);
Johannes Bergbf8440e2012-03-19 17:12:06 +01001524 out_cmd = txq->entries[idx].cmd;
1525 out_meta = &txq->entries[idx].meta;
Johannes Bergc2acea82009-07-24 11:13:05 -07001526
Daniel C Halperin8ce73f32009-07-31 14:28:06 -07001527 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
Johannes Bergc2acea82009-07-24 11:13:05 -07001528 if (cmd->flags & CMD_WANT_SKB)
1529 out_meta->source = cmd;
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001530
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001531 /* set up the header */
Aviya Erenfeldab021652015-06-09 16:45:52 +03001532 if (group_id != 0) {
1533 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
1534 out_cmd->hdr_wide.group_id = group_id;
1535 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
1536 out_cmd->hdr_wide.length =
1537 cpu_to_le16(cmd_size -
1538 sizeof(struct iwl_cmd_header_wide));
1539 out_cmd->hdr_wide.reserved = 0;
1540 out_cmd->hdr_wide.sequence =
1541 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
1542 INDEX_TO_SEQ(q->write_ptr));
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001543
Aviya Erenfeldab021652015-06-09 16:45:52 +03001544 cmd_pos = sizeof(struct iwl_cmd_header_wide);
1545 copy_size = sizeof(struct iwl_cmd_header_wide);
1546 } else {
1547 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
1548 out_cmd->hdr.sequence =
1549 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
1550 INDEX_TO_SEQ(q->write_ptr));
1551 out_cmd->hdr.group_id = 0;
1552
1553 cmd_pos = sizeof(struct iwl_cmd_header);
1554 copy_size = sizeof(struct iwl_cmd_header);
1555 }
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001556
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001557 /* and copy the data that needs to be copied */
Johannes Berg1afbfb62013-02-26 11:32:26 +01001558 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
Johannes Berg4d075002014-04-24 10:41:31 +02001559 int copy;
Johannes Berg8a964f42013-02-25 16:01:34 +01001560
Emmanuel Grumbachcc904c72013-03-14 08:35:06 +02001561 if (!cmd->len[i])
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001562 continue;
Johannes Berg8a964f42013-02-25 16:01:34 +01001563
Johannes Berg4d075002014-04-24 10:41:31 +02001564 /* copy everything if not nocopy/dup */
1565 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1566 IWL_HCMD_DFL_DUP))) {
1567 copy = cmd->len[i];
1568
1569 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1570 cmd_pos += copy;
1571 copy_size += copy;
1572 continue;
1573 }
1574
1575 /*
Sara Sharon8de437c2016-06-09 17:56:38 +03001576 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
1577 * in total (for bi-directional DMA), but copy up to what
Johannes Berg4d075002014-04-24 10:41:31 +02001578 * we can fit into the payload for debug dump purposes.
1579 */
1580 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1581
1582 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1583 cmd_pos += copy;
1584
1585 /* However, treat copy_size the proper way, we need it below */
Sara Sharon8de437c2016-06-09 17:56:38 +03001586 if (copy_size < IWL_FIRST_TB_SIZE) {
1587 copy = IWL_FIRST_TB_SIZE - copy_size;
Johannes Berg8a964f42013-02-25 16:01:34 +01001588
1589 if (copy > cmd->len[i])
1590 copy = cmd->len[i];
Johannes Berg8a964f42013-02-25 16:01:34 +01001591 copy_size += copy;
1592 }
Emmanuel Grumbach96791422012-07-24 01:58:32 +03001593 }
1594
Johannes Bergd9fb6462012-03-26 08:23:39 -07001595 IWL_DEBUG_HC(trans,
Aviya Erenfeldab021652015-06-09 16:45:52 +03001596 "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001597 iwl_get_cmd_string(trans, cmd->id),
Aviya Erenfeldab021652015-06-09 16:45:52 +03001598 group_id, out_cmd->hdr.cmd,
1599 le16_to_cpu(out_cmd->hdr.sequence),
Johannes Berg20d3b642012-05-16 22:54:29 +02001600 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001601
Sara Sharon8de437c2016-06-09 17:56:38 +03001602 /* start the TFD with the minimum copy bytes */
1603 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
1604 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
Johannes Berg38c0f3342013-02-27 13:18:50 +01001605 iwl_pcie_txq_build_tfd(trans, txq,
Sara Sharon8de437c2016-06-09 17:56:38 +03001606 iwl_pcie_get_first_tb_dma(txq, idx),
1607 tb0_size, true);
Johannes Berg8a964f42013-02-25 16:01:34 +01001608
Johannes Berg38c0f3342013-02-27 13:18:50 +01001609 /* map first command fragment, if any remains */
Sara Sharon8de437c2016-06-09 17:56:38 +03001610 if (copy_size > tb0_size) {
Johannes Berg38c0f3342013-02-27 13:18:50 +01001611 phys_addr = dma_map_single(trans->dev,
Sara Sharon8de437c2016-06-09 17:56:38 +03001612 ((u8 *)&out_cmd->hdr) + tb0_size,
1613 copy_size - tb0_size,
Johannes Berg38c0f3342013-02-27 13:18:50 +01001614 DMA_TO_DEVICE);
1615 if (dma_mapping_error(trans->dev, phys_addr)) {
1616 iwl_pcie_tfd_unmap(trans, out_meta,
1617 &txq->tfds[q->write_ptr]);
1618 idx = -ENOMEM;
1619 goto out;
1620 }
1621
1622 iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
Sara Sharon8de437c2016-06-09 17:56:38 +03001623 copy_size - tb0_size, false);
Johannes Berg2c46f722011-04-28 07:27:10 -07001624 }
1625
Johannes Berg8a964f42013-02-25 16:01:34 +01001626 /* map the remaining (adjusted) nocopy/dup fragments */
Johannes Berg1afbfb62013-02-26 11:32:26 +01001627 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
Johannes Berg8a964f42013-02-25 16:01:34 +01001628 const void *data = cmddata[i];
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001629
Johannes Berg8a964f42013-02-25 16:01:34 +01001630 if (!cmdlen[i])
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001631 continue;
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001632 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1633 IWL_HCMD_DFL_DUP)))
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001634 continue;
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001635 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1636 data = dup_buf;
1637 phys_addr = dma_map_single(trans->dev, (void *)data,
Johannes Berg98891752013-02-26 11:28:19 +01001638 cmdlen[i], DMA_TO_DEVICE);
Emmanuel Grumbach1042db22012-01-03 16:56:15 +02001639 if (dma_mapping_error(trans->dev, phys_addr)) {
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001640 iwl_pcie_tfd_unmap(trans, out_meta,
Johannes Berg98891752013-02-26 11:28:19 +01001641 &txq->tfds[q->write_ptr]);
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001642 idx = -ENOMEM;
1643 goto out;
1644 }
1645
Johannes Berg6d6e68f2014-04-23 19:00:56 +02001646 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001647 }
Reinette Chatredf833b12009-04-21 10:55:48 -07001648
Johannes Berg206eea72015-04-17 16:38:31 +02001649 BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
1650 sizeof(out_meta->flags) * BITS_PER_BYTE);
Emmanuel Grumbachafaf6b52011-07-08 08:46:09 -07001651 out_meta->flags = cmd->flags;
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001652 if (WARN_ON_ONCE(txq->entries[idx].free_buf))
Johannes Berg5d4185a2014-09-09 21:16:06 +02001653 kzfree(txq->entries[idx].free_buf);
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001654 txq->entries[idx].free_buf = dup_buf;
Johannes Berg2c46f722011-04-28 07:27:10 -07001655
Aviya Erenfeldab021652015-06-09 16:45:52 +03001656 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
Reinette Chatredf833b12009-04-21 10:55:48 -07001657
Johannes Berg7c5ba4a2012-04-09 17:46:54 -07001658 /* start timer if queue currently empty */
Emmanuel Grumbach4cf677f2015-01-12 14:38:29 +02001659 if (q->read_ptr == q->write_ptr && txq->wd_timeout)
1660 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
Johannes Berg7c5ba4a2012-04-09 17:46:54 -07001661
Emmanuel Grumbachb9439492013-12-22 15:09:40 +02001662 spin_lock_irqsave(&trans_pcie->reg_lock, flags);
Eliad Peller7616f332014-11-20 17:33:43 +02001663 ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
Eliad Peller804d4c52014-11-20 14:36:26 +02001664 if (ret < 0) {
1665 idx = ret;
1666 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1667 goto out;
Emmanuel Grumbachb9439492013-12-22 15:09:40 +02001668 }
1669
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001670 /* Increment and update queue's write index */
Johannes Berg83f32a42014-04-24 09:57:40 +02001671 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001672 iwl_pcie_txq_inc_wr_ptr(trans, txq);
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001673
Emmanuel Grumbachb9439492013-12-22 15:09:40 +02001674 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1675
Johannes Berg2c46f722011-04-28 07:27:10 -07001676 out:
Johannes Berg015c15e2012-03-05 11:24:24 -08001677 spin_unlock_bh(&txq->lock);
Johannes Bergf4feb8a2012-10-19 14:24:43 +02001678 free_dup_buf:
1679 if (idx < 0)
1680 kfree(dup_buf);
Abhijeet Kolekar7bfedc52010-02-03 13:47:56 -08001681 return idx;
Tomas Winklerfd4abac2008-05-15 13:54:07 +08001682}
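/*
 * Illustrative note (the exact bit layout is an assumption, not taken
 * from this file): the 16-bit sequence written into the command header
 * packs the ring slot in the low byte and the queue number above it,
 * roughly
 *
 *	sequence = QUEUE_TO_SEQ(cmd_queue) | INDEX_TO_SEQ(write_ptr);
 *		 = ((cmd_queue & 0x1f) << 8) | (write_ptr & 0xff);
 *
 * so that iwl_pcie_hcmd_complete() below can recover both with
 * SEQ_TO_QUEUE()/SEQ_TO_INDEX() when the firmware echoes the sequence
 * in the response packet.
 */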
1683
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001684/*
1685 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
Tomas Winkler17b88922008-05-29 16:35:12 +08001686 * @rxb: Rx buffer to reclaim
Tomas Winkler17b88922008-05-29 16:35:12 +08001687 */
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001688void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
Johannes Bergf7e64692015-06-23 21:58:17 +02001689 struct iwl_rx_cmd_buffer *rxb)
Tomas Winkler17b88922008-05-29 16:35:12 +08001690{
Zhu Yi2f301222009-10-09 17:19:45 +08001691 struct iwl_rx_packet *pkt = rxb_addr(rxb);
Tomas Winkler17b88922008-05-29 16:35:12 +08001692 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
Sharon Dvir39bdb172015-10-15 18:18:09 +03001693 u8 group_id = iwl_cmd_groupid(pkt->hdr.group_id);
1694 u32 cmd_id;
Tomas Winkler17b88922008-05-29 16:35:12 +08001695 int txq_id = SEQ_TO_QUEUE(sequence);
1696 int index = SEQ_TO_INDEX(sequence);
Tomas Winkler17b88922008-05-29 16:35:12 +08001697 int cmd_index;
Johannes Bergc2acea82009-07-24 11:13:05 -07001698 struct iwl_device_cmd *cmd;
1699 struct iwl_cmd_meta *meta;
Emmanuel Grumbach8ad71be2011-08-25 23:11:32 -07001700 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach990aa6d2012-11-14 12:39:52 +02001701 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
Tomas Winkler17b88922008-05-29 16:35:12 +08001702
1703 /* If a Tx command is being handled and it isn't in the actual
 1704	 * command queue then a command routing bug has been introduced
1705 * in the queue management code. */
Meenakshi Venkataramanc6f600f2012-03-08 11:29:12 -08001706 if (WARN(txq_id != trans_pcie->cmd_queue,
Johannes Berg13bb9482010-08-23 10:46:33 +02001707 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
Johannes Berg20d3b642012-05-16 22:54:29 +02001708 txq_id, trans_pcie->cmd_queue, sequence,
1709 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
1710 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
Emmanuel Grumbach3e10cae2011-09-06 09:31:18 -07001711 iwl_print_hex_error(trans, pkt, 32);
Johannes Berg55d6a3c2008-09-23 19:18:43 +02001712 return;
Winkler, Tomas01ef93232008-11-07 09:58:45 -08001713 }
Tomas Winkler17b88922008-05-29 16:35:12 +08001714
Johannes Berg2bfb5092012-12-27 21:43:48 +01001715 spin_lock_bh(&txq->lock);
Johannes Berg015c15e2012-03-05 11:24:24 -08001716
Johannes Berg4ce7cc22011-05-13 11:57:40 -07001717 cmd_index = get_cmd_index(&txq->q, index);
Johannes Bergbf8440e2012-03-19 17:12:06 +01001718 cmd = txq->entries[cmd_index].cmd;
1719 meta = &txq->entries[cmd_index].meta;
Sharon Dvir39bdb172015-10-15 18:18:09 +03001720 cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
Tomas Winkler17b88922008-05-29 16:35:12 +08001721
Johannes Berg98891752013-02-26 11:28:19 +01001722 iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
Reinette Chatrec33de622009-10-30 14:36:10 -07001723
Tomas Winkler17b88922008-05-29 16:35:12 +08001724 /* Input error checking is done when commands are added to queue. */
Johannes Bergc2acea82009-07-24 11:13:05 -07001725 if (meta->flags & CMD_WANT_SKB) {
Johannes Berg48a2d662012-03-05 11:24:39 -08001726 struct page *p = rxb_steal_page(rxb);
Stanislaw Gruszka2624e962011-04-20 16:02:58 +02001727
Johannes Berg65b94a42012-03-05 11:24:38 -08001728 meta->source->resp_pkt = pkt;
1729 meta->source->_rx_page_addr = (unsigned long)page_address(p);
Johannes Bergb2cf4102012-04-09 17:46:51 -07001730 meta->source->_rx_page_order = trans_pcie->rx_page_order;
Stanislaw Gruszka2624e962011-04-20 16:02:58 +02001731 }
Tomas Winkler17b88922008-05-29 16:35:12 +08001732
Emmanuel Grumbachdcbb4742015-11-24 15:17:37 +02001733 if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
1734 iwl_op_mode_async_cb(trans->op_mode, cmd);
1735
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001736 iwl_pcie_cmdq_reclaim(trans, txq_id, index);
Tomas Winkler17b88922008-05-29 16:35:12 +08001737
Johannes Bergc2acea82009-07-24 11:13:05 -07001738 if (!(meta->flags & CMD_ASYNC)) {
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001739 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
Wey-Yi Guy05c89b92011-10-10 07:26:48 -07001740 IWL_WARN(trans,
1741 "HCMD_ACTIVE already clear for command %s\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001742 iwl_get_cmd_string(trans, cmd_id));
Wey-Yi Guy05c89b92011-10-10 07:26:48 -07001743 }
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001744 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
Emmanuel Grumbach6d8f6ee2011-08-25 23:11:06 -07001745 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001746 iwl_get_cmd_string(trans, cmd_id));
Emmanuel Grumbachf946b522012-10-25 17:25:52 +02001747 wake_up(&trans_pcie->wait_command_queue);
Tomas Winkler17b88922008-05-29 16:35:12 +08001748 }
Stanislaw Gruszka3598e172011-03-31 17:36:26 +02001749
Luciano Coelho4cbb8e502015-08-18 16:02:38 +03001750 if (meta->flags & CMD_MAKE_TRANS_IDLE) {
1751 IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
1752 iwl_get_cmd_string(trans, cmd->hdr.cmd));
1753 set_bit(STATUS_TRANS_IDLE, &trans->status);
1754 wake_up(&trans_pcie->d0i3_waitq);
1755 }
1756
1757 if (meta->flags & CMD_WAKE_UP_TRANS) {
1758 IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
1759 iwl_get_cmd_string(trans, cmd->hdr.cmd));
1760 clear_bit(STATUS_TRANS_IDLE, &trans->status);
1761 wake_up(&trans_pcie->d0i3_waitq);
1762 }
1763
Zhu Yidd487442010-03-22 02:28:41 -07001764 meta->flags = 0;
Stanislaw Gruszka3598e172011-03-31 17:36:26 +02001765
Johannes Berg2bfb5092012-12-27 21:43:48 +01001766 spin_unlock_bh(&txq->lock);
Tomas Winkler17b88922008-05-29 16:35:12 +08001767}
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001768
Johannes Berg9439eac2013-10-09 09:59:25 +02001769#define HOST_COMPLETE_TIMEOUT (2 * HZ)
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001770
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001771static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
1772 struct iwl_host_cmd *cmd)
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001773{
1774 int ret;
1775
 1776	/* An asynchronous command cannot expect an SKB to be set. */
1777 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1778 return -EINVAL;
1779
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001780 ret = iwl_pcie_enqueue_hcmd(trans, cmd);
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001781 if (ret < 0) {
Johannes Berg721c32f2012-03-06 13:30:40 -08001782 IWL_ERR(trans,
Todd Previteb36b1102011-11-10 06:55:02 -08001783 "Error sending %s: enqueue_hcmd failed: %d\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001784 iwl_get_cmd_string(trans, cmd->id), ret);
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001785 return ret;
1786 }
1787 return 0;
1788}
1789
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001790static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1791 struct iwl_host_cmd *cmd)
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001792{
Emmanuel Grumbach8ad71be2011-08-25 23:11:32 -07001793 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001794 int cmd_idx;
1795 int ret;
1796
Emmanuel Grumbach6d8f6ee2011-08-25 23:11:06 -07001797 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001798 iwl_get_cmd_string(trans, cmd->id));
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001799
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001800 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1801 &trans->status),
Johannes Bergbcbb8c92013-10-28 15:50:55 +01001802 "Command %s: a command is already active!\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001803 iwl_get_cmd_string(trans, cmd->id)))
Johannes Berg2cc39c92012-03-06 13:30:41 -08001804 return -EIO;
Johannes Berg2cc39c92012-03-06 13:30:41 -08001805
Emmanuel Grumbach6d8f6ee2011-08-25 23:11:06 -07001806 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001807 iwl_get_cmd_string(trans, cmd->id));
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001808
Luca Coelho71b12302016-03-11 12:12:16 +02001809 if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
1810 ret = wait_event_timeout(trans_pcie->d0i3_waitq,
1811 pm_runtime_active(&trans_pcie->pci_dev->dev),
1812 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
1813 if (!ret) {
1814 IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
1815 return -ETIMEDOUT;
1816 }
1817 }
1818
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001819 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001820 if (cmd_idx < 0) {
1821 ret = cmd_idx;
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001822 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
Johannes Berg721c32f2012-03-06 13:30:40 -08001823 IWL_ERR(trans,
Todd Previteb36b1102011-11-10 06:55:02 -08001824 "Error sending %s: enqueue_hcmd failed: %d\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001825 iwl_get_cmd_string(trans, cmd->id), ret);
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001826 return ret;
1827 }
1828
Emmanuel Grumbachb9439492013-12-22 15:09:40 +02001829 ret = wait_event_timeout(trans_pcie->wait_command_queue,
1830 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1831 &trans->status),
1832 HOST_COMPLETE_TIMEOUT);
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001833 if (!ret) {
Johannes Berg6dde8c42013-10-31 18:30:38 +01001834 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1835 struct iwl_queue *q = &txq->q;
Wey-Yi Guyd10630a2011-10-10 07:26:46 -07001836
Johannes Berg6dde8c42013-10-31 18:30:38 +01001837 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001838 iwl_get_cmd_string(trans, cmd->id),
Johannes Berg6dde8c42013-10-31 18:30:38 +01001839 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001840
Johannes Berg6dde8c42013-10-31 18:30:38 +01001841 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1842 q->read_ptr, q->write_ptr);
Wey-Yi Guyd10630a2011-10-10 07:26:46 -07001843
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001844 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
Johannes Berg6dde8c42013-10-31 18:30:38 +01001845 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001846 iwl_get_cmd_string(trans, cmd->id));
Johannes Berg6dde8c42013-10-31 18:30:38 +01001847 ret = -ETIMEDOUT;
Emmanuel Grumbach42550a52013-09-11 14:16:20 +03001848
Liad Kaufman4c9706d2014-04-27 16:46:09 +03001849 iwl_force_nmi(trans);
Arik Nemtsov2a988e92013-12-01 13:50:40 +02001850 iwl_trans_fw_error(trans);
Emmanuel Grumbach42550a52013-09-11 14:16:20 +03001851
Johannes Berg6dde8c42013-10-31 18:30:38 +01001852 goto cancel;
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001853 }
1854
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001855 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
Johannes Bergd18aa872012-11-06 16:36:21 +01001856 IWL_ERR(trans, "FW error in SYNC CMD %s\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001857 iwl_get_cmd_string(trans, cmd->id));
Johannes Bergb656fa32013-05-03 11:56:17 +02001858 dump_stack();
Johannes Bergd18aa872012-11-06 16:36:21 +01001859 ret = -EIO;
1860 goto cancel;
1861 }
1862
Eran Harary1094fa22013-06-02 12:40:34 +03001863 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001864 test_bit(STATUS_RFKILL, &trans->status)) {
Emmanuel Grumbachf946b522012-10-25 17:25:52 +02001865 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1866 ret = -ERFKILL;
1867 goto cancel;
1868 }
1869
Johannes Berg65b94a42012-03-05 11:24:38 -08001870 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
Emmanuel Grumbach6d8f6ee2011-08-25 23:11:06 -07001871 IWL_ERR(trans, "Error: Response NULL in '%s'\n",
Sharon Dvir39bdb172015-10-15 18:18:09 +03001872 iwl_get_cmd_string(trans, cmd->id));
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001873 ret = -EIO;
1874 goto cancel;
1875 }
1876
1877 return 0;
1878
1879cancel:
1880 if (cmd->flags & CMD_WANT_SKB) {
1881 /*
1882 * Cancel the CMD_WANT_SKB flag for the cmd in the
1883 * TX cmd queue. Otherwise in case the cmd comes
1884 * in later, it will possibly set an invalid
1885 * address (cmd->meta.source).
1886 */
Johannes Bergbf8440e2012-03-19 17:12:06 +01001887 trans_pcie->txq[trans_pcie->cmd_queue].
1888 entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001889 }
Emmanuel Grumbach9cac4942011-11-10 06:55:20 -08001890
Johannes Berg65b94a42012-03-05 11:24:38 -08001891 if (cmd->resp_pkt) {
1892 iwl_free_resp(cmd);
1893 cmd->resp_pkt = NULL;
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001894 }
1895
1896 return ret;
1897}
1898
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001899int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001900{
Eran Harary4f593342013-05-13 07:53:26 +03001901 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
Arik Nemtsoveb7ff772013-12-01 12:30:38 +02001902 test_bit(STATUS_RFKILL, &trans->status)) {
Emmanuel Grumbach754d7d92013-03-13 22:16:20 +02001903 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1904 cmd->id);
Emmanuel Grumbachf946b522012-10-25 17:25:52 +02001905 return -ERFKILL;
Emmanuel Grumbach754d7d92013-03-13 22:16:20 +02001906 }
Emmanuel Grumbachf946b522012-10-25 17:25:52 +02001907
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001908 if (cmd->flags & CMD_ASYNC)
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001909 return iwl_pcie_send_hcmd_async(trans, cmd);
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001910
Emmanuel Grumbachf946b522012-10-25 17:25:52 +02001911	/* We can still fail on RFKILL, which may be asserted while we wait */
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02001912 return iwl_pcie_send_hcmd_sync(trans, cmd);
Emmanuel Grumbach253a6342011-07-11 07:39:46 -07001913}
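/*
 * Minimal usage sketch (illustrative only; ECHO_CMD and the payload are
 * placeholders, not taken from this file, and callers normally go
 * through the transport ops): a synchronous host command that wants the
 * response packet back.
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = ECHO_CMD,			// hypothetical command id
 *		.flags = CMD_WANT_SKB,
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *	};
 *	int ret = iwl_trans_pcie_send_hcmd(trans, &hcmd);
 *
 *	if (!ret && hcmd.resp_pkt) {
 *		// ... parse hcmd.resp_pkt ...
 *		iwl_free_resp(&hcmd);		// release the stolen rx page
 *	}
 *
 * Without CMD_WANT_SKB the call still blocks until the firmware responds,
 * but no response buffer is handed back.
 */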
1914
Emmanuel Grumbach3a0b2a42015-10-14 22:10:50 +03001915static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
1916 struct iwl_txq *txq, u8 hdr_len,
1917 struct iwl_cmd_meta *out_meta,
1918 struct iwl_device_cmd *dev_cmd, u16 tb1_len)
1919{
1920 struct iwl_queue *q = &txq->q;
1921 u16 tb2_len;
1922 int i;
1923
1924 /*
1925 * Set up TFD's third entry to point directly to remainder
1926 * of skb's head, if any
1927 */
1928 tb2_len = skb_headlen(skb) - hdr_len;
1929
1930 if (tb2_len > 0) {
1931 dma_addr_t tb2_phys = dma_map_single(trans->dev,
1932 skb->data + hdr_len,
1933 tb2_len, DMA_TO_DEVICE);
1934 if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
1935 iwl_pcie_tfd_unmap(trans, out_meta,
1936 &txq->tfds[q->write_ptr]);
1937 return -EINVAL;
1938 }
1939 iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
1940 }
1941
1942 /* set up the remaining entries to point to the data */
1943 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1944 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1945 dma_addr_t tb_phys;
1946 int tb_idx;
1947
1948 if (!skb_frag_size(frag))
1949 continue;
1950
1951 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
1952 skb_frag_size(frag), DMA_TO_DEVICE);
1953
1954 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
1955 iwl_pcie_tfd_unmap(trans, out_meta,
1956 &txq->tfds[q->write_ptr]);
1957 return -EINVAL;
1958 }
1959 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
1960 skb_frag_size(frag), false);
1961
1962 out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
1963 }
1964
1965 trace_iwlwifi_dev_tx(trans->dev, skb,
1966 &txq->tfds[txq->q.write_ptr],
1967 sizeof(struct iwl_tfd),
Sara Sharon8de437c2016-06-09 17:56:38 +03001968 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
Emmanuel Grumbach3a0b2a42015-10-14 22:10:50 +03001969 skb->data + hdr_len, tb2_len);
1970 trace_iwlwifi_dev_tx_data(trans->dev, skb,
1971 hdr_len, skb->len - hdr_len);
1972 return 0;
1973}
1974
Emmanuel Grumbach6eb5e5292015-10-18 09:31:24 +03001975#ifdef CONFIG_INET
1976static struct iwl_tso_hdr_page *
1977get_page_hdr(struct iwl_trans *trans, size_t len)
1978{
1979 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1980 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
1981
1982 if (!p->page)
1983 goto alloc;
1984
1985 /* enough room on this page */
1986 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
1987 return p;
1988
1989 /* We don't have enough room on this page, get a new one. */
1990 __free_page(p->page);
1991
1992alloc:
1993 p->page = alloc_page(GFP_ATOMIC);
1994 if (!p->page)
1995 return NULL;
1996 p->pos = page_address(p->page);
1997 return p;
1998}
1999
2000static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
2001 bool ipv6, unsigned int len)
2002{
2003 if (ipv6) {
2004 struct ipv6hdr *iphv6 = iph;
2005
2006 tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
2007 len + tcph->doff * 4,
2008 IPPROTO_TCP, 0);
2009 } else {
2010 struct iphdr *iphv4 = iph;
2011
2012 ip_send_check(iphv4);
2013 tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
2014 len + tcph->doff * 4,
2015 IPPROTO_TCP, 0);
2016 }
2017}
2018
2019static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2020 struct iwl_txq *txq, u8 hdr_len,
2021 struct iwl_cmd_meta *out_meta,
2022 struct iwl_device_cmd *dev_cmd, u16 tb1_len)
2023{
Emmanuel Grumbach6eb5e5292015-10-18 09:31:24 +03002024 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
2025 struct ieee80211_hdr *hdr = (void *)skb->data;
2026 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
2027 unsigned int mss = skb_shinfo(skb)->gso_size;
2028 struct iwl_queue *q = &txq->q;
2029 u16 length, iv_len, amsdu_pad;
2030 u8 *start_hdr;
2031 struct iwl_tso_hdr_page *hdr_page;
Johannes Berg21cb3222016-06-21 13:11:48 +02002032 struct page **page_ptr;
Emmanuel Grumbach6eb5e5292015-10-18 09:31:24 +03002033 int ret;
2034 struct tso_t tso;
2035
2036 /* if the packet is protected, then it must be CCMP or GCMP */
2037 BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
2038 iv_len = ieee80211_has_protected(hdr->frame_control) ?
2039 IEEE80211_CCMP_HDR_LEN : 0;
2040
2041 trace_iwlwifi_dev_tx(trans->dev, skb,
2042 &txq->tfds[txq->q.write_ptr],
2043 sizeof(struct iwl_tfd),
Sara Sharon8de437c2016-06-09 17:56:38 +03002044 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
Emmanuel Grumbach6eb5e5292015-10-18 09:31:24 +03002045 NULL, 0);
2046
2047 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
2048 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
2049 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
2050 amsdu_pad = 0;
2051
2052 /* total amount of header we may need for this A-MSDU */
2053 hdr_room = DIV_ROUND_UP(total_len, mss) *
2054 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
2055
2056 /* Our device supports 9 segments at most, it will fit in 1 page */
2057 hdr_page = get_page_hdr(trans, hdr_room);
2058 if (!hdr_page)
2059 return -ENOMEM;
2060
2061 get_page(hdr_page->page);
2062 start_hdr = hdr_page->pos;
Johannes Berg21cb3222016-06-21 13:11:48 +02002063 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
2064 *page_ptr = hdr_page->page;
Emmanuel Grumbach6eb5e5292015-10-18 09:31:24 +03002065 memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
2066 hdr_page->pos += iv_len;
2067
2068 /*
2069 * Pull the ieee80211 header + IV to be able to use TSO core,
2070 * we will restore it for the tx_status flow.
2071 */
2072 skb_pull(skb, hdr_len + iv_len);
2073
2074 tso_start(skb, &tso);
2075
2076 while (total_len) {
2077 /* this is the data left for this subframe */
2078 unsigned int data_left =
2079 min_t(unsigned int, mss, total_len);
2080 struct sk_buff *csum_skb = NULL;
2081 unsigned int hdr_tb_len;
2082 dma_addr_t hdr_tb_phys;
2083 struct tcphdr *tcph;
2084 u8 *iph;
2085
2086 total_len -= data_left;
2087
2088 memset(hdr_page->pos, 0, amsdu_pad);
2089 hdr_page->pos += amsdu_pad;
2090 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
2091 data_left)) & 0x3;
2092 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
2093 hdr_page->pos += ETH_ALEN;
2094 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
2095 hdr_page->pos += ETH_ALEN;
2096
2097 length = snap_ip_tcp_hdrlen + data_left;
2098 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
2099 hdr_page->pos += sizeof(length);
2100
2101 /*
 2102	 * This will copy the SNAP as well, which will be considered
 2103	 * part of the MAC header.
2104 */
2105 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
2106 iph = hdr_page->pos + 8;
2107 tcph = (void *)(iph + ip_hdrlen);
2108
2109 /* For testing on current hardware only */
2110 if (trans_pcie->sw_csum_tx) {
2111 csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
2112 GFP_ATOMIC);
2113 if (!csum_skb) {
2114 ret = -ENOMEM;
2115 goto out_unmap;
2116 }
2117
2118 iwl_compute_pseudo_hdr_csum(iph, tcph,
2119 skb->protocol ==
2120 htons(ETH_P_IPV6),
2121 data_left);
2122
2123 memcpy(skb_put(csum_skb, tcp_hdrlen(skb)),
2124 tcph, tcp_hdrlen(skb));
2125 skb_set_transport_header(csum_skb, 0);
2126 csum_skb->csum_start =
2127 (unsigned char *)tcp_hdr(csum_skb) -
2128 csum_skb->head;
2129 }
2130
2131 hdr_page->pos += snap_ip_tcp_hdrlen;
2132
2133 hdr_tb_len = hdr_page->pos - start_hdr;
2134 hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
2135 hdr_tb_len, DMA_TO_DEVICE);
2136 if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
2137 dev_kfree_skb(csum_skb);
2138 ret = -EINVAL;
2139 goto out_unmap;
2140 }
2141 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
2142 hdr_tb_len, false);
2143 trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
2144 hdr_tb_len);
2145
2146 /* prepare the start_hdr for the next subframe */
2147 start_hdr = hdr_page->pos;
2148
2149 /* put the payload */
2150 while (data_left) {
2151 unsigned int size = min_t(unsigned int, tso.size,
2152 data_left);
2153 dma_addr_t tb_phys;
2154
2155 if (trans_pcie->sw_csum_tx)
2156 memcpy(skb_put(csum_skb, size), tso.data, size);
2157
2158 tb_phys = dma_map_single(trans->dev, tso.data,
2159 size, DMA_TO_DEVICE);
2160 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
2161 dev_kfree_skb(csum_skb);
2162 ret = -EINVAL;
2163 goto out_unmap;
2164 }
2165
2166 iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
2167 size, false);
2168 trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
2169 size);
2170
2171 data_left -= size;
2172 tso_build_data(skb, &tso, size);
2173 }
2174
2175 /* For testing on early hardware only */
2176 if (trans_pcie->sw_csum_tx) {
2177 __wsum csum;
2178
2179 csum = skb_checksum(csum_skb,
2180 skb_checksum_start_offset(csum_skb),
2181 csum_skb->len -
2182 skb_checksum_start_offset(csum_skb),
2183 0);
2184 dev_kfree_skb(csum_skb);
2185 dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
2186 hdr_tb_len, DMA_TO_DEVICE);
2187 tcph->check = csum_fold(csum);
2188 dma_sync_single_for_device(trans->dev, hdr_tb_phys,
2189 hdr_tb_len, DMA_TO_DEVICE);
2190 }
2191 }
2192
2193 /* re -add the WiFi header and IV */
2194 skb_push(skb, hdr_len + iv_len);
2195
2196 return 0;
2197
2198out_unmap:
2199 iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
2200 return ret;
2201}
2202#else /* CONFIG_INET */
2203static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2204 struct iwl_txq *txq, u8 hdr_len,
2205 struct iwl_cmd_meta *out_meta,
2206 struct iwl_device_cmd *dev_cmd, u16 tb1_len)
2207{
2208 /* No A-MSDU without CONFIG_INET */
2209 WARN_ON(1);
2210
2211 return -1;
2212}
2213#endif /* CONFIG_INET */
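/*
 * Worked example (illustrative, with assumed header sizes): each A-MSDU
 * subframe built above is DA/SA/length (sizeof(struct ethhdr) == 14
 * bytes) + SNAP/IP/TCP headers + payload, and the next subframe must
 * start on a 4-byte boundary.  With a 20-byte IPv4 header and a 20-byte
 * TCP header, snap_ip_tcp_hdrlen = 8 + 20 + 20 = 48; for a 1300-byte
 * payload the subframe is 14 + 48 + 1300 = 1362 bytes, so
 *
 *	amsdu_pad = (4 - 1362) & 0x3 = 2
 *
 * pad bytes are zeroed in front of the following subframe.  The same
 * numbers bound hdr_room: at most DIV_ROUND_UP(total_len, mss) subframes,
 * each needing up to 3 pad bytes + 48 + 14 header bytes (plus iv_len
 * once).
 */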
2214
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002215int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2216 struct iwl_device_cmd *dev_cmd, int txq_id)
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07002217{
Emmanuel Grumbach8ad71be2011-08-25 23:11:32 -07002218 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
Johannes Berg206eea72015-04-17 16:38:31 +02002219 struct ieee80211_hdr *hdr;
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002220 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
2221 struct iwl_cmd_meta *out_meta;
2222 struct iwl_txq *txq;
2223 struct iwl_queue *q;
Johannes Berg38c0f3342013-02-27 13:18:50 +01002224 dma_addr_t tb0_phys, tb1_phys, scratch_phys;
2225 void *tb1_addr;
Emmanuel Grumbach3a0b2a42015-10-14 22:10:50 +03002226 u16 len, tb1_len;
Johannes Bergea68f462014-02-27 14:36:55 +01002227 bool wait_write_ptr;
Johannes Berg206eea72015-04-17 16:38:31 +02002228 __le16 fc;
2229 u8 hdr_len;
Johannes Berg68972c42013-06-11 19:05:27 +02002230 u16 wifi_seq;
Sara Sharonc772a3d32016-03-13 17:19:38 +02002231 bool amsdu;
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07002232
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002233 txq = &trans_pcie->txq[txq_id];
2234 q = &txq->q;
Emmanuel Grumbach39644e92011-09-15 11:46:29 -07002235
Johannes Berg961de6a2013-07-04 18:00:08 +02002236 if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
2237 "TX on unused queue %d\n", txq_id))
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002238 return -EINVAL;
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07002239
Emmanuel Grumbach41837ca92015-10-21 09:00:07 +03002240 if (unlikely(trans_pcie->sw_csum_tx &&
2241 skb->ip_summed == CHECKSUM_PARTIAL)) {
2242 int offs = skb_checksum_start_offset(skb);
2243 int csum_offs = offs + skb->csum_offset;
2244 __wsum csum;
2245
2246 if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
2247 return -1;
2248
2249 csum = skb_checksum(skb, offs, skb->len - offs, 0);
2250 *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
Emmanuel Grumbach39555252016-01-14 09:39:21 +02002251
2252 skb->ip_summed = CHECKSUM_UNNECESSARY;
Emmanuel Grumbach41837ca92015-10-21 09:00:07 +03002253 }
2254
Johannes Berg206eea72015-04-17 16:38:31 +02002255 if (skb_is_nonlinear(skb) &&
2256 skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
2257 __skb_linearize(skb))
2258 return -ENOMEM;
2259
2260 /* mac80211 always puts the full header into the SKB's head,
2261 * so there's no need to check if it's readable there
2262 */
2263 hdr = (struct ieee80211_hdr *)skb->data;
2264 fc = hdr->frame_control;
2265 hdr_len = ieee80211_hdrlen(fc);
2266
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002267 spin_lock(&txq->lock);
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07002268
Emmanuel Grumbach39555252016-01-14 09:39:21 +02002269 if (iwl_queue_space(q) < q->high_mark) {
2270 iwl_stop_queue(trans, txq);
2271
 2272		/* don't put the packet on the ring if there is no room */
2273 if (unlikely(iwl_queue_space(q) < 3)) {
Johannes Berg21cb3222016-06-21 13:11:48 +02002274 struct iwl_device_cmd **dev_cmd_ptr;
Emmanuel Grumbach39555252016-01-14 09:39:21 +02002275
Johannes Berg21cb3222016-06-21 13:11:48 +02002276 dev_cmd_ptr = (void *)((u8 *)skb->cb +
2277 trans_pcie->dev_cmd_offs);
2278
2279 *dev_cmd_ptr = dev_cmd;
Emmanuel Grumbach39555252016-01-14 09:39:21 +02002280 __skb_queue_tail(&txq->overflow_q, skb);
2281
2282 spin_unlock(&txq->lock);
2283 return 0;
2284 }
2285 }
2286
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002287 /* In AGG mode, the index in the ring must correspond to the WiFi
2288 * sequence number. This is a HW requirements to help the SCD to parse
2289 * the BA.
2290 * Check here that the packets are in the right place on the ring.
2291 */
Johannes Berg9a886582013-02-15 19:25:00 +01002292 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
Eliad Peller1092b9b2013-07-16 17:53:43 +03002293 WARN_ONCE(txq->ampdu &&
Johannes Berg68972c42013-06-11 19:05:27 +02002294 (wifi_seq & 0xff) != q->write_ptr,
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002295 "Q: %d WiFi Seq %d tfdNum %d",
2296 txq_id, wifi_seq, q->write_ptr);
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07002297
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002298 /* Set up driver data for this TFD */
2299 txq->entries[q->write_ptr].skb = skb;
2300 txq->entries[q->write_ptr].cmd = dev_cmd;
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07002301
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002302 dev_cmd->hdr.sequence =
2303 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2304 INDEX_TO_SEQ(q->write_ptr)));
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07002305
Sara Sharon8de437c2016-06-09 17:56:38 +03002306 tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
Johannes Berg38c0f3342013-02-27 13:18:50 +01002307 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
2308 offsetof(struct iwl_tx_cmd, scratch);
2309
2310 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
2311 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
2312
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002313 /* Set up first empty entry in queue's array of Tx/cmd buffers */
2314 out_meta = &txq->entries[q->write_ptr].meta;
Johannes Berg206eea72015-04-17 16:38:31 +02002315 out_meta->flags = 0;
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07002316
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002317 /*
Johannes Berg38c0f3342013-02-27 13:18:50 +01002318 * The second TB (tb1) points to the remainder of the TX command
2319 * and the 802.11 header - dword aligned size
2320 * (This calculation modifies the TX command, so do it before the
2321 * setup of the first TB)
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002322 */
Johannes Berg38c0f3342013-02-27 13:18:50 +01002323 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
Sara Sharon8de437c2016-06-09 17:56:38 +03002324 hdr_len - IWL_FIRST_TB_SIZE;
Sara Sharonc772a3d32016-03-13 17:19:38 +02002325 /* do not align A-MSDU to dword as the subframe header aligns it */
2326 amsdu = ieee80211_is_data_qos(fc) &&
2327 (*ieee80211_get_qos_ctl(hdr) &
2328 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
2329 if (trans_pcie->sw_csum_tx || !amsdu) {
2330 tb1_len = ALIGN(len, 4);
2331 /* Tell NIC about any 2-byte padding after MAC header */
2332 if (tb1_len != len)
2333 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2334 } else {
2335 tb1_len = len;
2336 }
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002337
Sara Sharon8de437c2016-06-09 17:56:38 +03002338 /* The first TB points to bi-directional DMA data */
2339 memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
2340 IWL_FIRST_TB_SIZE);
Johannes Berg38c0f3342013-02-27 13:18:50 +01002341 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
Sara Sharon8de437c2016-06-09 17:56:38 +03002342 IWL_FIRST_TB_SIZE, true);
Johannes Berg38c0f3342013-02-27 13:18:50 +01002343
2344 /* there must be data left over for TB1 or this code must be changed */
Sara Sharon8de437c2016-06-09 17:56:38 +03002345 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
Johannes Berg38c0f3342013-02-27 13:18:50 +01002346
2347 /* map the data for TB1 */
Sara Sharon8de437c2016-06-09 17:56:38 +03002348 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
Johannes Berg38c0f3342013-02-27 13:18:50 +01002349 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
2350 if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002351 goto out_err;
Johannes Berg6d6e68f2014-04-23 19:00:56 +02002352 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
Johannes Berg38c0f3342013-02-27 13:18:50 +01002353
Sara Sharonc772a3d32016-03-13 17:19:38 +02002354 if (amsdu) {
Emmanuel Grumbach6eb5e5292015-10-18 09:31:24 +03002355 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
2356 out_meta, dev_cmd,
2357 tb1_len)))
2358 goto out_err;
2359 } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
2360 out_meta, dev_cmd, tb1_len))) {
Emmanuel Grumbach3a0b2a42015-10-14 22:10:50 +03002361 goto out_err;
Emmanuel Grumbach6eb5e5292015-10-18 09:31:24 +03002362 }
Johannes Berg206eea72015-04-17 16:38:31 +02002363
Johannes Berg38c0f3342013-02-27 13:18:50 +01002364 /* Set up entry for this TFD in Tx byte-count array */
2365 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
2366
Johannes Bergea68f462014-02-27 14:36:55 +01002367 wait_write_ptr = ieee80211_has_morefrags(fc);
Johannes Berg7c5ba4a2012-04-09 17:46:54 -07002368
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002369 /* start timer if queue currently empty */
Eliad Peller7616f332014-11-20 17:33:43 +02002370 if (q->read_ptr == q->write_ptr) {
Emmanuel Grumbachaecdc632015-07-29 23:06:41 +03002371 if (txq->wd_timeout) {
2372 /*
2373 * If the TXQ is active, then set the timer, if not,
2374 * set the timer in remainder so that the timer will
2375 * be armed with the right value when the station will
2376 * wake up.
2377 */
2378 if (!txq->frozen)
2379 mod_timer(&txq->stuck_timer,
2380 jiffies + txq->wd_timeout);
2381 else
2382 txq->frozen_expiry_remainder = txq->wd_timeout;
2383 }
Eliad Peller7616f332014-11-20 17:33:43 +02002384 IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
Luca Coelhoc24c7f52016-03-30 20:59:27 +03002385 iwl_trans_ref(trans);
Eliad Peller7616f332014-11-20 17:33:43 +02002386 }
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002387
2388 /* Tell device the write index *just past* this latest filled TFD */
Johannes Berg83f32a42014-04-24 09:57:40 +02002389 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
Johannes Bergea68f462014-02-27 14:36:55 +01002390 if (!wait_write_ptr)
2391 iwl_pcie_txq_inc_wr_ptr(trans, txq);
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002392
2393 /*
2394 * At this point the frame is "transmitted" successfully
Johannes Berg43aa6162014-02-27 14:24:36 +01002395 * and we will get a TX status notification eventually.
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002396 */
Emmanuel Grumbachf02831b2012-11-14 14:44:18 +02002397 spin_unlock(&txq->lock);
2398 return 0;
2399out_err:
2400 spin_unlock(&txq->lock);
2401 return -1;
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07002402}
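/*
 * Illustrative summary of the TFD layout built above (a sketch derived
 * from this function, not a hardware spec): for a regular frame the
 * buffers are split as
 *
 *	TB0: first IWL_FIRST_TB_SIZE bytes of the device command
 *	     (bi-directional DMA scratch, copied to txq->first_tb_bufs)
 *	TB1: remainder of the TX command + the 802.11 header,
 *	     tb1_len = ALIGN(sizeof(struct iwl_tx_cmd) +
 *			     sizeof(struct iwl_cmd_header) +
 *			     hdr_len - IWL_FIRST_TB_SIZE, 4)
 *	     (left unaligned in the A-MSDU case, as noted above)
 *	TB2+: the rest of skb->head and then one TB per page fragment,
 *	      filled in by iwl_fill_data_tbs()/iwl_fill_data_tbs_amsdu().
 */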