/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-5000-hw.h"

/**
 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
				    struct iwl_tx_queue *txq,
				    u16 byte_cnt)
{
	struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != IWL_CMD_QUEUE_NUM) {
		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

		switch (sec_ctl & TX_CMD_SEC_MSK) {
		case TX_CMD_SEC_CCM:
			len += CCMP_MIC_LEN;
			break;
		case TX_CMD_SEC_TKIP:
			len += TKIP_ICV_LEN;
			break;
		case TX_CMD_SEC_WEP:
			len += WEP_IV_LEN + WEP_ICV_LEN;
			break;
		}
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

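/**
 * iwlagn_txq_inval_byte_cnt_tbl - Invalidate a Tx byte-count table entry
 *
 * Overwrites the byte-count entry at the queue's read pointer with a
 * minimal length of 1 (keeping only the station id), so the entry for a
 * reclaimed TFD no longer reflects the old frame.
 */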
void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
				   struct iwl_tx_queue *txq)
{
	struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != IWL_CMD_QUEUE_NUM)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

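/*
 * Map a receiver-address/TID pair to a Tx queue in the scheduler's
 * translation table.  Each 32-bit table word holds two 16-bit entries:
 * even-numbered queues use the low half-word and odd-numbered queues
 * the high half-word, so the other queue's mapping is preserved.
 */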
static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = priv->scd_base_addr +
			IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

	return 0;
}

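/*
 * Stop the scheduler from servicing a Tx queue.  The aggregation
 * setup/teardown paths below stop a queue before reconfiguring it and
 * later re-activate it via iwlagn_tx_queue_set_status().
 */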
static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(priv,
		IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

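/*
 * Point a Tx queue at a new index: program the hardware write pointer
 * (HBUS_TARG_WRPTR) and the scheduler's read pointer so both start
 * from the same position, e.g. the starting sequence number of an
 * aggregation session.
 */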
void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
			int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
}

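/*
 * (De)activate a Tx queue in the scheduler and bind it to a Tx FIFO.
 * The queue is activated only if it is marked active in
 * priv->txq_ctx_active_msk; scd_retry selects scheduler-retry (BA
 * aggregation) mode rather than normal AC/command queue operation.
 */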
void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
				struct iwl_tx_queue *txq,
				int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
			(active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
			IWL50_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

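/**
 * iwlagn_txq_agg_enable - Configure a Tx queue for an aggregation (BA) session
 *
 * Checks that txq_id is one of the AMPDU queues, maps the station/TID
 * pair to the queue, places the read/write pointers at the starting
 * sequence number, programs the window size and frame limit, and
 * activates the queue on the given Tx FIFO with scheduler retry enabled.
 *
 * A minimal usage sketch (hypothetical caller-side values):
 *
 *	ret = iwlagn_txq_agg_enable(priv, txq_id, tx_fifo, sta_id, tid, ssn);
 *	if (ret)
 *		return ret;
 *
 * Returns 0 on success or -EINVAL if txq_id is out of range.
 */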
int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
			  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;

	if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
	     <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL50_FIRST_AMPDU_QUEUE,
			IWL50_FIRST_AMPDU_QUEUE +
			priv->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);

	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv, priv->scd_base_addr +
			IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((SCD_WIN_SIZE <<
			IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((SCD_FRAME_LIMIT <<
			IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

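/**
 * iwlagn_txq_agg_disable - Tear down an aggregation (BA) session's Tx queue
 *
 * Mirrors iwlagn_txq_agg_enable(): stops the scheduler for the queue,
 * clears its aggregation bit, resets the read/write pointers to ssn_idx,
 * deactivates the queue context, and returns the queue to non-retry
 * operation on the given Tx FIFO.
 *
 * Returns 0 on success or -EINVAL if txq_id is out of range.
 */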
int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
			   u16 ssn_idx, u8 tx_fifo)
{
	if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
	     <= txq_id)) {
		IWL_ERR(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL50_FIRST_AMPDU_QUEUE,
			IWL50_FIRST_AMPDU_QUEUE +
			priv->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* Assumes that ssn_idx is valid (!= 0xFFF) */
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
 * must be called under priv->lock and with MAC access held.
 */
void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
}