/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */
23
#include <linux/gfp.h>
#include <linux/sched.h>

#include "wl12xx.h"
#include "debug.h"
#include "acx.h"
#include "reg.h"
#include "rx.h"
#include "tx.h"
#include "io.h"

Eliad Peller4d56ad92011-08-14 13:17:05 +030035static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
Luciano Coelhof5fc0f82009-08-06 16:25:28 +030036 u32 drv_rx_counter)
37{
Luciano Coelhod0f63b22009-10-15 10:33:29 +030038 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
39 RX_MEM_BLOCK_MASK;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +030040}
41
Eliad Peller4d56ad92011-08-14 13:17:05 +030042static u32 wl12xx_rx_get_buf_size(struct wl12xx_fw_status *status,
43 u32 drv_rx_counter)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +030044{
Luciano Coelhod0f63b22009-10-15 10:33:29 +030045 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
46 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +030047}
48
Eliad Peller4d56ad92011-08-14 13:17:05 +030049static bool wl12xx_rx_get_unaligned(struct wl12xx_fw_status *status,
Shahar Levi0a1d3ab2011-07-14 11:50:27 +030050 u32 drv_rx_counter)
51{
52 /* Convert the value to bool */
53 return !!(le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
54 RX_BUF_UNALIGNED_PAYLOAD);
55}
56
Luciano Coelhof5fc0f82009-08-06 16:25:28 +030057static void wl1271_rx_status(struct wl1271 *wl,
58 struct wl1271_rx_descriptor *desc,
59 struct ieee80211_rx_status *status,
60 u8 beacon)
61{
62 memset(status, 0, sizeof(struct ieee80211_rx_status));
63
Teemu Paasikivi6a2de932010-10-14 11:00:04 +020064 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
Juuso Oikarinen0af04672011-03-10 10:01:43 +020065 status->band = IEEE80211_BAND_2GHZ;
Teemu Paasikivi6a2de932010-10-14 11:00:04 +020066 else
Juuso Oikarinen0af04672011-03-10 10:01:43 +020067 status->band = IEEE80211_BAND_5GHZ;
Teemu Paasikivi6a2de932010-10-14 11:00:04 +020068
Juuso Oikarinen0af04672011-03-10 10:01:43 +020069 status->rate_idx = wl1271_rate_to_idx(desc->rate, status->band);
Teemu Paasikivia4102642009-10-13 12:47:51 +030070
Shahar Levi18357852010-10-13 16:09:41 +020071 /* 11n support */
72 if (desc->rate <= CONF_HW_RXTX_RATE_MCS0)
73 status->flag |= RX_FLAG_HT;
Shahar Levi18357852010-10-13 16:09:41 +020074
Luciano Coelhof5fc0f82009-08-06 16:25:28 +030075 status->signal = desc->rssi;
76
John W. Linvilleece550d2010-07-28 16:41:06 -040077 /*
78 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
79 * need to divide by two for now, but TI has been discussing about
80 * changing it. This needs to be rechecked.
81 */
82 wl->noise = desc->rssi - (desc->snr >> 1);
83
Juuso Oikarinen0af04672011-03-10 10:01:43 +020084 status->freq = ieee80211_channel_to_frequency(desc->channel,
85 status->band);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +030086
87 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
Arik Nemtsov34c8e3d2011-04-26 23:35:40 +030088 u8 desc_err_code = desc->status & WL1271_RX_DESC_STATUS_MASK;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +030089
Arik Nemtsov34c8e3d2011-04-26 23:35:40 +030090 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED |
91 RX_FLAG_DECRYPTED;
92
93 if (unlikely(desc_err_code == WL1271_RX_DESC_MIC_FAIL)) {
Teemu Paasikivi5d07b662009-10-13 12:47:52 +030094 status->flag |= RX_FLAG_MMIC_ERROR;
Arik Nemtsov34c8e3d2011-04-26 23:35:40 +030095 wl1271_warning("Michael MIC error");
96 }
Luciano Coelhof5fc0f82009-08-06 16:25:28 +030097 }
Luciano Coelhof5fc0f82009-08-06 16:25:28 +030098}
99
Shahar Levi0a1d3ab2011-07-14 11:50:27 +0300100static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
Eliad Peller9eb599e2011-10-10 10:12:59 +0200101 bool unaligned, u8 *hlid)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300102{
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300103 struct wl1271_rx_descriptor *desc;
104 struct sk_buff *skb;
Eliad Peller92fe9b52011-02-09 12:25:14 +0200105 struct ieee80211_hdr *hdr;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300106 u8 *buf;
107 u8 beacon = 0;
Eliad Peller77ddaa12011-05-15 11:10:29 +0300108 u8 is_data = 0;
Shahar Levi0a1d3ab2011-07-14 11:50:27 +0300109 u8 reserved = unaligned ? NET_IP_ALIGN : 0;
Eliad Peller5c472142011-08-25 18:10:58 +0300110 u16 seq_num;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300111
Kalle Valo93c5bb62010-02-22 08:38:30 +0200112 /*
113 * In PLT mode we seem to get frames and mac80211 warns about them,
114 * workaround this by not retrieving them at all.
115 */
Eliad Peller3fcdab72012-02-06 12:47:54 +0200116 if (unlikely(wl->plt))
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200117 return -EINVAL;
Kalle Valo93c5bb62010-02-22 08:38:30 +0200118
Arik Nemtsov34c8e3d2011-04-26 23:35:40 +0300119 /* the data read starts with the descriptor */
120 desc = (struct wl1271_rx_descriptor *) data;
121
Ido Yariv95dac04f2011-06-06 14:57:06 +0300122 if (desc->packet_class == WL12XX_RX_CLASS_LOGGER) {
123 size_t len = length - sizeof(*desc);
124 wl12xx_copy_fwlog(wl, data + sizeof(*desc), len);
125 wake_up_interruptible(&wl->fwlog_waitq);
126 return 0;
127 }
128
Arik Nemtsov34c8e3d2011-04-26 23:35:40 +0300129 switch (desc->status & WL1271_RX_DESC_STATUS_MASK) {
130 /* discard corrupted packets */
131 case WL1271_RX_DESC_DRIVER_RX_Q_FAIL:
132 case WL1271_RX_DESC_DECRYPT_FAIL:
133 wl1271_warning("corrupted packet in RX with status: 0x%x",
134 desc->status & WL1271_RX_DESC_STATUS_MASK);
135 return -EINVAL;
136 case WL1271_RX_DESC_SUCCESS:
137 case WL1271_RX_DESC_MIC_FAIL:
138 break;
139 default:
140 wl1271_error("invalid RX descriptor status: 0x%x",
141 desc->status & WL1271_RX_DESC_STATUS_MASK);
142 return -EINVAL;
143 }
144
Shahar Levi0a1d3ab2011-07-14 11:50:27 +0300145 /* skb length not included rx descriptor */
146 skb = __dev_alloc_skb(length + reserved - sizeof(*desc), GFP_KERNEL);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300147 if (!skb) {
148 wl1271_error("Couldn't allocate RX frame");
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200149 return -ENOMEM;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300150 }
151
Shahar Levi0a1d3ab2011-07-14 11:50:27 +0300152 /* reserve the unaligned payload(if any) */
153 skb_reserve(skb, reserved);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300154
Shahar Levi0a1d3ab2011-07-14 11:50:27 +0300155 buf = skb_put(skb, length - sizeof(*desc));
156
157 /*
158 * Copy packets from aggregation buffer to the skbs without rx
159 * descriptor and with packet payload aligned care. In case of unaligned
160 * packets copy the packets in offset of 2 bytes guarantee IP header
161 * payload aligned to 4 bytes.
162 */
163 memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
Eliad Peller9eb599e2011-10-10 10:12:59 +0200164 *hlid = desc->hlid;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300165
Eliad Peller92fe9b52011-02-09 12:25:14 +0200166 hdr = (struct ieee80211_hdr *)skb->data;
167 if (ieee80211_is_beacon(hdr->frame_control))
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300168 beacon = 1;
Eliad Peller77ddaa12011-05-15 11:10:29 +0300169 if (ieee80211_is_data_present(hdr->frame_control))
170 is_data = 1;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300171
Eliad Peller58be4602010-09-19 18:55:08 +0200172 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300173
Eliad Peller5c472142011-08-25 18:10:58 +0300174 seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
Eliad Peller9eb599e2011-10-10 10:12:59 +0200175 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
Eliad Pellera20a5b72011-04-05 18:21:31 +0300176 skb->len - desc->pad_len,
Eliad Peller5c472142011-08-25 18:10:58 +0300177 beacon ? "beacon" : "",
Eliad Peller9eb599e2011-10-10 10:12:59 +0200178 seq_num, *hlid);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300179
Juuso Oikarinenb9f2e392010-05-14 10:46:24 +0300180 skb_trim(skb, skb->len - desc->pad_len);
181
Ido Yariva6208652011-03-01 15:14:41 +0200182 skb_queue_tail(&wl->deferred_rx_queue, skb);
Eliad Peller92ef8962011-06-07 12:50:46 +0300183 queue_work(wl->freezable_wq, &wl->netstack_work);
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200184
Eliad Peller77ddaa12011-05-15 11:10:29 +0300185 return is_data;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300186}
187
Eliad Peller4d56ad92011-08-14 13:17:05 +0300188void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300189{
190 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
Eliad Peller9eb599e2011-10-10 10:12:59 +0200191 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300192 u32 buf_size;
193 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
194 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200195 u32 rx_counter;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300196 u32 mem_block;
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200197 u32 pkt_length;
198 u32 pkt_offset;
Eliad Peller9eb599e2011-10-10 10:12:59 +0200199 u8 hlid;
Shahar Levi0a1d3ab2011-07-14 11:50:27 +0300200 bool unaligned = false;
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300201
202 while (drv_rx_counter != fw_rx_counter) {
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200203 buf_size = 0;
204 rx_counter = drv_rx_counter;
205 while (rx_counter != fw_rx_counter) {
Eliad Peller4d56ad92011-08-14 13:17:05 +0300206 pkt_length = wl12xx_rx_get_buf_size(status, rx_counter);
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200207 if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
208 break;
209 buf_size += pkt_length;
210 rx_counter++;
211 rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
212 }
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300213
214 if (buf_size == 0) {
215 wl1271_warning("received empty data");
216 break;
217 }
218
Shahar Leviae77ecc2011-03-06 16:32:13 +0200219 if (wl->chip.id != CHIP_ID_1283_PG20) {
220 /*
221 * Choose the block we want to read
222 * For aggregated packets, only the first memory block
223 * should be retrieved. The FW takes care of the rest.
224 */
Eliad Peller4d56ad92011-08-14 13:17:05 +0300225 mem_block = wl12xx_rx_get_mem_block(status,
Shahar Leviae77ecc2011-03-06 16:32:13 +0200226 drv_rx_counter);
227
228 wl->rx_mem_pool_addr.addr = (mem_block << 8) +
229 le32_to_cpu(wl_mem_map->packet_memory_pool_start);
230
231 wl->rx_mem_pool_addr.addr_extra =
232 wl->rx_mem_pool_addr.addr + 4;
233
234 wl1271_write(wl, WL1271_SLV_REG_DATA,
235 &wl->rx_mem_pool_addr,
236 sizeof(wl->rx_mem_pool_addr), false);
237 }
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300238
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200239 /* Read all available packets at once */
240 wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
241 buf_size, true);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300242
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200243 /* Split data into separate packets */
244 pkt_offset = 0;
245 while (pkt_offset < buf_size) {
Eliad Peller4d56ad92011-08-14 13:17:05 +0300246 pkt_length = wl12xx_rx_get_buf_size(status,
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200247 drv_rx_counter);
Shahar Levi0a1d3ab2011-07-14 11:50:27 +0300248
Eliad Peller4d56ad92011-08-14 13:17:05 +0300249 unaligned = wl12xx_rx_get_unaligned(status,
Shahar Levi0a1d3ab2011-07-14 11:50:27 +0300250 drv_rx_counter);
251
Juuso Oikarinenfb2382c2010-10-25 11:24:29 +0200252 /*
253 * the handle data call can only fail in memory-outage
254 * conditions, in that case the received frame will just
255 * be dropped.
256 */
Eliad Peller77ddaa12011-05-15 11:10:29 +0300257 if (wl1271_rx_handle_data(wl,
258 wl->aggr_buf + pkt_offset,
Eliad Peller9eb599e2011-10-10 10:12:59 +0200259 pkt_length, unaligned,
260 &hlid) == 1) {
Luciano Coelhof4142182011-12-13 11:39:02 +0200261 if (hlid < WL12XX_MAX_LINKS)
262 __set_bit(hlid, active_hlids);
263 else
264 WARN(1,
265 "hlid exceeded WL12XX_MAX_LINKS "
266 "(%d)\n", hlid);
Eliad Peller9eb599e2011-10-10 10:12:59 +0200267 }
Eliad Peller77ddaa12011-05-15 11:10:29 +0300268
Ido Yariv1f37cbc2010-09-30 13:28:27 +0200269 wl->rx_counter++;
270 drv_rx_counter++;
271 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
272 pkt_offset += pkt_length;
273 }
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300274 }
Ido Yariv606ea9f2011-03-01 15:14:39 +0200275
276 /*
277 * Write the driver's packet counter to the FW. This is only required
278 * for older hardware revisions
279 */
280 if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
281 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
Eliad Peller77ddaa12011-05-15 11:10:29 +0300282
Eliad Peller9eb599e2011-10-10 10:12:59 +0200283 wl12xx_rearm_rx_streaming(wl, active_hlids);
Luciano Coelhof5fc0f82009-08-06 16:25:28 +0300284}