Bing Zhao | 5e6e3a9 | 2011-03-21 18:00:50 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Marvell Wireless LAN device driver: 802.11n Aggregation |
| 3 | * |
| 4 | * Copyright (C) 2011, Marvell International Ltd. |
| 5 | * |
| 6 | * This software file (the "File") is distributed by Marvell International |
| 7 | * Ltd. under the terms of the GNU General Public License Version 2, June 1991 |
| 8 | * (the "License"). You may use, redistribute and/or modify this File in |
| 9 | * accordance with the terms and conditions of the License, a copy of which |
| 10 | * is available by writing to the Free Software Foundation, Inc., |
| 11 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the |
| 12 | * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. |
| 13 | * |
| 14 | * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE |
| 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE |
| 16 | * ARE EXPRESSLY DISCLAIMED. The License provides additional details about |
| 17 | * this warranty disclaimer. |
| 18 | */ |
| 19 | |
| 20 | #include "decl.h" |
| 21 | #include "ioctl.h" |
| 22 | #include "util.h" |
| 23 | #include "fw.h" |
| 24 | #include "main.h" |
| 25 | #include "wmm.h" |
| 26 | #include "11n.h" |
| 27 | #include "11n_aggr.h" |
| 28 | |
| 29 | /* |
| 30 | * Creates an AMSDU subframe for aggregation into one AMSDU packet. |
| 31 | * |
| 32 | * The resultant AMSDU subframe format is - |
| 33 | * |
| 34 | * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+ |
| 35 | * | DA | SA | Length | SNAP header | MSDU | |
| 36 | * | data[0..5] | data[6..11] | | | data[14..] | |
| 37 | * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+ |
| 38 | * <--6-bytes--> <--6-bytes--> <--2-bytes--><--8-bytes--> <--n-bytes--> |
| 39 | * |
| 40 | * This function also computes the amount of padding required to make the |
| 41 | * buffer length multiple of 4 bytes. |
| 42 | * |
| 43 | * Data => |DA|SA|SNAP-TYPE|........ .| |
| 44 | * MSDU => |DA|SA|Length|SNAP|...... ..| |
| 45 | */ |
| 46 | static int |
Amitkumar Karwar | 572e8f3 | 2011-04-13 17:27:08 -0700 | [diff] [blame] | 47 | mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr, |
Bing Zhao | 5e6e3a9 | 2011-03-21 18:00:50 -0700 | [diff] [blame] | 48 | struct sk_buff *skb_src, int *pad) |
| 49 | |
| 50 | { |
| 51 | int dt_offset; |
| 52 | struct rfc_1042_hdr snap = { |
| 53 | 0xaa, /* LLC DSAP */ |
| 54 | 0xaa, /* LLC SSAP */ |
| 55 | 0x03, /* LLC CTRL */ |
| 56 | {0x00, 0x00, 0x00}, /* SNAP OUI */ |
| 57 | 0x0000 /* SNAP type */ |
| 58 | /* |
| 59 | * This field will be overwritten |
| 60 | * later with ethertype |
| 61 | */ |
| 62 | }; |
Yogesh Ashok Powar | 270e58e | 2011-05-03 20:11:46 -0700 | [diff] [blame] | 63 | struct tx_packet_hdr *tx_header; |
Bing Zhao | 5e6e3a9 | 2011-03-21 18:00:50 -0700 | [diff] [blame] | 64 | |
| 65 | skb_put(skb_aggr, sizeof(*tx_header)); |
| 66 | |
| 67 | tx_header = (struct tx_packet_hdr *) skb_aggr->data; |
| 68 | |
| 69 | /* Copy DA and SA */ |
| 70 | dt_offset = 2 * ETH_ALEN; |
| 71 | memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset); |
| 72 | |
| 73 | /* Copy SNAP header */ |
| 74 | snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset); |
| 75 | dt_offset += sizeof(u16); |
| 76 | |
| 77 | memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr)); |
| 78 | |
| 79 | skb_pull(skb_src, dt_offset); |
| 80 | |
| 81 | /* Update Length field */ |
| 82 | tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN); |
| 83 | |
| 84 | /* Add payload */ |
| 85 | skb_put(skb_aggr, skb_src->len); |
| 86 | memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data, |
| 87 | skb_src->len); |
| 88 | *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? (4 - (((skb_src->len + |
| 89 | LLC_SNAP_LEN)) & 3)) : 0; |
| 90 | skb_put(skb_aggr, *pad); |
| 91 | |
| 92 | return skb_aggr->len + *pad; |
| 93 | } |
| 94 | |
| 95 | /* |
| 96 | * Adds TxPD to AMSDU header. |
| 97 | * |
| 98 | * Each AMSDU packet will contain one TxPD at the beginning, |
| 99 | * followed by multiple AMSDU subframes. |
| 100 | */ |
| 101 | static void |
| 102 | mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv, |
| 103 | struct sk_buff *skb) |
| 104 | { |
| 105 | struct txpd *local_tx_pd; |
| 106 | |
| 107 | skb_push(skb, sizeof(*local_tx_pd)); |
| 108 | |
| 109 | local_tx_pd = (struct txpd *) skb->data; |
| 110 | memset(local_tx_pd, 0, sizeof(struct txpd)); |
| 111 | |
| 112 | /* Original priority has been overwritten */ |
| 113 | local_tx_pd->priority = (u8) skb->priority; |
| 114 | local_tx_pd->pkt_delay_2ms = |
| 115 | mwifiex_wmm_compute_drv_pkt_delay(priv, skb); |
| 116 | local_tx_pd->bss_num = priv->bss_num; |
| 117 | local_tx_pd->bss_type = priv->bss_type; |
| 118 | /* Always zero as the data is followed by struct txpd */ |
| 119 | local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd)); |
| 120 | local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU); |
| 121 | local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len - |
| 122 | sizeof(*local_tx_pd)); |
| 123 | |
| 124 | if (local_tx_pd->tx_control == 0) |
| 125 | /* TxCtrl set by user or default */ |
| 126 | local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl); |
| 127 | |
| 128 | if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && |
| 129 | (priv->adapter->pps_uapsd_mode)) { |
| 130 | if (true == mwifiex_check_last_packet_indication(priv)) { |
| 131 | priv->adapter->tx_lock_flag = true; |
| 132 | local_tx_pd->flags = |
| 133 | MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET; |
| 134 | } |
| 135 | } |
| 136 | } |
| 137 | |
| 138 | /* |
Bing Zhao | 5e6e3a9 | 2011-03-21 18:00:50 -0700 | [diff] [blame] | 139 | * Create aggregated packet. |
| 140 | * |
| 141 | * This function creates an aggregated MSDU packet, by combining buffers |
| 142 | * from the RA list. Each individual buffer is encapsulated as an AMSDU |
| 143 | * subframe and all such subframes are concatenated together to form the |
| 144 | * AMSDU packet. |
| 145 | * |
| 146 | * A TxPD is also added to the front of the resultant AMSDU packets for |
| 147 | * transmission. The resultant packets format is - |
| 148 | * |
| 149 | * +---- ~ ----+------ ~ ------+------ ~ ------+-..-+------ ~ ------+ |
| 150 | * | TxPD |AMSDU sub-frame|AMSDU sub-frame| .. |AMSDU sub-frame| |
| 151 | * | | 1 | 2 | .. | n | |
| 152 | * +---- ~ ----+------ ~ ------+------ ~ ------+ .. +------ ~ ------+ |
| 153 | */ |
int
mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
			  struct mwifiex_ra_list_tbl *pra_list, int headroom,
			  int ptrindex, unsigned long ra_list_flags)
			__releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb_aggr, *skb_src;
	struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
	int pad = 0, ret;
	struct mwifiex_tx_param tx_param;
	struct txpd *ptx_pd = NULL;

	/* Caller holds wmm.ra_list_spinlock (see __releases annotation);
	 * every return path below must drop it.
	 */
	if (skb_queue_empty(&pra_list->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		return 0;
	}
	skb_src = skb_peek(&pra_list->skb_head);
	tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
	/* The aggregate is sized to the interface's maximum TX buffer */
	skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
	if (!skb_aggr) {
		dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		return -1;
	}
	/* Leave space for the bus header plus the TxPD pushed later by
	 * mwifiex_11n_form_amsdu_txpd()
	 */
	skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
	tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);

	tx_info_aggr->bss_index = tx_info_src->bss_index;
	skb_aggr->priority = skb_src->priority;

	/* Keep folding queued packets into the AMSDU while the next one
	 * (plus its LLC/SNAP overhead) still fits in the TX buffer.
	 */
	while (skb_src && ((skb_headroom(skb_aggr) + skb_src->len
					+ LLC_SNAP_LEN)
				<= adapter->tx_buf_size)) {

		if (!skb_queue_empty(&pra_list->skb_head))
			skb_src = skb_dequeue(&pra_list->skb_head);
		else
			skb_src = NULL;

		if (skb_src)
			pra_list->total_pkts_size -= skb_src->len;

		atomic_dec(&priv->wmm.tx_pkts_queued);

		/* Drop the lock while building the subframe and completing
		 * the source skb; neither needs RA-list protection.
		 */
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);

		mwifiex_write_data_complete(adapter, skb_src, 0);

		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		/* The RA list may have been torn down (e.g. disconnect)
		 * while the lock was dropped — revalidate before touching it.
		 */
		if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			return -1;
		}

		if (!skb_queue_empty(&pra_list->skb_head))
			skb_src = skb_peek(&pra_list->skb_head);
		else
			skb_src = NULL;
	}

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	/* Last AMSDU subframe does not need padding; 'pad' still holds the
	 * padding appended for the final subframe, so trim it off.
	 */
	skb_trim(skb_aggr, skb_aggr->len - pad);

	/* Prepend the TxPD to form the final AMSDU frame */
	mwifiex_11n_form_amsdu_txpd(priv, skb_aggr);
	/* Remember the TxPD location so its flags can be cleared on requeue
	 * (STA power-save handling in the -EBUSY path below)
	 */
	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
		ptx_pd = (struct txpd *)skb_aggr->data;

	skb_push(skb_aggr, headroom);

	/* Hint to the interface driver how large the next transfer will be,
	 * capped at the TX buffer size; 0 when nothing is left queued.
	 */
	tx_param.next_pkt_len = ((pra_list->total_pkts_size) ?
				 (((pra_list->total_pkts_size) >
				   adapter->tx_buf_size) ? adapter->
				  tx_buf_size : pra_list->total_pkts_size +
				  LLC_SNAP_LEN + sizeof(struct txpd)) : 0);
	ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
					   skb_aggr->data,
					   skb_aggr->len, &tx_param);
	switch (ret) {
	case -EBUSY:
		/* Bus busy: requeue the whole aggregate at the tail of the
		 * RA list so it is retried later.
		 */
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
		if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb_aggr, -1);
			return -1;
		}
		/* Undo the "last packet" power-save indication set while
		 * forming the TxPD, since the frame was not actually sent.
		 */
		if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
		    (adapter->pps_uapsd_mode) &&
		    (adapter->tx_lock_flag)) {
				priv->adapter->tx_lock_flag = false;
				if (ptx_pd)
					ptx_pd->flags = 0;
		}

		skb_queue_tail(&pra_list->skb_head, skb_aggr);

		pra_list->total_pkts_size += skb_aggr->len;

		atomic_inc(&priv->wmm.tx_pkts_queued);

		tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
		break;
	case -1:
		/* Hard failure: count it and free the aggregate */
		adapter->data_sent = false;
		dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
			__func__, ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb_aggr, ret);
		return 0;
	case -EINPROGRESS:
		/* Transfer handed to the bus driver; completion will free
		 * the skb — presumably via the driver's TX-done path
		 * (NOTE(review): not visible here, confirm).
		 */
		adapter->data_sent = false;
		break;
	case 0:
		/* Synchronous success: complete the aggregate immediately */
		mwifiex_write_data_complete(adapter, skb_aggr, ret);
		break;
	default:
		break;
	}
	/* On any outcome except requeue, advance the WMM scheduling state */
	if (ret != -EBUSY) {
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
		if (mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
			priv->wmm.packets_out[ptrindex]++;
			priv->wmm.tid_tbl_ptr[ptrindex].ra_list_curr = pra_list;
		}
		/* Now bss_prio_cur pointer points to next node */
		adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
			list_first_entry(
				&adapter->bss_prio_tbl[priv->bss_priority]
				.bss_prio_cur->list,
				struct mwifiex_bss_prio_node, list);
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
	}

	return 0;
}