/*
 * Marvell Wireless LAN device driver: 802.11n Aggregation
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "11n_aggr.h"

/*
 * Creates an AMSDU subframe for aggregation into one AMSDU packet.
 *
 * The resultant AMSDU subframe format is -
 *
 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
 * |     DA     |     SA      |   Length   | SNAP header |    MSDU    |
 * | data[0..5] | data[6..11] |            |             | data[14..] |
 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
 * <--6-bytes--> <--6-bytes--> <--2-bytes--><--8-bytes--> <--n-bytes-->
 *
 * This function also computes the amount of padding required to make the
 * buffer length a multiple of 4 bytes.
 *
 * Data => |DA|SA|SNAP-TYPE|........    .|
 * MSDU => |DA|SA|Length|SNAP|......   ..|
 */
| 46 | static int |
Amitkumar Karwar | 572e8f3 | 2011-04-13 17:27:08 -0700 | [diff] [blame] | 47 | mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr, |
Bing Zhao | 5e6e3a9 | 2011-03-21 18:00:50 -0700 | [diff] [blame] | 48 | struct sk_buff *skb_src, int *pad) |
| 49 | |
| 50 | { |
| 51 | int dt_offset; |
| 52 | struct rfc_1042_hdr snap = { |
| 53 | 0xaa, /* LLC DSAP */ |
| 54 | 0xaa, /* LLC SSAP */ |
| 55 | 0x03, /* LLC CTRL */ |
| 56 | {0x00, 0x00, 0x00}, /* SNAP OUI */ |
| 57 | 0x0000 /* SNAP type */ |
| 58 | /* |
| 59 | * This field will be overwritten |
| 60 | * later with ethertype |
| 61 | */ |
| 62 | }; |
        struct tx_packet_hdr *tx_header;

        tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header));

        /* Copy DA and SA */
        dt_offset = 2 * ETH_ALEN;
        memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);

        /* Copy SNAP header */
        snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset);
        dt_offset += sizeof(u16);

        memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));

        skb_pull(skb_src, dt_offset);

        /* Update Length field */
        tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN);

        /* Add payload */
        memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len);

        /* Add padding for new MSDU to start from 4 byte boundary */
        *pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4;
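        /*
         * Worked example (illustrative only): if skb_aggr->tail ends on an
         * address whose two low bits are 0b10, then (4 - 2) % 4 = 2 bytes of
         * padding are needed; if the tail is already 4-byte aligned, the
         * result is (4 - 0) % 4 = 0 and no padding is added.
         */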

        return skb_aggr->len + *pad;
}

/*
 * Adds TxPD to AMSDU header.
 *
 * Each AMSDU packet will contain one TxPD at the beginning,
 * followed by multiple AMSDU subframes.
 */
static void
mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
                            struct sk_buff *skb)
{
        struct txpd *local_tx_pd;

        skb_push(skb, sizeof(*local_tx_pd));

        local_tx_pd = (struct txpd *) skb->data;
        memset(local_tx_pd, 0, sizeof(struct txpd));

        /* Original priority has been overwritten */
        local_tx_pd->priority = (u8) skb->priority;
        local_tx_pd->pkt_delay_2ms =
                mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
        local_tx_pd->bss_num = priv->bss_num;
        local_tx_pd->bss_type = priv->bss_type;
        /* Always zero as the data is followed by struct txpd */
        local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
        local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
        local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
                                                 sizeof(*local_tx_pd));

        if (local_tx_pd->tx_control == 0)
                /* TxCtrl set by user or default */
                local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

        if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
            priv->adapter->pps_uapsd_mode) {
                if (mwifiex_check_last_packet_indication(priv)) {
                        priv->adapter->tx_lock_flag = true;
                        local_tx_pd->flags =
                                MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
                }
        }
}

/*
 * Create aggregated packet.
 *
 * This function creates an aggregated MSDU packet, by combining buffers
 * from the RA list. Each individual buffer is encapsulated as an AMSDU
 * subframe and all such subframes are concatenated together to form the
 * AMSDU packet.
 *
 * A TxPD is also added to the front of the resultant AMSDU packet for
 * transmission. The resultant packet format is -
 *
 * +---- ~ ----+------ ~ ------+------ ~ ------+-..-+------ ~ ------+
 * |    TxPD   |AMSDU sub-frame|AMSDU sub-frame| .. |AMSDU sub-frame|
 * |           |       1       |       2       | .. |       n       |
 * +---- ~ ----+------ ~ ------+------ ~ ------+ .. +------ ~ ------+
 */
int
mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
                          struct mwifiex_ra_list_tbl *pra_list, int headroom,
                          int ptrindex, unsigned long ra_list_flags)
                          __releases(&priv->wmm.ra_list_spinlock)
{
        struct mwifiex_adapter *adapter = priv->adapter;
        struct sk_buff *skb_aggr, *skb_src;
        struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
        int pad = 0, ret;
        struct mwifiex_tx_param tx_param;
        struct txpd *ptx_pd = NULL;

        skb_src = skb_peek(&pra_list->skb_head);
        if (!skb_src) {
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                return 0;
        }

        tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
        skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
        if (!skb_aggr) {
                dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                return -1;
        }
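        /*
         * Reserve room for the interface header (headroom supplied by the
         * caller) plus the TxPD that mwifiex_11n_form_amsdu_txpd() pushes
         * in front of the aggregate later on.
         */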
        skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
        tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);

        tx_info_aggr->bss_type = tx_info_src->bss_type;
        tx_info_aggr->bss_num = tx_info_src->bss_num;
        skb_aggr->priority = skb_src->priority;

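        /*
         * Dequeue MSDUs from the RA list and append each one as an AMSDU
         * subframe until the aggregate buffer runs out of room or the
         * list is empty.
         */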
        do {
                /*
                 * Check if the AMSDU can accommodate this MSDU and the
                 * LLC/SNAP header added during encapsulation.
                 */
                if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
                        break;

                skb_src = skb_dequeue(&pra_list->skb_head);

                pra_list->total_pkts_size -= skb_src->len;

                atomic_dec(&priv->wmm.tx_pkts_queued);

                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);

                mwifiex_write_data_complete(adapter, skb_src, 0);

                spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

                if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
                        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                               ra_list_flags);
                        return -1;
                }

                if (skb_tailroom(skb_aggr) < pad) {
                        pad = 0;
                        break;
                }
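                /*
                 * Pad the aggregate so that the next subframe starts on a
                 * 4-byte boundary; the pad value was computed when the
                 * previous subframe was formed.
                 */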
                skb_put(skb_aggr, pad);

                skb_src = skb_peek(&pra_list->skb_head);

        } while (skb_src);

        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

        /* The last AMSDU subframe does not need trailing padding */
        skb_trim(skb_aggr, skb_aggr->len - pad);

        /* Form AMSDU */
        mwifiex_11n_form_amsdu_txpd(priv, skb_aggr);
        if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
                ptx_pd = (struct txpd *)skb_aggr->data;

        skb_push(skb_aggr, headroom);

        if (adapter->iface_type == MWIFIEX_USB) {
                adapter->data_sent = true;
                ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
                                                   skb_aggr, NULL);
        } else {
                /*
                 * The per-MSDU padding affects the length of the next
                 * packet, so the exact length of the next packet is
                 * uncertain here.
                 *
                 * Also, aggregating transmission buffers while downloading
                 * the data to the card won't gain much for AMSDU packets,
                 * as an AMSDU packet already uses the transmission buffer
                 * space to the maximum (adapter->tx_buf_size).
                 */
                tx_param.next_pkt_len = 0;

                ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
                                                   skb_aggr, &tx_param);
        }
        switch (ret) {
        case -EBUSY:
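                /*
                 * The interface could not take the packet: put the
                 * aggregated skb back on the RA list, restore the queue
                 * accounting and mark the buffer as requeued so it is
                 * retried later.
                 */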
                spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
                if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
                        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                               ra_list_flags);
                        mwifiex_write_data_complete(adapter, skb_aggr, -1);
                        return -1;
                }
                if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
                    adapter->pps_uapsd_mode && adapter->tx_lock_flag) {
                        priv->adapter->tx_lock_flag = false;
                        if (ptx_pd)
                                ptx_pd->flags = 0;
                }

                skb_queue_tail(&pra_list->skb_head, skb_aggr);

                pra_list->total_pkts_size += skb_aggr->len;

                atomic_inc(&priv->wmm.tx_pkts_queued);

                tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
                break;
        case -1:
                adapter->data_sent = false;
                dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
                        __func__, ret);
                adapter->dbg.num_tx_host_to_card_failure++;
                mwifiex_write_data_complete(adapter, skb_aggr, ret);
                return 0;
        case -EINPROGRESS:
                adapter->data_sent = false;
                break;
        case 0:
                mwifiex_write_data_complete(adapter, skb_aggr, ret);
                break;
        default:
                break;
        }
        if (ret != -EBUSY) {
                spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
                if (mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
                        priv->wmm.packets_out[ptrindex]++;
                        priv->wmm.tid_tbl_ptr[ptrindex].ra_list_curr = pra_list;
                }
                /* Now bss_prio_cur pointer points to next node */
                adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
                        list_first_entry(
                                &adapter->bss_prio_tbl[priv->bss_priority]
                                .bss_prio_cur->list,
                                struct mwifiex_bss_prio_node, list);
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
        }

        return 0;
}