blob: b45d2486391836d47d417ec0e52d81bdf08da753 [file] [log] [blame]
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301/*
2 * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
29#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
30#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
31#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
32#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
33#include <ol_txrx.h>
34#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
35#include <ol_txrx_types.h> /* pdev stats */
36#include <ol_tx_desc.h> /* ol_tx_desc */
37#include <ol_tx_send.h> /* ol_tx_send */
38#include <ol_txrx_peer_find.h>
39#include <ol_tx_classify.h>
40#include <ol_tx_queue.h>
41#include <ipv4.h>
42#include <ipv6_defs.h>
43#include <ip_prot.h>
44#include <enet.h> /* ETHERTYPE_VLAN, etc. */
45#include <cds_ieee80211_common.h> /* ieee80211_frame */
46
47/*
48 * In theory, this tx classify code could be used on the host or in the target.
49 * Thus, this code uses generic OS primitives, that can be aliased to either
50 * the host's OS primitives or the target's OS primitives.
51 * For now, the following #defines set up these host-specific or
52 * target-specific aliases.
53 */
54
55#if defined(CONFIG_HL_SUPPORT)
56
57#define OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
58#define OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
59
60#ifdef QCA_TX_HTT2_SUPPORT
61static void
62ol_tx_classify_htt2_frm(
63 struct ol_txrx_vdev_t *vdev,
64 qdf_nbuf_t tx_nbuf,
65 struct ol_txrx_msdu_info_t *tx_msdu_info)
66{
67 struct htt_msdu_info_t *htt = &tx_msdu_info->htt;
68 A_UINT8 candi_frm = 0;
69
70 /*
71 * Offload the frame re-order to L3 protocol and ONLY support
72 * TCP protocol now.
73 */
74 if ((htt->info.l2_hdr_type == htt_pkt_type_ethernet) &&
75 (htt->info.frame_type == htt_frm_type_data) &&
76 htt->info.is_unicast &&
77 (htt->info.ethertype == ETHERTYPE_IPV4)) {
78 struct ipv4_hdr_t *ipHdr;
79
80 ipHdr = (struct ipv4_hdr_t *)(qdf_nbuf_data(tx_nbuf) +
81 htt->info.l3_hdr_offset);
82 if (ipHdr->protocol == IP_PROTOCOL_TCP)
83 candi_frm = 1;
84 }
85
86 qdf_nbuf_set_tx_parallel_dnload_frm(tx_nbuf, candi_frm);
87}
88
89#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) \
90 ol_tx_classify_htt2_frm(vdev, netbuf, msdu_info);
91#else
92#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) /* no-op */
93#endif /* QCA_TX_HTT2_SUPPORT */
94/* DHCP go with voice priority; WMM_AC_VO_TID1();*/
95#define TX_DHCP_TID 6
96
97#if defined(QCA_BAD_PEER_TX_FLOW_CL)
98static inline A_BOOL
99ol_if_tx_bad_peer_txq_overflow(
100 struct ol_txrx_pdev_t *pdev,
101 struct ol_txrx_peer_t *peer,
102 struct ol_tx_frms_queue_t *txq)
103{
104 if (peer && pdev && txq && (peer->tx_limit_flag) &&
105 (txq->frms >= pdev->tx_peer_bal.peer_bal_txq_limit))
106 return true;
107 else
108 return false;
109}
110#else
/**
 * ol_if_tx_bad_peer_txq_overflow() - stub when bad-peer flow control is off
 * @pdev: physical device object (unused)
 * @peer: destination peer (unused)
 * @txq: tx queue the frame would be added to (unused)
 *
 * Without QCA_BAD_PEER_TX_FLOW_CL there is no per-peer queue depth limit.
 *
 * Return: false always (never reject a frame for queue overflow)
 */
static inline A_BOOL ol_if_tx_bad_peer_txq_overflow(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer,
	struct ol_tx_frms_queue_t *txq)
{
	return false;
}
118#endif
119
120/* EAPOL go with voice priority: WMM_AC_TO_TID1(WMM_AC_VO);*/
121#define TX_EAPOL_TID 6
122
123/* ARP go with voice priority: WMM_AC_TO_TID1(pdev->arp_ac_override)*/
124#define TX_ARP_TID 6
125
126/* For non-IP case, use default TID */
127#define TX_DEFAULT_TID 0
128
129/*
130 * Determine IP TOS priority
131 * IP Tos format :
132 * (Refer Pg 57 WMM-test-plan-v1.2)
133 * IP-TOS - 8bits
134 * : DSCP(6-bits) ECN(2-bits)
135 * : DSCP - P2 P1 P0 X X X
136 * where (P2 P1 P0) form 802.1D
137 */
138static inline A_UINT8
139ol_tx_tid_by_ipv4(A_UINT8 *pkt)
140{
141 A_UINT8 ipPri, tid;
142 struct ipv4_hdr_t *ipHdr = (struct ipv4_hdr_t *)pkt;
143
144 ipPri = ipHdr->tos >> 5;
145 tid = ipPri & 0x7;
146
147 return tid;
148}
149
/**
 * ol_tx_tid_by_ipv6() - derive the 802.1D TID from an IPv6 header
 * @pkt: pointer to the start of the IPv6 header
 *
 * The top three bits of the IPv6 traffic class form the 802.1D
 * priority, mirroring the IPv4 TOS handling above.
 *
 * Return: TID value in the range 0-7
 */
static inline A_UINT8
ol_tx_tid_by_ipv6(A_UINT8 *pkt)
{
	return (ipv6_traffic_class((struct ipv6_hdr_t *)pkt) >> 5) & 0x7;
}
155
/**
 * ol_tx_set_ether_type() - deduce the frame's ethertype and L3 offset
 * @datap: pointer to the start of the frame (raw 802.11 or ethernet/802.3)
 * @tx_msdu_info: classification info whose htt.info.ethertype and
 *	htt.info.l3_hdr_offset fields are filled in
 *
 * For raw frames, parses the 802.11 data header (QoS or non-QoS sized)
 * and reads the ethertype from the LLC/SNAP header that follows.  For
 * ethernet frames, skips an optional 802.1Q VLAN tag and, if the
 * type/length field is an 802.3 length, reads the real ethertype from
 * the LLC/SNAP header.  Note: l3_hdr_offset records the end of the
 * 802.3/802.11 header, which may point at a LLC/SNAP header rather than
 * the IP header itself.
 */
static inline void
ol_tx_set_ether_type(
	A_UINT8 *datap,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	A_UINT16 typeorlength;
	A_UINT8 *ptr;
	A_UINT8 *l3_data_ptr;

	if (tx_msdu_info->htt.info.l2_hdr_type == htt_pkt_type_raw) {
		/* adjust hdr_ptr to RA */
		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;

		if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
		    IEEE80211_FC0_TYPE_DATA) {
			struct llc_snap_hdr_t *llc;
			/* dot11 encapsulated frame */
			struct ieee80211_qosframe *whqos =
				(struct ieee80211_qosframe *)datap;
			/* A QoS data frame carries a 2-byte QoS control
			 * field, so its header is larger than a plain
			 * data frame's.
			 */
			if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_qosframe);
			} else {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_frame);
			}
			llc = (struct llc_snap_hdr_t *)
				(datap + tx_msdu_info->htt.info.l3_hdr_offset);
			tx_msdu_info->htt.info.ethertype =
				(llc->ethertype[0] << 8) | llc->ethertype[1];
			/*
			 * l3_hdr_offset refers to the end of the 802.3 or
			 * 802.11 header, which may be a LLC/SNAP header rather
			 * than the IP header.
			 * Thus, don't increment l3_hdr_offset += sizeof(*llc);
			 * rather, leave it as is.
			 */
		} else {
			/*
			 * This function should only be applied to data frames.
			 * For management frames, we already know to use
			 * HTT_TX_EXT_TID_MGMT.
			 */
			TXRX_ASSERT2(0);
		}
	} else if (tx_msdu_info->htt.info.l2_hdr_type ==
						htt_pkt_type_ethernet) {
		/* The type/length field follows the two MAC addresses. */
		ptr = (datap + ETHERNET_ADDR_LEN * 2);
		typeorlength = (ptr[0] << 8) | ptr[1];
		/*ETHERNET_HDR_LEN;*/
		l3_data_ptr = datap + sizeof(struct ethernet_hdr_t);

		/* Skip an optional 802.1Q VLAN tag. */
		if (typeorlength == ETHERTYPE_VLAN) {
			ptr = (datap + ETHERNET_ADDR_LEN * 2
					+ ETHERTYPE_VLAN_LEN);
			typeorlength = (ptr[0] << 8) | ptr[1];
			l3_data_ptr += ETHERTYPE_VLAN_LEN;
		}

		/* A value below the ethertype threshold is an 802.3
		 * length field; the real ethertype then comes from the
		 * LLC/SNAP header that follows.
		 */
		if (!IS_ETHERTYPE(typeorlength)) {
			/* 802.3 header*/
			struct llc_snap_hdr_t *llc_hdr =
				(struct llc_snap_hdr_t *)l3_data_ptr;
			typeorlength = (llc_hdr->ethertype[0] << 8) |
				llc_hdr->ethertype[1];
			l3_data_ptr += sizeof(struct llc_snap_hdr_t);
		}

		tx_msdu_info->htt.info.l3_hdr_offset = (A_UINT8)(l3_data_ptr -
								 datap);
		tx_msdu_info->htt.info.ethertype = typeorlength;
	}
}
229
230static inline A_UINT8
231ol_tx_tid_by_ether_type(
232 A_UINT8 *datap,
233 struct ol_txrx_msdu_info_t *tx_msdu_info)
234{
235 A_UINT8 tid;
236 A_UINT8 *l3_data_ptr;
237 A_UINT16 typeorlength;
238
239 l3_data_ptr = datap + tx_msdu_info->htt.info.l3_hdr_offset;
240 typeorlength = tx_msdu_info->htt.info.ethertype;
241
242 /* IP packet, do packet inspection for TID */
243 if (typeorlength == ETHERTYPE_IPV4) {
244 tid = ol_tx_tid_by_ipv4(l3_data_ptr);
245 } else if (typeorlength == ETHERTYPE_IPV6) {
246 tid = ol_tx_tid_by_ipv6(l3_data_ptr);
247 } else if (ETHERTYPE_IS_EAPOL_WAPI(typeorlength)) {
248 /* EAPOL go with voice priority*/
249 tid = TX_EAPOL_TID;
250 } else if (typeorlength == ETHERTYPE_ARP) {
251 tid = TX_ARP_TID;
252 } else {
253 /* For non-IP case, use default TID */
254 tid = TX_DEFAULT_TID;
255 }
256 return tid;
257}
258
259static inline A_UINT8
260ol_tx_tid_by_raw_type(
261 A_UINT8 *datap,
262 struct ol_txrx_msdu_info_t *tx_msdu_info)
263{
264 A_UINT8 tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
265
266 /* adjust hdr_ptr to RA */
267 struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
268
269 /* FIXME: This code does not handle 4 address formats. The QOS field
270 * is not at usual location.
271 */
272 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
273 IEEE80211_FC0_TYPE_DATA) {
274 /* dot11 encapsulated frame */
275 struct ieee80211_qosframe *whqos =
276 (struct ieee80211_qosframe *)datap;
277 if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
278 tid = whqos->i_qos[0] & IEEE80211_QOS_TID;
279 else
280 tid = HTT_NON_QOS_TID;
281 } else {
282 /*
283 * This function should only be applied to data frames.
284 * For management frames, we already know to use
285 * HTT_TX_EXT_TID_MGMT.
286 */
287 qdf_assert(0);
288 }
289 return tid;
290}
291
292static A_UINT8
293ol_tx_tid(
294 struct ol_txrx_pdev_t *pdev,
295 qdf_nbuf_t tx_nbuf,
296 struct ol_txrx_msdu_info_t *tx_msdu_info)
297{
298 A_UINT8 *datap = qdf_nbuf_data(tx_nbuf);
299 A_UINT8 tid;
300
301 if (pdev->frame_format == wlan_frm_fmt_raw) {
302 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_raw;
303
304 ol_tx_set_ether_type(datap, tx_msdu_info);
305 tid = tx_msdu_info->htt.info.ext_tid ==
306 QDF_NBUF_TX_EXT_TID_INVALID ?
307 ol_tx_tid_by_raw_type(datap, tx_msdu_info) :
308 tx_msdu_info->htt.info.ext_tid;
309 } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
310 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_ethernet;
311
312 ol_tx_set_ether_type(datap, tx_msdu_info);
313 tid =
314 tx_msdu_info->htt.info.ext_tid ==
315 QDF_NBUF_TX_EXT_TID_INVALID ?
316 ol_tx_tid_by_ether_type(datap, tx_msdu_info) :
317 tx_msdu_info->htt.info.ext_tid;
318 } else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
319 struct llc_snap_hdr_t *llc;
320
321 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
322 tx_msdu_info->htt.info.l3_hdr_offset =
323 sizeof(struct ieee80211_frame);
324 llc = (struct llc_snap_hdr_t *)
325 (datap + tx_msdu_info->htt.info.l3_hdr_offset);
326 tx_msdu_info->htt.info.ethertype =
327 (llc->ethertype[0] << 8) | llc->ethertype[1];
328 /*
329 * Native WiFi is a special case of "raw" 802.11 header format.
330 * However, we expect that for all cases that use native WiFi,
331 * the TID will be directly specified out of band.
332 */
333 tid = tx_msdu_info->htt.info.ext_tid;
334 } else {
335 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
336 "Invalid standard frame type: %d\n",
337 pdev->frame_format);
338 qdf_assert(0);
339 tid = HTT_TX_EXT_TID_INVALID;
340 }
341 return tid;
342}
343
344#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
/**
 * ol_tx_tdls_peer_find() - find the peer for a STA vdev's unicast tx frame
 * @pdev: physical device object
 * @vdev: STA virtual device
 * @peer_id: output - filled in by the peer-by-address lookup
 *
 * If the vdev has an active TDLS link (hlTdlsFlag), look up the peer for
 * the stored TDLS AP MAC address; a match whose peer ID is still
 * HTT_INVALID_PEER_ID (not yet mapped by the target) is treated as
 * unusable.  Otherwise, or when no TDLS peer was found, fall back to the
 * associated BSS peer.
 *
 * NOTE(review): on the TDLS-hit path a reference is taken by a direct
 * qdf_atomic_inc() on peer->ref_cnt, while the fallback path relies on
 * ol_txrx_assoc_peer_find() - confirm callers release both references
 * the same way.
 *
 * Return: the peer to transmit to, or NULL if none could be found
 */
static inline
struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
					    struct ol_txrx_vdev_t *vdev,
					    uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer = NULL;

	if (vdev->hlTdlsFlag) {
		peer = ol_txrx_find_peer_by_addr(pdev,
						 vdev->hl_tdls_ap_mac_addr.raw,
						 peer_id);
		/* A peer the target has not yet mapped is unusable for tx. */
		if (peer && (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
			peer = NULL;
		} else {
			if (peer)
				qdf_atomic_inc(&peer->ref_cnt);
		}
	}
	if (!peer)
		peer = ol_txrx_assoc_peer_find(vdev);

	return peer;
}
368
369#else
370struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
371 struct ol_txrx_vdev_t *vdev,
372 uint8_t *peer_id)
373{
374 struct ol_txrx_peer_t *peer = NULL;
375 peer = ol_txrx_assoc_peer_find(vdev);
376
377 return peer;
378}
379
380
381#endif
382
383struct ol_tx_frms_queue_t *
384ol_tx_classify(
385 struct ol_txrx_vdev_t *vdev,
386 struct ol_tx_desc_t *tx_desc,
387 qdf_nbuf_t tx_nbuf,
388 struct ol_txrx_msdu_info_t *tx_msdu_info)
389{
390 struct ol_txrx_pdev_t *pdev = vdev->pdev;
391 struct ol_txrx_peer_t *peer = NULL;
392 struct ol_tx_frms_queue_t *txq = NULL;
393 A_UINT8 *dest_addr;
394 A_UINT8 tid;
395#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
396 u_int8_t peer_id;
397#endif
398
399 TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
400 dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
401 if ((IEEE80211_IS_MULTICAST(dest_addr)) ||
402 (vdev->opmode == wlan_op_mode_ocb)) {
403 txq = &vdev->txqs[OL_TX_VDEV_MCAST_BCAST];
404 tx_msdu_info->htt.info.ext_tid =
405 HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
406 if (vdev->opmode == wlan_op_mode_sta) {
407 /*
408 * The STA sends a frame with a broadcast
409 * dest addr (DA) as a
410 * unicast frame to the AP's receive addr (RA).
411 * Find the peer object that represents the AP
412 * that the STA is associated with.
413 */
414 peer = ol_txrx_assoc_peer_find(vdev);
415 if (!peer) {
416 QDF_TRACE(QDF_MODULE_ID_TXRX,
417 QDF_TRACE_LEVEL_ERROR,
418 "Error: STA %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast DA tx data frame w/o association\n",
419 vdev,
420 vdev->mac_addr.raw[0],
421 vdev->mac_addr.raw[1],
422 vdev->mac_addr.raw[2],
423 vdev->mac_addr.raw[3],
424 vdev->mac_addr.raw[4],
425 vdev->mac_addr.raw[5]);
426 return NULL; /* error */
427 } else if ((peer->security[
428 OL_TXRX_PEER_SECURITY_MULTICAST].sec_type
429 != htt_sec_type_wapi) &&
430 (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
Nirav Shah5e74bb82016-07-20 16:01:27 +0530431 if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
432 QDF_NBUF_CB_GET_PACKET_TYPE(
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530433 tx_nbuf)) {
434 /* DHCP frame to go with
435 * voice priority
436 */
437 txq = &peer->txqs[TX_DHCP_TID];
438 tx_msdu_info->htt.info.ext_tid =
439 TX_DHCP_TID;
440 }
441 }
442 /*
443 * The following line assumes each peer object has a
444 * single ID. This is currently true, and is expected
445 * to remain true.
446 */
447 tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
448 } else if (vdev->opmode == wlan_op_mode_ocb) {
449 tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
450 /* In OCB mode, don't worry about the peer.
451 *We don't need it. */
452 peer = NULL;
453 } else {
454 tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
455 /*
456 * Look up the vdev's BSS peer, so that the
457 * classify_extension function can check whether to
458 * encrypt multicast / broadcast frames.
459 */
460 peer = ol_txrx_peer_find_hash_find(pdev,
461 vdev->mac_addr.raw,
462 0, 1);
463 if (!peer) {
464 QDF_TRACE(QDF_MODULE_ID_TXRX,
465 QDF_TRACE_LEVEL_ERROR,
466 "Error: vdev %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast/mcast, but no self-peer found\n",
467 vdev,
468 vdev->mac_addr.raw[0],
469 vdev->mac_addr.raw[1],
470 vdev->mac_addr.raw[2],
471 vdev->mac_addr.raw[3],
472 vdev->mac_addr.raw[4],
473 vdev->mac_addr.raw[5]);
474 return NULL; /* error */
475 }
476 }
477 tx_msdu_info->htt.info.is_unicast = false;
478 } else {
479 /* tid would be overwritten for non QoS case*/
480 tid = ol_tx_tid(pdev, tx_nbuf, tx_msdu_info);
481 if ((HTT_TX_EXT_TID_INVALID == tid) ||
482 (tid >= OL_TX_NUM_TIDS)) {
483 QDF_TRACE(QDF_MODULE_ID_TXRX,
484 QDF_TRACE_LEVEL_ERROR,
485 "%s Error: could not classify packet into valid TID(%d).\n",
486 __func__, tid);
487 return NULL;
488 }
489#ifdef ATH_SUPPORT_WAPI
490 /* Check to see if a frame is a WAI frame */
491 if (tx_msdu_info->htt.info.ethertype == ETHERTYPE_WAI) {
492 /* WAI frames should not be encrypted */
493 tx_msdu_info->htt.action.do_encrypt = 0;
494 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
495 "Tx Frame is a WAI frame\n");
496 }
497#endif /* ATH_SUPPORT_WAPI */
498
499 /*
500 * Find the peer and increment its reference count.
501 * If this vdev is an AP, use the dest addr (DA) to determine
502 * which peer STA this unicast data frame is for.
503 * If this vdev is a STA, the unicast data frame is for the
504 * AP the STA is associated with.
505 */
506 if (vdev->opmode == wlan_op_mode_sta) {
507 /*
508 * TO DO:
509 * To support TDLS, first check if there is a TDLS
510 * peer STA,
511 * and if so, check if the DA matches the TDLS peer
512 * STA's MAC address. If there is no peer TDLS STA,
513 * or if the DA is not the TDLS STA's address,
514 * then the frame is either for the AP itself, or is
515 * supposed to be sent to the AP for forwarding.
516 */
517#if 0
518 if (vdev->num_tdls_peers > 0) {
519 peer = NULL;
520 for (i = 0; i < vdev->num_tdls_peers; i++) {
521 int differs = adf_os_mem_cmp(
522 vdev->tdls_peers[i]->
523 mac_addr.raw,
524 dest_addr,
525 OL_TXRX_MAC_ADDR_LEN);
526 if (!differs) {
527 peer = vdev->tdls_peers[i];
528 break;
529 }
530 }
531 } else {
532 /* send to AP */
533 peer = ol_txrx_assoc_peer_find(vdev);
534 }
535#endif
536
537 peer = ol_tx_tdls_peer_find(pdev, vdev, &peer_id);
538 } else {
539 peer = ol_txrx_peer_find_hash_find(pdev, dest_addr,
540 0, 1);
541 }
542 tx_msdu_info->htt.info.is_unicast = true;
543 if (!peer) {
544 /*
545 * Unicast data xfer can only happen to an
546 * associated peer. It is illegitimate to send unicast
547 * data if there is no peer to send it to.
548 */
549 QDF_TRACE(QDF_MODULE_ID_TXRX,
550 QDF_TRACE_LEVEL_ERROR,
551 "Error: vdev %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send unicast tx data frame to an unknown peer\n",
552 vdev,
553 vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
554 vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
555 vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
556 return NULL; /* error */
557 }
558 TX_SCHED_DEBUG_PRINT("Peer found\n");
559 if (!peer->qos_capable) {
560 tid = OL_TX_NON_QOS_TID;
561 } else if ((peer->security[
562 OL_TXRX_PEER_SECURITY_UNICAST].sec_type
563 != htt_sec_type_wapi) &&
564 (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
Nirav Shah5e74bb82016-07-20 16:01:27 +0530565 if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
566 QDF_NBUF_CB_GET_PACKET_TYPE(tx_nbuf))
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530567 /* DHCP frame to go with voice priority */
568 tid = TX_DHCP_TID;
569 }
570
571 /* Only allow encryption when in authenticated state */
572 if (OL_TXRX_PEER_STATE_AUTH != peer->state)
573 tx_msdu_info->htt.action.do_encrypt = 0;
574
575 txq = &peer->txqs[tid];
576 tx_msdu_info->htt.info.ext_tid = tid;
577 /*
578 * The following line assumes each peer object has a single ID.
579 * This is currently true, and is expected to remain true.
580 */
581 tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
582 /*
583 * WORKAROUND - check that the peer ID is valid.
584 * If tx data is provided before ol_rx_peer_map_handler is
585 * called to record the peer ID specified by the target,
586 * then we could end up here with an invalid peer ID.
587 * TO DO: rather than dropping the tx frame, pause the txq it
588 * goes into, then fill in the peer ID for the entries in the
589 * txq when the peer_map event provides the peer ID, and then
590 * unpause the txq.
591 */
592 if (tx_msdu_info->htt.info.peer_id == HTT_INVALID_PEER_ID) {
593 if (peer) {
594 TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
595 "%s: remove the peer for invalid peer_id %p\n",
596 __func__, peer);
597 /* remove the peer reference added above */
598 ol_txrx_peer_unref_delete(peer);
599 tx_msdu_info->peer = NULL;
600 }
601 return NULL;
602 }
603 }
604 tx_msdu_info->peer = peer;
605 if (ol_if_tx_bad_peer_txq_overflow(pdev, peer, txq))
606 return NULL;
607 /*
608 * If relevant, do a deeper inspection to determine additional
609 * characteristics of the tx frame.
610 * If the frame is invalid, then the txq will be set to NULL to
611 * indicate an error.
612 */
613 OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, tx_nbuf, tx_msdu_info, txq);
614 if (IEEE80211_IS_MULTICAST(dest_addr) && vdev->opmode !=
615 wlan_op_mode_sta && tx_msdu_info->peer !=
616 NULL) {
617 TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
618 "%s: remove the peer reference %p\n",
619 __func__, peer);
620 /* remove the peer reference added above */
621 ol_txrx_peer_unref_delete(tx_msdu_info->peer);
622 /* Making peer NULL in case if multicast non STA mode */
623 tx_msdu_info->peer = NULL;
624 }
625
626 /* Whether this frame can download though HTT2 data pipe or not. */
627 OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);
628
629 /* Update Tx Queue info */
630 tx_desc->txq = txq;
631
632 TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
633 return txq;
634}
635
/**
 * ol_tx_classify_mgmt() - classify a management frame and select its tx queue
 * @vdev: virtual device transmitting the frame
 * @tx_desc: tx descriptor for the frame (its txq field is updated)
 * @tx_nbuf: the management frame
 * @tx_msdu_info: per-MSDU info; peer, peer_id, ext_tid and is_unicast
 *	fields are filled in
 *
 * Broadcast/multicast mgmt frames (beacons, broadcast probe requests,
 * public action frames) go to the vdev's default mgmt queue with no
 * peer.  Unicast mgmt frames go to the destination peer's mgmt queue
 * when a matching peer exists (a reference is taken on it); frames sent
 * before the peer exists (probe/assoc exchanges, preauth) fall back to
 * the vdev's default mgmt queue.
 *
 * Return: the selected tx queue (NULL only if the classify extension
 * flags the frame as invalid)
 */
struct ol_tx_frms_queue_t *
ol_tx_classify_mgmt(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_tx_frms_queue_t *txq = NULL;
	A_UINT8 *dest_addr;
	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;

	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
	if (IEEE80211_IS_MULTICAST(dest_addr)) {
		/*
		 * AP: beacons are broadcast,
		 * public action frames (e.g. extended channel
		 * switch announce) may be broadcast
		 * STA: probe requests can be either broadcast or unicast
		 */
		txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
		tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
		tx_msdu_info->peer = NULL;
		tx_msdu_info->htt.info.is_unicast = 0;
	} else {
		/*
		 * Find the peer and increment its reference count.
		 * If this vdev is an AP, use the receiver addr (RA) to
		 * determine which peer STA this unicast mgmt frame is for.
		 * If this vdev is a STA, the unicast mgmt frame is for the
		 * AP the STA is associated with.
		 * Probe request / response and Assoc request / response are
		 * sent before the peer exists - in this case, use the
		 * vdev's default tx queue.
		 */
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * TO DO:
			 * To support TDLS, first check if there is a TDLS
			 * peer STA, and if so, check if the DA matches
			 * the TDLS peer STA's MAC address.
			 */
			peer = ol_txrx_assoc_peer_find(vdev);
			/*
			 * Some special case(preauth for example) needs to send
			 * unicast mgmt frame to unassociated AP. In such case,
			 * we need to check if dest addr match the associated
			 * peer addr. If not, we set peer as NULL to queue this
			 * frame to vdev queue.
			 */
			if (peer) {
				qdf_mem_copy(
					&local_mac_addr_aligned.raw[0],
					dest_addr, OL_TXRX_MAC_ADDR_LEN);
				mac_addr = &local_mac_addr_aligned;
				if (ol_txrx_peer_find_mac_addr_cmp(
						mac_addr,
						&peer->mac_addr) != 0) {
					/* NOTE(review): releases the
					 * reference taken by
					 * ol_txrx_assoc_peer_find() via a
					 * direct atomic decrement - confirm
					 * this matches how that reference
					 * was acquired.
					 */
					qdf_atomic_dec(&peer->ref_cnt);
					qdf_print("%s: peer %p peer->ref_cnt %d",
						  __func__, peer,
						  qdf_atomic_read
							(&peer->ref_cnt));
					peer = NULL;
				}
			}
		} else {
			/* find the peer and increment its reference count */
			peer = ol_txrx_peer_find_hash_find(pdev, dest_addr,
							   0, 1);
		}
		tx_msdu_info->peer = peer;
		if (!peer) {
			/* No peer yet - use the vdev's default mgmt queue. */
			txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
		} else {
			txq = &peer->txqs[HTT_TX_EXT_TID_MGMT];
			tx_msdu_info->htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
			/*
			 * The following line assumes each peer object has a
			 * single ID. This is currently true, and is expected
			 * to remain true.
			 */
			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		}
		tx_msdu_info->htt.info.is_unicast = 1;
	}
	/*
	 * If relevant, do a deeper inspection to determine additional
	 * characteristics of the tx frame.
	 * If the frame is invalid, then the txq will be set to NULL to
	 * indicate an error.
	 */
	OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, tx_nbuf,
				      tx_msdu_info, txq);

	/* Whether this frame can download though HTT2 data pipe or not. */
	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);

	/* Update Tx Queue info */
	tx_desc->txq = txq;

	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	return txq;
}
743
744A_STATUS
745ol_tx_classify_extension(
746 struct ol_txrx_vdev_t *vdev,
747 struct ol_tx_desc_t *tx_desc,
748 qdf_nbuf_t tx_msdu,
749 struct ol_txrx_msdu_info_t *msdu_info)
750{
751 A_UINT8 *datap = qdf_nbuf_data(tx_msdu);
752 struct ol_txrx_peer_t *peer;
753 int which_key;
754
755 /*
756 * The following msdu_info fields were already filled in by the
757 * ol_tx entry function or the regular ol_tx_classify function:
758 * htt.info.vdev_id (ol_tx_hl or ol_tx_non_std_hl)
759 * htt.info.ext_tid (ol_tx_non_std_hl or ol_tx_classify)
760 * htt.info.frame_type (ol_tx_hl or ol_tx_non_std_hl)
761 * htt.info.l2_hdr_type (ol_tx_hl or ol_tx_non_std_hl)
762 * htt.info.is_unicast (ol_tx_classify)
763 * htt.info.peer_id (ol_tx_classify)
764 * peer (ol_tx_classify)
765 * if (is_unicast) {
766 * htt.info.ethertype (ol_tx_classify)
767 * htt.info.l3_hdr_offset (ol_tx_classify)
768 * }
769 * The following fields need to be filled in by this function:
770 * if (!is_unicast) {
771 * htt.info.ethertype
772 * htt.info.l3_hdr_offset
773 * }
774 * htt.action.band (NOT CURRENTLY USED)
775 * htt.action.do_encrypt
776 * htt.action.do_tx_complete
777 * The following fields are not needed for data frames, and can
778 * be left uninitialized:
779 * htt.info.frame_subtype
780 */
781
782 if (!msdu_info->htt.info.is_unicast) {
783 int l2_hdr_size;
784 A_UINT16 ethertype;
785
786 if (msdu_info->htt.info.l2_hdr_type == htt_pkt_type_ethernet) {
787 struct ethernet_hdr_t *eh;
788
789 eh = (struct ethernet_hdr_t *)datap;
790 l2_hdr_size = sizeof(*eh);
791 ethertype = (eh->ethertype[0] << 8) | eh->ethertype[1];
792
793 if (ethertype == ETHERTYPE_VLAN) {
794 struct ethernet_vlan_hdr_t *evh;
795
796 evh = (struct ethernet_vlan_hdr_t *)datap;
797 l2_hdr_size = sizeof(*evh);
798 ethertype = (evh->ethertype[0] << 8) |
799 evh->ethertype[1];
800 }
801
802 if (!IS_ETHERTYPE(ethertype)) {
803 /* 802.3 header*/
804 struct llc_snap_hdr_t *llc =
805 (struct llc_snap_hdr_t *)(datap +
806 l2_hdr_size);
807 ethertype = (llc->ethertype[0] << 8) |
808 llc->ethertype[1];
809 l2_hdr_size += sizeof(*llc);
810 }
811 msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
812 msdu_info->htt.info.ethertype = ethertype;
813 } else { /* 802.11 */
814 struct llc_snap_hdr_t *llc;
815 l2_hdr_size = ol_txrx_ieee80211_hdrsize(datap);
816 llc = (struct llc_snap_hdr_t *)(datap + l2_hdr_size);
817 ethertype = (llc->ethertype[0] << 8) |
818 llc->ethertype[1];
819 /*
820 * Don't include the LLC/SNAP header in l2_hdr_size,
821 * because l3_hdr_offset is actually supposed to refer
822 * to the header after the 802.3 or 802.11 header,
823 * which could be a LLC/SNAP header rather
824 * than the L3 header.
825 */
826 }
827 msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
828 msdu_info->htt.info.ethertype = ethertype;
829 which_key = txrx_sec_mcast;
830 } else {
831 which_key = txrx_sec_ucast;
832 }
833 peer = msdu_info->peer;
834 /*
835 * msdu_info->htt.action.do_encrypt is initially set in ol_tx_desc_hl.
836 * Add more check here.
837 */
838 msdu_info->htt.action.do_encrypt = (!peer) ? 0 :
839 (peer->security[which_key].sec_type == htt_sec_type_none) ? 0 :
840 msdu_info->htt.action.do_encrypt;
841 /*
842 * For systems that have a frame by frame spec for whether to receive
843 * a tx completion notification, use the tx completion notification
844 * only for certain management frames, not for data frames.
845 * (In the future, this may be changed slightly, e.g. to request a
846 * tx completion notification for the final EAPOL message sent by a
847 * STA during the key delivery handshake.)
848 */
849 msdu_info->htt.action.do_tx_complete = 0;
850
851 return A_OK;
852}
853
854A_STATUS
855ol_tx_classify_mgmt_extension(
856 struct ol_txrx_vdev_t *vdev,
857 struct ol_tx_desc_t *tx_desc,
858 qdf_nbuf_t tx_msdu,
859 struct ol_txrx_msdu_info_t *msdu_info)
860{
861 struct ieee80211_frame *wh;
862
863 /*
864 * The following msdu_info fields were already filled in by the
865 * ol_tx entry function or the regular ol_tx_classify_mgmt function:
866 * htt.info.vdev_id (ol_txrx_mgmt_send)
867 * htt.info.frame_type (ol_txrx_mgmt_send)
868 * htt.info.l2_hdr_type (ol_txrx_mgmt_send)
869 * htt.action.do_tx_complete (ol_txrx_mgmt_send)
870 * htt.info.peer_id (ol_tx_classify_mgmt)
871 * htt.info.ext_tid (ol_tx_classify_mgmt)
872 * htt.info.is_unicast (ol_tx_classify_mgmt)
873 * peer (ol_tx_classify_mgmt)
874 * The following fields need to be filled in by this function:
875 * htt.info.frame_subtype
876 * htt.info.l3_hdr_offset
877 * htt.action.band (NOT CURRENTLY USED)
878 * The following fields are not needed for mgmt frames, and can
879 * be left uninitialized:
880 * htt.info.ethertype
881 * htt.action.do_encrypt
882 * (This will be filled in by other SW, which knows whether
883 * the peer has robust-managment-frames enabled.)
884 */
885 wh = (struct ieee80211_frame *)qdf_nbuf_data(tx_msdu);
886 msdu_info->htt.info.frame_subtype =
887 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
888 IEEE80211_FC0_SUBTYPE_SHIFT;
889 msdu_info->htt.info.l3_hdr_offset = sizeof(struct ieee80211_frame);
890
891 return A_OK;
892}
893
894#endif /* defined(CONFIG_HL_SUPPORT) */