blob: aeb0ff02d2373e514e71e5b6396af8e9a8ae31fd [file] [log] [blame]
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301/*
Yu Wang053d3e72017-02-08 18:48:24 +08002 * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
29#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
30#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
31#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
32#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
33#include <ol_txrx.h>
34#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
35#include <ol_txrx_types.h> /* pdev stats */
36#include <ol_tx_desc.h> /* ol_tx_desc */
37#include <ol_tx_send.h> /* ol_tx_send */
38#include <ol_txrx_peer_find.h>
39#include <ol_tx_classify.h>
40#include <ol_tx_queue.h>
41#include <ipv4.h>
42#include <ipv6_defs.h>
43#include <ip_prot.h>
44#include <enet.h> /* ETHERTYPE_VLAN, etc. */
45#include <cds_ieee80211_common.h> /* ieee80211_frame */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080046#include <cdp_txrx_handle.h>
Siddarth Poddarb2011f62016-04-27 20:45:42 +053047/*
48 * In theory, this tx classify code could be used on the host or in the target.
49 * Thus, this code uses generic OS primitives, that can be aliased to either
50 * the host's OS primitives or the target's OS primitives.
51 * For now, the following #defines set up these host-specific or
52 * target-specific aliases.
53 */
54
55#if defined(CONFIG_HL_SUPPORT)
56
57#define OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
58#define OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
59
60#ifdef QCA_TX_HTT2_SUPPORT
61static void
62ol_tx_classify_htt2_frm(
63 struct ol_txrx_vdev_t *vdev,
64 qdf_nbuf_t tx_nbuf,
65 struct ol_txrx_msdu_info_t *tx_msdu_info)
66{
67 struct htt_msdu_info_t *htt = &tx_msdu_info->htt;
68 A_UINT8 candi_frm = 0;
69
70 /*
71 * Offload the frame re-order to L3 protocol and ONLY support
72 * TCP protocol now.
73 */
74 if ((htt->info.l2_hdr_type == htt_pkt_type_ethernet) &&
75 (htt->info.frame_type == htt_frm_type_data) &&
76 htt->info.is_unicast &&
77 (htt->info.ethertype == ETHERTYPE_IPV4)) {
78 struct ipv4_hdr_t *ipHdr;
79
80 ipHdr = (struct ipv4_hdr_t *)(qdf_nbuf_data(tx_nbuf) +
81 htt->info.l3_hdr_offset);
82 if (ipHdr->protocol == IP_PROTOCOL_TCP)
83 candi_frm = 1;
84 }
85
86 qdf_nbuf_set_tx_parallel_dnload_frm(tx_nbuf, candi_frm);
87}
88
89#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) \
90 ol_tx_classify_htt2_frm(vdev, netbuf, msdu_info);
91#else
92#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) /* no-op */
93#endif /* QCA_TX_HTT2_SUPPORT */
94/* DHCP go with voice priority; WMM_AC_VO_TID1();*/
95#define TX_DHCP_TID 6
96
97#if defined(QCA_BAD_PEER_TX_FLOW_CL)
98static inline A_BOOL
99ol_if_tx_bad_peer_txq_overflow(
100 struct ol_txrx_pdev_t *pdev,
101 struct ol_txrx_peer_t *peer,
102 struct ol_tx_frms_queue_t *txq)
103{
104 if (peer && pdev && txq && (peer->tx_limit_flag) &&
105 (txq->frms >= pdev->tx_peer_bal.peer_bal_txq_limit))
106 return true;
107 else
108 return false;
109}
110#else
/* Bad-peer tx flow control compiled out: never report queue overflow. */
static inline A_BOOL ol_if_tx_bad_peer_txq_overflow(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer,
	struct ol_tx_frms_queue_t *txq)
{
	return false;
}
118#endif
119
120/* EAPOL go with voice priority: WMM_AC_TO_TID1(WMM_AC_VO);*/
121#define TX_EAPOL_TID 6
122
123/* ARP go with voice priority: WMM_AC_TO_TID1(pdev->arp_ac_override)*/
124#define TX_ARP_TID 6
125
126/* For non-IP case, use default TID */
127#define TX_DEFAULT_TID 0
128
129/*
130 * Determine IP TOS priority
131 * IP Tos format :
132 * (Refer Pg 57 WMM-test-plan-v1.2)
133 * IP-TOS - 8bits
134 * : DSCP(6-bits) ECN(2-bits)
135 * : DSCP - P2 P1 P0 X X X
136 * where (P2 P1 P0) form 802.1D
137 */
138static inline A_UINT8
139ol_tx_tid_by_ipv4(A_UINT8 *pkt)
140{
141 A_UINT8 ipPri, tid;
142 struct ipv4_hdr_t *ipHdr = (struct ipv4_hdr_t *)pkt;
143
144 ipPri = ipHdr->tos >> 5;
145 tid = ipPri & 0x7;
146
147 return tid;
148}
149
150static inline A_UINT8
151ol_tx_tid_by_ipv6(A_UINT8 *pkt)
152{
153 return (ipv6_traffic_class((struct ipv6_hdr_t *)pkt) >> 5) & 0x7;
154}
155
/**
 * ol_tx_set_ether_type() - parse the frame's L2 header to record the
 *	ethertype and the offset of the header that follows it
 * @datap: pointer to the start of the frame (802.11 or 802.3 header)
 * @tx_msdu_info: meta-data updated in place (ethertype, l3_hdr_offset)
 *
 * For raw (802.11) frames the LLC/SNAP header after the (QoS) data
 * header supplies the ethertype.  For ethernet frames, VLAN tags and
 * 802.3 length-style headers (with a trailing LLC/SNAP) are unwrapped
 * until the real ethertype is found.  Note l3_hdr_offset is the offset
 * of whatever follows the 802.3/802.11 header, which may itself be a
 * LLC/SNAP header rather than the IP header.
 * Other l2_hdr_type values are left untouched.
 */
static inline void
ol_tx_set_ether_type(
	A_UINT8 *datap,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	A_UINT16 typeorlength;
	A_UINT8 *ptr;
	A_UINT8 *l3_data_ptr;

	if (tx_msdu_info->htt.info.l2_hdr_type == htt_pkt_type_raw) {
		/* adjust hdr_ptr to RA */
		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;

		if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
					IEEE80211_FC0_TYPE_DATA) {
			struct llc_snap_hdr_t *llc;
			/* dot11 encapsulated frame */
			struct ieee80211_qosframe *whqos =
					(struct ieee80211_qosframe *)datap;
			/* QoS data frames carry a longer 802.11 header */
			if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_qosframe);
			} else {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_frame);
			}
			llc = (struct llc_snap_hdr_t *)
				(datap + tx_msdu_info->htt.info.l3_hdr_offset);
			tx_msdu_info->htt.info.ethertype =
				(llc->ethertype[0] << 8) | llc->ethertype[1];
			/*
			 * l3_hdr_offset refers to the end of the 802.3 or
			 * 802.11 header, which may be a LLC/SNAP header rather
			 * than the IP header.
			 * Thus, don't increment l3_hdr_offset += sizeof(*llc);
			 * rather, leave it as is.
			 */
		} else {
			/*
			 * This function should only be applied to data frames.
			 * For management frames, we already know to use
			 * HTT_TX_EXT_TID_MGMT.
			 */
			TXRX_ASSERT2(0);
		}
	} else if (tx_msdu_info->htt.info.l2_hdr_type ==
					htt_pkt_type_ethernet) {
		/* ethertype/length field sits after the two MAC addresses */
		ptr = (datap + ETHERNET_ADDR_LEN * 2);
		typeorlength = (ptr[0] << 8) | ptr[1];
		/*ETHERNET_HDR_LEN;*/
		l3_data_ptr = datap + sizeof(struct ethernet_hdr_t);

		/* skip a VLAN tag, if present, to reach the real type */
		if (typeorlength == ETHERTYPE_VLAN) {
			ptr = (datap + ETHERNET_ADDR_LEN * 2
					+ ETHERTYPE_VLAN_LEN);
			typeorlength = (ptr[0] << 8) | ptr[1];
			l3_data_ptr += ETHERTYPE_VLAN_LEN;
		}

		if (!IS_ETHERTYPE(typeorlength)) {
			/* 802.3 header: the field was a length, so the real
			 * ethertype lives in the LLC/SNAP header */
			struct llc_snap_hdr_t *llc_hdr =
				(struct llc_snap_hdr_t *)l3_data_ptr;
			typeorlength = (llc_hdr->ethertype[0] << 8) |
						llc_hdr->ethertype[1];
			l3_data_ptr += sizeof(struct llc_snap_hdr_t);
		}

		tx_msdu_info->htt.info.l3_hdr_offset = (A_UINT8)(l3_data_ptr -
								 datap);
		tx_msdu_info->htt.info.ethertype = typeorlength;
	}
}
229
230static inline A_UINT8
231ol_tx_tid_by_ether_type(
232 A_UINT8 *datap,
233 struct ol_txrx_msdu_info_t *tx_msdu_info)
234{
235 A_UINT8 tid;
236 A_UINT8 *l3_data_ptr;
237 A_UINT16 typeorlength;
238
239 l3_data_ptr = datap + tx_msdu_info->htt.info.l3_hdr_offset;
240 typeorlength = tx_msdu_info->htt.info.ethertype;
241
242 /* IP packet, do packet inspection for TID */
243 if (typeorlength == ETHERTYPE_IPV4) {
244 tid = ol_tx_tid_by_ipv4(l3_data_ptr);
245 } else if (typeorlength == ETHERTYPE_IPV6) {
246 tid = ol_tx_tid_by_ipv6(l3_data_ptr);
247 } else if (ETHERTYPE_IS_EAPOL_WAPI(typeorlength)) {
248 /* EAPOL go with voice priority*/
249 tid = TX_EAPOL_TID;
250 } else if (typeorlength == ETHERTYPE_ARP) {
251 tid = TX_ARP_TID;
252 } else {
253 /* For non-IP case, use default TID */
254 tid = TX_DEFAULT_TID;
255 }
256 return tid;
257}
258
259static inline A_UINT8
260ol_tx_tid_by_raw_type(
261 A_UINT8 *datap,
262 struct ol_txrx_msdu_info_t *tx_msdu_info)
263{
264 A_UINT8 tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
265
266 /* adjust hdr_ptr to RA */
267 struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
268
269 /* FIXME: This code does not handle 4 address formats. The QOS field
270 * is not at usual location.
271 */
272 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
273 IEEE80211_FC0_TYPE_DATA) {
274 /* dot11 encapsulated frame */
275 struct ieee80211_qosframe *whqos =
276 (struct ieee80211_qosframe *)datap;
277 if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
278 tid = whqos->i_qos[0] & IEEE80211_QOS_TID;
279 else
280 tid = HTT_NON_QOS_TID;
281 } else {
282 /*
283 * This function should only be applied to data frames.
284 * For management frames, we already know to use
285 * HTT_TX_EXT_TID_MGMT.
286 */
287 qdf_assert(0);
288 }
289 return tid;
290}
291
292static A_UINT8
293ol_tx_tid(
294 struct ol_txrx_pdev_t *pdev,
295 qdf_nbuf_t tx_nbuf,
296 struct ol_txrx_msdu_info_t *tx_msdu_info)
297{
298 A_UINT8 *datap = qdf_nbuf_data(tx_nbuf);
299 A_UINT8 tid;
300
301 if (pdev->frame_format == wlan_frm_fmt_raw) {
302 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_raw;
303
304 ol_tx_set_ether_type(datap, tx_msdu_info);
305 tid = tx_msdu_info->htt.info.ext_tid ==
306 QDF_NBUF_TX_EXT_TID_INVALID ?
307 ol_tx_tid_by_raw_type(datap, tx_msdu_info) :
308 tx_msdu_info->htt.info.ext_tid;
309 } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
310 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_ethernet;
311
312 ol_tx_set_ether_type(datap, tx_msdu_info);
313 tid =
314 tx_msdu_info->htt.info.ext_tid ==
315 QDF_NBUF_TX_EXT_TID_INVALID ?
316 ol_tx_tid_by_ether_type(datap, tx_msdu_info) :
317 tx_msdu_info->htt.info.ext_tid;
318 } else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
319 struct llc_snap_hdr_t *llc;
320
321 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
322 tx_msdu_info->htt.info.l3_hdr_offset =
323 sizeof(struct ieee80211_frame);
324 llc = (struct llc_snap_hdr_t *)
325 (datap + tx_msdu_info->htt.info.l3_hdr_offset);
326 tx_msdu_info->htt.info.ethertype =
327 (llc->ethertype[0] << 8) | llc->ethertype[1];
328 /*
329 * Native WiFi is a special case of "raw" 802.11 header format.
330 * However, we expect that for all cases that use native WiFi,
331 * the TID will be directly specified out of band.
332 */
333 tid = tx_msdu_info->htt.info.ext_tid;
334 } else {
335 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
336 "Invalid standard frame type: %d\n",
337 pdev->frame_format);
338 qdf_assert(0);
339 tid = HTT_TX_EXT_TID_INVALID;
340 }
341 return tid;
342}
343
344#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
/**
 * ol_tx_tdls_peer_find() - find the peer to transmit to, preferring a
 *	TDLS link when one is in use
 * @pdev: physical device, used for the peer-by-address lookup
 * @vdev: virtual device doing the transmit
 * @peer_id: output parameter filled by the peer-by-address lookup
 *
 * When the vdev's hlTdlsFlag is set, look up the stored TDLS AP MAC
 * address; a peer whose first peer ID is still HTT_INVALID_PEER_ID is
 * treated as unusable.  Falls back to the vdev's associated peer.
 *
 * Return: the chosen peer (with a reference taken), or NULL
 */
static inline
struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
						struct ol_txrx_vdev_t *vdev,
						uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer = NULL;

	if (vdev->hlTdlsFlag) {
		peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
						 vdev->hl_tdls_ap_mac_addr.raw,
						 peer_id);
		/* a peer that has no valid peer ID yet cannot be used */
		if (peer && (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
			peer = NULL;
		} else {
			/* take a reference on the peer being returned */
			if (peer)
				qdf_atomic_inc(&peer->ref_cnt);
		}
	}
	/* no usable TDLS peer: fall back to the associated (AP) peer */
	if (!peer)
		peer = ol_txrx_assoc_peer_find(vdev);

	return peer;
}
368
369#else
370struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
371 struct ol_txrx_vdev_t *vdev,
372 uint8_t *peer_id)
373{
374 struct ol_txrx_peer_t *peer = NULL;
375 peer = ol_txrx_assoc_peer_find(vdev);
376
377 return peer;
378}
379
380
381#endif
382
/**
 * ol_tx_classify() - classify a tx data frame and select its tx queue
 * @vdev: virtual device transmitting the frame
 * @tx_desc: tx descriptor for the frame; its txq field is updated here
 * @tx_nbuf: network buffer holding the frame
 * @tx_msdu_info: in/out meta-data (peer, ext_tid, peer_id, is_unicast)
 *
 * Multicast/broadcast frames (and all frames in OCB mode) are routed to
 * the vdev's mcast/bcast queue; unicast frames are classified into a TID
 * and routed to the destination peer's per-TID queue.  On success the
 * chosen queue is stored in tx_desc->txq and returned; on any
 * classification error NULL is returned and any peer reference taken
 * here is released first.
 *
 * Return: the tx frame queue for this frame, or NULL on error
 */
struct ol_tx_frms_queue_t *
ol_tx_classify(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_tx_frms_queue_t *txq = NULL;
	A_UINT8 *dest_addr;
	A_UINT8 tid;
	u_int8_t peer_id;

	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
	if ((IEEE80211_IS_MULTICAST(dest_addr)) ||
	    (vdev->opmode == wlan_op_mode_ocb)) {
		/* mcast/bcast (or OCB) frames use the vdev-level queue */
		txq = &vdev->txqs[OL_TX_VDEV_MCAST_BCAST];
		tx_msdu_info->htt.info.ext_tid =
					HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * The STA sends a frame with a broadcast
			 * dest addr (DA) as a
			 * unicast frame to the AP's receive addr (RA).
			 * Find the peer object that represents the AP
			 * that the STA is associated with.
			 */
			peer = ol_txrx_assoc_peer_find(vdev);
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Error: STA %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast DA tx data frame w/o association\n",
					  vdev,
					  vdev->mac_addr.raw[0],
					  vdev->mac_addr.raw[1],
					  vdev->mac_addr.raw[2],
					  vdev->mac_addr.raw[3],
					  vdev->mac_addr.raw[4],
					  vdev->mac_addr.raw[5]);
				return NULL; /* error */
			} else if ((peer->security[
					OL_TXRX_PEER_SECURITY_MULTICAST].
								sec_type
					!= htt_sec_type_wapi) &&
				   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
				if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
					QDF_NBUF_CB_GET_PACKET_TYPE(
								tx_nbuf)) {
					/* DHCP frame to go with
					 * voice priority
					 */
					txq = &peer->txqs[TX_DHCP_TID];
					tx_msdu_info->htt.info.ext_tid =
								TX_DHCP_TID;
				}
			}
			/*
			 * The following line assumes each peer object has a
			 * single ID. This is currently true, and is expected
			 * to remain true.
			 */
			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		} else if (vdev->opmode == wlan_op_mode_ocb) {
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
			/* In OCB mode, don't worry about the peer.
			 *We don't need it. */
			peer = NULL;
		} else {
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
			/*
			 * Look up the vdev's BSS peer, so that the
			 * classify_extension function can check whether to
			 * encrypt multicast / broadcast frames.
			 */
			peer = ol_txrx_peer_find_hash_find(pdev,
							   vdev->mac_addr.raw,
							   0, 1);
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Error: vdev %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast/mcast, but no self-peer found\n",
					  vdev,
					  vdev->mac_addr.raw[0],
					  vdev->mac_addr.raw[1],
					  vdev->mac_addr.raw[2],
					  vdev->mac_addr.raw[3],
					  vdev->mac_addr.raw[4],
					  vdev->mac_addr.raw[5]);
				return NULL; /* error */
			}
		}
		tx_msdu_info->htt.info.is_unicast = false;
	} else {
		/* tid would be overwritten for non QoS case*/
		tid = ol_tx_tid(pdev, tx_nbuf, tx_msdu_info);
		if ((HTT_TX_EXT_TID_INVALID == tid) ||
		    (tid >= OL_TX_NUM_TIDS)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s Error: could not classify packet into valid TID(%d).\n",
				  __func__, tid);
			return NULL;
		}
#ifdef ATH_SUPPORT_WAPI
		/* Check to see if a frame is a WAI frame */
		if (tx_msdu_info->htt.info.ethertype == ETHERTYPE_WAI) {
			/* WAI frames should not be encrypted */
			tx_msdu_info->htt.action.do_encrypt = 0;
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "Tx Frame is a WAI frame\n");
		}
#endif /* ATH_SUPPORT_WAPI */

		/*
		 * Find the peer and increment its reference count.
		 * If this vdev is an AP, use the dest addr (DA) to determine
		 * which peer STA this unicast data frame is for.
		 * If this vdev is a STA, the unicast data frame is for the
		 * AP the STA is associated with.
		 */
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * TO DO:
			 * To support TDLS, first check if there is a TDLS
			 * peer STA,
			 * and if so, check if the DA matches the TDLS peer
			 * STA's MAC address. If there is no peer TDLS STA,
			 * or if the DA is not the TDLS STA's address,
			 * then the frame is either for the AP itself, or is
			 * supposed to be sent to the AP for forwarding.
			 */
#if 0
			if (vdev->num_tdls_peers > 0) {
				peer = NULL;
				for (i = 0; i < vdev->num_tdls_peers; i++) {
					int differs = adf_os_mem_cmp(
						vdev->tdls_peers[i]->
						mac_addr.raw,
						dest_addr,
						OL_TXRX_MAC_ADDR_LEN);
					if (!differs) {
						peer = vdev->tdls_peers[i];
						break;
					}
				}
			} else {
				/* send to AP */
				peer = ol_txrx_assoc_peer_find(vdev);
			}
#endif

			peer = ol_tx_tdls_peer_find(pdev, vdev, &peer_id);
		} else {
			peer = ol_txrx_peer_find_hash_find(pdev, dest_addr,
							   0, 1);
		}
		tx_msdu_info->htt.info.is_unicast = true;
		if (!peer) {
			/*
			 * Unicast data xfer can only happen to an
			 * associated peer. It is illegitimate to send unicast
			 * data if there is no peer to send it to.
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "Error: vdev %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send unicast tx data frame to an unknown peer\n",
				  vdev,
				  vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
				  vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
				  vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
			return NULL; /* error */
		}
		TX_SCHED_DEBUG_PRINT("Peer found\n");
		if (!peer->qos_capable) {
			/* non-QoS peers always use the non-QoS TID */
			tid = OL_TX_NON_QOS_TID;
		} else if ((peer->security[
				OL_TXRX_PEER_SECURITY_UNICAST].sec_type
					!= htt_sec_type_wapi) &&
			   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
			if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
				QDF_NBUF_CB_GET_PACKET_TYPE(tx_nbuf))
				/* DHCP frame to go with voice priority */
				tid = TX_DHCP_TID;
		}

		/* Only allow encryption when in authenticated state */
		if (OL_TXRX_PEER_STATE_AUTH != peer->state)
			tx_msdu_info->htt.action.do_encrypt = 0;

		txq = &peer->txqs[tid];
		tx_msdu_info->htt.info.ext_tid = tid;
		/*
		 * The following line assumes each peer object has a single ID.
		 * This is currently true, and is expected to remain true.
		 */
		tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		/*
		 * WORKAROUND - check that the peer ID is valid.
		 * If tx data is provided before ol_rx_peer_map_handler is
		 * called to record the peer ID specified by the target,
		 * then we could end up here with an invalid peer ID.
		 * TO DO: rather than dropping the tx frame, pause the txq it
		 * goes into, then fill in the peer ID for the entries in the
		 * txq when the peer_map event provides the peer ID, and then
		 * unpause the txq.
		 */
		if (tx_msdu_info->htt.info.peer_id == HTT_INVALID_PEER_ID) {
			if (peer) {
				ol_txrx_info(
					"%s: remove the peer for invalid peer_id %p\n",
					__func__, peer);
				/* remove the peer reference added above */
				ol_txrx_peer_unref_delete(peer);
				tx_msdu_info->peer = NULL;
			}
			return NULL;
		}
	}
	tx_msdu_info->peer = peer;
	/* drop the frame if the destination peer's queue is over its limit */
	if (ol_if_tx_bad_peer_txq_overflow(pdev, peer, txq))
		return NULL;
	/*
	 * If relevant, do a deeper inspection to determine additional
	 * characteristics of the tx frame.
	 * If the frame is invalid, then the txq will be set to NULL to
	 * indicate an error.
	 */
	OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, tx_nbuf, tx_msdu_info, txq);
	if (IEEE80211_IS_MULTICAST(dest_addr) && vdev->opmode !=
			wlan_op_mode_sta && tx_msdu_info->peer !=
								NULL) {
		ol_txrx_dbg(
			"%s: remove the peer reference %p\n",
			__func__, peer);
		/* remove the peer reference added above */
		ol_txrx_peer_unref_delete(tx_msdu_info->peer);
		/* Making peer NULL in case if multicast non STA mode */
		tx_msdu_info->peer = NULL;
	}

	/* Whether this frame can download though HTT2 data pipe or not. */
	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);

	/* Update Tx Queue info */
	tx_desc->txq = txq;

	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	return txq;
}
633
/**
 * ol_tx_classify_mgmt() - classify a tx management frame and select its
 *	tx queue
 * @vdev: virtual device transmitting the frame
 * @tx_desc: tx descriptor for the frame; its txq field is updated here
 * @tx_nbuf: network buffer holding the frame
 * @tx_msdu_info: in/out meta-data (peer, ext_tid, peer_id, is_unicast)
 *
 * Multicast mgmt frames, and unicast mgmt frames for which no peer
 * exists yet (probe/assoc/preauth), use the vdev's default mgmt queue;
 * otherwise the peer's mgmt-TID queue is used.
 *
 * Return: the tx frame queue for this frame (never NULL here; the
 *	extension hook may clear txq on error)
 */
struct ol_tx_frms_queue_t *
ol_tx_classify_mgmt(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_tx_frms_queue_t *txq = NULL;
	A_UINT8 *dest_addr;
	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;

	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
	if (IEEE80211_IS_MULTICAST(dest_addr)) {
		/*
		 * AP: beacons are broadcast,
		 * public action frames (e.g. extended channel
		 * switch announce) may be broadcast
		 * STA: probe requests can be either broadcast or unicast
		 */
		txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
		tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
		tx_msdu_info->peer = NULL;
		tx_msdu_info->htt.info.is_unicast = 0;
	} else {
		/*
		 * Find the peer and increment its reference count.
		 * If this vdev is an AP, use the receiver addr (RA) to
		 * determine which peer STA this unicast mgmt frame is for.
		 * If this vdev is a STA, the unicast mgmt frame is for the
		 * AP the STA is associated with.
		 * Probe request / response and Assoc request / response are
		 * sent before the peer exists - in this case, use the
		 * vdev's default tx queue.
		 */
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * TO DO:
			 * To support TDLS, first check if there is a TDLS
			 * peer STA, and if so, check if the DA matches
			 * the TDLS peer STA's MAC address.
			 */
			peer = ol_txrx_assoc_peer_find(vdev);
			/*
			 * Some special case(preauth for example) needs to send
			 * unicast mgmt frame to unassociated AP. In such case,
			 * we need to check if dest addr match the associated
			 * peer addr. If not, we set peer as NULL to queue this
			 * frame to vdev queue.
			 */
			if (peer) {
				int rcnt;

				/* copy DA into an aligned buffer for the
				 * MAC address comparison */
				qdf_mem_copy(
					&local_mac_addr_aligned.raw[0],
					dest_addr, OL_TXRX_MAC_ADDR_LEN);
				mac_addr = &local_mac_addr_aligned;
				if (ol_txrx_peer_find_mac_addr_cmp(
						mac_addr,
						&peer->mac_addr) != 0) {
					/* DA is not the associated peer:
					 * drop the reference taken above */
					rcnt = ol_txrx_peer_unref_delete(peer);
					QDF_TRACE(QDF_MODULE_ID_TXRX,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s: peer %p peer->ref_cnt %d",
						  __func__, peer, rcnt);
					peer = NULL;
				}
			}
		} else {
			/* find the peer and increment its reference count */
			peer = ol_txrx_peer_find_hash_find(pdev, dest_addr,
							   0, 1);
		}
		tx_msdu_info->peer = peer;
		if (!peer) {
			/* no peer yet (e.g. probe/assoc): use vdev queue */
			txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
		} else {
			txq = &peer->txqs[HTT_TX_EXT_TID_MGMT];
			tx_msdu_info->htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
			/*
			 * The following line assumes each peer object has a
			 * single ID. This is currently true, and is expected
			 * to remain true.
			 */
			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		}
		tx_msdu_info->htt.info.is_unicast = 1;
	}
	/*
	 * If relevant, do a deeper inspection to determine additional
	 * characteristics of the tx frame.
	 * If the frame is invalid, then the txq will be set to NULL to
	 * indicate an error.
	 */
	OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, tx_nbuf,
				      tx_msdu_info, txq);

	/* Whether this frame can download though HTT2 data pipe or not. */
	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);

	/* Update Tx Queue info */
	tx_desc->txq = txq;

	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	return txq;
}
743
744A_STATUS
745ol_tx_classify_extension(
746 struct ol_txrx_vdev_t *vdev,
747 struct ol_tx_desc_t *tx_desc,
748 qdf_nbuf_t tx_msdu,
749 struct ol_txrx_msdu_info_t *msdu_info)
750{
751 A_UINT8 *datap = qdf_nbuf_data(tx_msdu);
752 struct ol_txrx_peer_t *peer;
753 int which_key;
754
755 /*
756 * The following msdu_info fields were already filled in by the
757 * ol_tx entry function or the regular ol_tx_classify function:
758 * htt.info.vdev_id (ol_tx_hl or ol_tx_non_std_hl)
759 * htt.info.ext_tid (ol_tx_non_std_hl or ol_tx_classify)
760 * htt.info.frame_type (ol_tx_hl or ol_tx_non_std_hl)
761 * htt.info.l2_hdr_type (ol_tx_hl or ol_tx_non_std_hl)
762 * htt.info.is_unicast (ol_tx_classify)
763 * htt.info.peer_id (ol_tx_classify)
764 * peer (ol_tx_classify)
765 * if (is_unicast) {
766 * htt.info.ethertype (ol_tx_classify)
767 * htt.info.l3_hdr_offset (ol_tx_classify)
768 * }
769 * The following fields need to be filled in by this function:
770 * if (!is_unicast) {
771 * htt.info.ethertype
772 * htt.info.l3_hdr_offset
773 * }
774 * htt.action.band (NOT CURRENTLY USED)
775 * htt.action.do_encrypt
776 * htt.action.do_tx_complete
777 * The following fields are not needed for data frames, and can
778 * be left uninitialized:
779 * htt.info.frame_subtype
780 */
781
782 if (!msdu_info->htt.info.is_unicast) {
783 int l2_hdr_size;
784 A_UINT16 ethertype;
785
786 if (msdu_info->htt.info.l2_hdr_type == htt_pkt_type_ethernet) {
787 struct ethernet_hdr_t *eh;
788
789 eh = (struct ethernet_hdr_t *)datap;
790 l2_hdr_size = sizeof(*eh);
791 ethertype = (eh->ethertype[0] << 8) | eh->ethertype[1];
792
793 if (ethertype == ETHERTYPE_VLAN) {
794 struct ethernet_vlan_hdr_t *evh;
795
796 evh = (struct ethernet_vlan_hdr_t *)datap;
797 l2_hdr_size = sizeof(*evh);
798 ethertype = (evh->ethertype[0] << 8) |
799 evh->ethertype[1];
800 }
801
802 if (!IS_ETHERTYPE(ethertype)) {
803 /* 802.3 header*/
804 struct llc_snap_hdr_t *llc =
805 (struct llc_snap_hdr_t *)(datap +
806 l2_hdr_size);
807 ethertype = (llc->ethertype[0] << 8) |
808 llc->ethertype[1];
809 l2_hdr_size += sizeof(*llc);
810 }
811 msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
812 msdu_info->htt.info.ethertype = ethertype;
813 } else { /* 802.11 */
814 struct llc_snap_hdr_t *llc;
815 l2_hdr_size = ol_txrx_ieee80211_hdrsize(datap);
816 llc = (struct llc_snap_hdr_t *)(datap + l2_hdr_size);
817 ethertype = (llc->ethertype[0] << 8) |
818 llc->ethertype[1];
819 /*
820 * Don't include the LLC/SNAP header in l2_hdr_size,
821 * because l3_hdr_offset is actually supposed to refer
822 * to the header after the 802.3 or 802.11 header,
823 * which could be a LLC/SNAP header rather
824 * than the L3 header.
825 */
826 }
827 msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
828 msdu_info->htt.info.ethertype = ethertype;
829 which_key = txrx_sec_mcast;
830 } else {
831 which_key = txrx_sec_ucast;
832 }
833 peer = msdu_info->peer;
834 /*
835 * msdu_info->htt.action.do_encrypt is initially set in ol_tx_desc_hl.
836 * Add more check here.
837 */
838 msdu_info->htt.action.do_encrypt = (!peer) ? 0 :
839 (peer->security[which_key].sec_type == htt_sec_type_none) ? 0 :
840 msdu_info->htt.action.do_encrypt;
841 /*
842 * For systems that have a frame by frame spec for whether to receive
843 * a tx completion notification, use the tx completion notification
844 * only for certain management frames, not for data frames.
845 * (In the future, this may be changed slightly, e.g. to request a
846 * tx completion notification for the final EAPOL message sent by a
847 * STA during the key delivery handshake.)
848 */
849 msdu_info->htt.action.do_tx_complete = 0;
850
851 return A_OK;
852}
853
854A_STATUS
855ol_tx_classify_mgmt_extension(
856 struct ol_txrx_vdev_t *vdev,
857 struct ol_tx_desc_t *tx_desc,
858 qdf_nbuf_t tx_msdu,
859 struct ol_txrx_msdu_info_t *msdu_info)
860{
861 struct ieee80211_frame *wh;
862
863 /*
864 * The following msdu_info fields were already filled in by the
865 * ol_tx entry function or the regular ol_tx_classify_mgmt function:
866 * htt.info.vdev_id (ol_txrx_mgmt_send)
867 * htt.info.frame_type (ol_txrx_mgmt_send)
868 * htt.info.l2_hdr_type (ol_txrx_mgmt_send)
869 * htt.action.do_tx_complete (ol_txrx_mgmt_send)
870 * htt.info.peer_id (ol_tx_classify_mgmt)
871 * htt.info.ext_tid (ol_tx_classify_mgmt)
872 * htt.info.is_unicast (ol_tx_classify_mgmt)
873 * peer (ol_tx_classify_mgmt)
874 * The following fields need to be filled in by this function:
875 * htt.info.frame_subtype
876 * htt.info.l3_hdr_offset
877 * htt.action.band (NOT CURRENTLY USED)
878 * The following fields are not needed for mgmt frames, and can
879 * be left uninitialized:
880 * htt.info.ethertype
881 * htt.action.do_encrypt
882 * (This will be filled in by other SW, which knows whether
883 * the peer has robust-managment-frames enabled.)
884 */
885 wh = (struct ieee80211_frame *)qdf_nbuf_data(tx_msdu);
886 msdu_info->htt.info.frame_subtype =
887 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
888 IEEE80211_FC0_SUBTYPE_SHIFT;
889 msdu_info->htt.info.l3_hdr_offset = sizeof(struct ieee80211_frame);
890
891 return A_OK;
892}
893
894#endif /* defined(CONFIG_HL_SUPPORT) */