blob: ec1e845570683835310de86fcdee0b031451c7e8 [file] [log] [blame]
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301/*
Yu Wang053d3e72017-02-08 18:48:24 +08002 * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
29#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
30#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
31#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
32#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
33#include <ol_txrx.h>
34#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
35#include <ol_txrx_types.h> /* pdev stats */
36#include <ol_tx_desc.h> /* ol_tx_desc */
37#include <ol_tx_send.h> /* ol_tx_send */
38#include <ol_txrx_peer_find.h>
39#include <ol_tx_classify.h>
40#include <ol_tx_queue.h>
41#include <ipv4.h>
42#include <ipv6_defs.h>
43#include <ip_prot.h>
44#include <enet.h> /* ETHERTYPE_VLAN, etc. */
45#include <cds_ieee80211_common.h> /* ieee80211_frame */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080046#include <cdp_txrx_handle.h>
Siddarth Poddarb2011f62016-04-27 20:45:42 +053047/*
48 * In theory, this tx classify code could be used on the host or in the target.
49 * Thus, this code uses generic OS primitives, that can be aliased to either
50 * the host's OS primitives or the target's OS primitives.
51 * For now, the following #defines set up these host-specific or
52 * target-specific aliases.
53 */
54
55#if defined(CONFIG_HL_SUPPORT)
56
57#define OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
58#define OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
59
60#ifdef QCA_TX_HTT2_SUPPORT
61static void
62ol_tx_classify_htt2_frm(
63 struct ol_txrx_vdev_t *vdev,
64 qdf_nbuf_t tx_nbuf,
65 struct ol_txrx_msdu_info_t *tx_msdu_info)
66{
67 struct htt_msdu_info_t *htt = &tx_msdu_info->htt;
68 A_UINT8 candi_frm = 0;
69
70 /*
71 * Offload the frame re-order to L3 protocol and ONLY support
72 * TCP protocol now.
73 */
74 if ((htt->info.l2_hdr_type == htt_pkt_type_ethernet) &&
75 (htt->info.frame_type == htt_frm_type_data) &&
76 htt->info.is_unicast &&
77 (htt->info.ethertype == ETHERTYPE_IPV4)) {
78 struct ipv4_hdr_t *ipHdr;
79
80 ipHdr = (struct ipv4_hdr_t *)(qdf_nbuf_data(tx_nbuf) +
81 htt->info.l3_hdr_offset);
82 if (ipHdr->protocol == IP_PROTOCOL_TCP)
83 candi_frm = 1;
84 }
85
86 qdf_nbuf_set_tx_parallel_dnload_frm(tx_nbuf, candi_frm);
87}
88
89#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) \
Yun Park04097e82017-04-05 13:59:13 -070090 ol_tx_classify_htt2_frm(vdev, netbuf, msdu_info)
Siddarth Poddarb2011f62016-04-27 20:45:42 +053091#else
92#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) /* no-op */
93#endif /* QCA_TX_HTT2_SUPPORT */
94/* DHCP go with voice priority; WMM_AC_VO_TID1();*/
95#define TX_DHCP_TID 6
96
97#if defined(QCA_BAD_PEER_TX_FLOW_CL)
98static inline A_BOOL
99ol_if_tx_bad_peer_txq_overflow(
100 struct ol_txrx_pdev_t *pdev,
101 struct ol_txrx_peer_t *peer,
102 struct ol_tx_frms_queue_t *txq)
103{
104 if (peer && pdev && txq && (peer->tx_limit_flag) &&
105 (txq->frms >= pdev->tx_peer_bal.peer_bal_txq_limit))
106 return true;
107 else
108 return false;
109}
110#else
111static inline A_BOOL ol_if_tx_bad_peer_txq_overflow(
112 struct ol_txrx_pdev_t *pdev,
113 struct ol_txrx_peer_t *peer,
114 struct ol_tx_frms_queue_t *txq)
115{
116 return false;
117}
118#endif
119
120/* EAPOL go with voice priority: WMM_AC_TO_TID1(WMM_AC_VO);*/
121#define TX_EAPOL_TID 6
122
123/* ARP go with voice priority: WMM_AC_TO_TID1(pdev->arp_ac_override)*/
124#define TX_ARP_TID 6
125
126/* For non-IP case, use default TID */
127#define TX_DEFAULT_TID 0
128
129/*
130 * Determine IP TOS priority
131 * IP Tos format :
132 * (Refer Pg 57 WMM-test-plan-v1.2)
133 * IP-TOS - 8bits
134 * : DSCP(6-bits) ECN(2-bits)
135 * : DSCP - P2 P1 P0 X X X
136 * where (P2 P1 P0) form 802.1D
137 */
138static inline A_UINT8
139ol_tx_tid_by_ipv4(A_UINT8 *pkt)
140{
141 A_UINT8 ipPri, tid;
142 struct ipv4_hdr_t *ipHdr = (struct ipv4_hdr_t *)pkt;
143
144 ipPri = ipHdr->tos >> 5;
145 tid = ipPri & 0x7;
146
147 return tid;
148}
149
150static inline A_UINT8
151ol_tx_tid_by_ipv6(A_UINT8 *pkt)
152{
153 return (ipv6_traffic_class((struct ipv6_hdr_t *)pkt) >> 5) & 0x7;
154}
155
/*
 * ol_tx_set_ether_type() - determine the frame's ethertype and L3 offset
 * @datap: pointer to the start of the frame (802.11 raw or 802.3 format)
 * @tx_msdu_info: per-MSDU metadata; info.ethertype and info.l3_hdr_offset
 *                are filled in on success
 *
 * For raw (802.11) frames, the RA/QoS headers are skipped and the
 * ethertype is read from the LLC/SNAP header.  For 802.3 frames, VLAN
 * tags and length-type (LLC/SNAP encapsulated) headers are handled.
 * Note: l3_hdr_offset records the end of the 802.3/802.11 header, which
 * may point at a LLC/SNAP header rather than the IP header itself.
 */
static inline void
ol_tx_set_ether_type(
	A_UINT8 *datap,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	A_UINT16 typeorlength;
	A_UINT8 *ptr;
	A_UINT8 *l3_data_ptr;

	if (tx_msdu_info->htt.info.l2_hdr_type == htt_pkt_type_raw) {
		/* adjust hdr_ptr to RA */
		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;

		if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
					IEEE80211_FC0_TYPE_DATA) {
			struct llc_snap_hdr_t *llc;
			/* dot11 encapsulated frame */
			struct ieee80211_qosframe *whqos =
					(struct ieee80211_qosframe *)datap;
			/* QoS frames carry an extra QoS-control field, so
			 * the header is longer than a plain data frame.
			 */
			if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_qosframe);
			} else {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_frame);
			}
			llc = (struct llc_snap_hdr_t *)
				(datap + tx_msdu_info->htt.info.l3_hdr_offset);
			tx_msdu_info->htt.info.ethertype =
				(llc->ethertype[0] << 8) | llc->ethertype[1];
			/*
			 * l3_hdr_offset refers to the end of the 802.3 or
			 * 802.11 header, which may be a LLC/SNAP header rather
			 * than the IP header.
			 * Thus, don't increment l3_hdr_offset += sizeof(*llc);
			 * rather,leave it as is.
			 */
		} else {
			/*
			 * This function should only be applied to data frames.
			 * For management frames, we already know to use
			 * HTT_TX_EXT_TID_MGMT.
			 */
			TXRX_ASSERT2(0);
		}
	} else if (tx_msdu_info->htt.info.l2_hdr_type ==
					htt_pkt_type_ethernet) {
		/* Ethertype/length field follows the two MAC addresses. */
		ptr = (datap + ETHERNET_ADDR_LEN * 2);
		typeorlength = (ptr[0] << 8) | ptr[1];
		/*ETHERNET_HDR_LEN;*/
		l3_data_ptr = datap + sizeof(struct ethernet_hdr_t);

		/* Skip an optional 802.1Q VLAN tag to reach the real
		 * ethertype.
		 */
		if (typeorlength == ETHERTYPE_VLAN) {
			ptr = (datap + ETHERNET_ADDR_LEN * 2
					+ ETHERTYPE_VLAN_LEN);
			typeorlength = (ptr[0] << 8) | ptr[1];
			l3_data_ptr += ETHERTYPE_VLAN_LEN;
		}

		if (!IS_ETHERTYPE(typeorlength)) {
			/* 802.3 header*/
			/* Field is a length, not a type: the frame is
			 * LLC/SNAP encapsulated, so read the ethertype
			 * from the SNAP header.
			 */
			struct llc_snap_hdr_t *llc_hdr =
				(struct llc_snap_hdr_t *)l3_data_ptr;
			typeorlength = (llc_hdr->ethertype[0] << 8) |
						llc_hdr->ethertype[1];
			l3_data_ptr += sizeof(struct llc_snap_hdr_t);
		}

		tx_msdu_info->htt.info.l3_hdr_offset = (A_UINT8)(l3_data_ptr -
									datap);
		tx_msdu_info->htt.info.ethertype = typeorlength;
	}
}
229
230static inline A_UINT8
231ol_tx_tid_by_ether_type(
232 A_UINT8 *datap,
233 struct ol_txrx_msdu_info_t *tx_msdu_info)
234{
235 A_UINT8 tid;
236 A_UINT8 *l3_data_ptr;
237 A_UINT16 typeorlength;
238
239 l3_data_ptr = datap + tx_msdu_info->htt.info.l3_hdr_offset;
240 typeorlength = tx_msdu_info->htt.info.ethertype;
241
242 /* IP packet, do packet inspection for TID */
243 if (typeorlength == ETHERTYPE_IPV4) {
244 tid = ol_tx_tid_by_ipv4(l3_data_ptr);
245 } else if (typeorlength == ETHERTYPE_IPV6) {
246 tid = ol_tx_tid_by_ipv6(l3_data_ptr);
247 } else if (ETHERTYPE_IS_EAPOL_WAPI(typeorlength)) {
248 /* EAPOL go with voice priority*/
249 tid = TX_EAPOL_TID;
250 } else if (typeorlength == ETHERTYPE_ARP) {
251 tid = TX_ARP_TID;
252 } else {
253 /* For non-IP case, use default TID */
254 tid = TX_DEFAULT_TID;
255 }
256 return tid;
257}
258
259static inline A_UINT8
260ol_tx_tid_by_raw_type(
261 A_UINT8 *datap,
262 struct ol_txrx_msdu_info_t *tx_msdu_info)
263{
264 A_UINT8 tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
265
266 /* adjust hdr_ptr to RA */
267 struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
268
269 /* FIXME: This code does not handle 4 address formats. The QOS field
270 * is not at usual location.
271 */
272 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
273 IEEE80211_FC0_TYPE_DATA) {
274 /* dot11 encapsulated frame */
275 struct ieee80211_qosframe *whqos =
276 (struct ieee80211_qosframe *)datap;
277 if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
278 tid = whqos->i_qos[0] & IEEE80211_QOS_TID;
279 else
280 tid = HTT_NON_QOS_TID;
281 } else {
282 /*
283 * This function should only be applied to data frames.
284 * For management frames, we already know to use
285 * HTT_TX_EXT_TID_MGMT.
286 */
287 qdf_assert(0);
288 }
289 return tid;
290}
291
292static A_UINT8
293ol_tx_tid(
294 struct ol_txrx_pdev_t *pdev,
295 qdf_nbuf_t tx_nbuf,
296 struct ol_txrx_msdu_info_t *tx_msdu_info)
297{
298 A_UINT8 *datap = qdf_nbuf_data(tx_nbuf);
299 A_UINT8 tid;
300
301 if (pdev->frame_format == wlan_frm_fmt_raw) {
302 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_raw;
303
304 ol_tx_set_ether_type(datap, tx_msdu_info);
305 tid = tx_msdu_info->htt.info.ext_tid ==
306 QDF_NBUF_TX_EXT_TID_INVALID ?
307 ol_tx_tid_by_raw_type(datap, tx_msdu_info) :
308 tx_msdu_info->htt.info.ext_tid;
309 } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
310 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_ethernet;
311
312 ol_tx_set_ether_type(datap, tx_msdu_info);
313 tid =
314 tx_msdu_info->htt.info.ext_tid ==
315 QDF_NBUF_TX_EXT_TID_INVALID ?
316 ol_tx_tid_by_ether_type(datap, tx_msdu_info) :
317 tx_msdu_info->htt.info.ext_tid;
318 } else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
319 struct llc_snap_hdr_t *llc;
320
321 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
322 tx_msdu_info->htt.info.l3_hdr_offset =
323 sizeof(struct ieee80211_frame);
324 llc = (struct llc_snap_hdr_t *)
325 (datap + tx_msdu_info->htt.info.l3_hdr_offset);
326 tx_msdu_info->htt.info.ethertype =
327 (llc->ethertype[0] << 8) | llc->ethertype[1];
328 /*
329 * Native WiFi is a special case of "raw" 802.11 header format.
330 * However, we expect that for all cases that use native WiFi,
331 * the TID will be directly specified out of band.
332 */
333 tid = tx_msdu_info->htt.info.ext_tid;
334 } else {
335 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
336 "Invalid standard frame type: %d\n",
337 pdev->frame_format);
338 qdf_assert(0);
339 tid = HTT_TX_EXT_TID_INVALID;
340 }
341 return tid;
342}
343
344#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
/*
 * ol_tx_tdls_peer_find() - find the tx peer for a STA vdev, preferring
 * the TDLS AP peer when the TDLS flag is set.
 * @pdev: physical device object
 * @vdev: virtual device the frame is sent on
 * @peer_id: out-param filled by the peer-by-address lookup
 *
 * Return: the peer to use (reference held), or the associated peer as a
 * fallback.  NOTE(review): ol_txrx_find_peer_by_addr appears not to take
 * a reference itself — hence the explicit qdf_atomic_inc below; confirm
 * against its definition before changing the ref-count handling.
 */
static inline
struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
						struct ol_txrx_vdev_t *vdev,
						uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer = NULL;

	if (vdev->hlTdlsFlag) {
		peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
						 vdev->hl_tdls_ap_mac_addr.raw,
						 peer_id);
		/* A peer whose ID is still unmapped is unusable; discard. */
		if (peer && (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
			peer = NULL;
		} else {
			if (peer)
				qdf_atomic_inc(&peer->ref_cnt);
		}
	}
	/* Fall back to the associated (AP) peer. */
	if (!peer)
		peer = ol_txrx_assoc_peer_find(vdev);

	return peer;
}
368
369#else
370struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
371 struct ol_txrx_vdev_t *vdev,
372 uint8_t *peer_id)
373{
374 struct ol_txrx_peer_t *peer = NULL;
Yun Park04097e82017-04-05 13:59:13 -0700375
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530376 peer = ol_txrx_assoc_peer_find(vdev);
377
378 return peer;
379}
380
381
382#endif
383
/*
 * ol_tx_classify() - classify a data frame: pick its tx queue, TID and
 * destination peer, and fill in the HTT metadata used for download.
 * @vdev: virtual device transmitting the frame
 * @tx_desc: tx descriptor; tx_desc->txq is updated on success
 * @tx_nbuf: the network buffer holding the frame
 * @tx_msdu_info: per-MSDU metadata filled in by this function
 *                (ext_tid, peer_id, is_unicast, peer, ...)
 *
 * Return: the tx frames queue the frame was classified into, or NULL on
 * error (no peer, invalid TID, bad-peer queue overflow, or an invalid
 * peer ID).  On success with a unicast peer, a peer reference is held
 * and recorded in tx_msdu_info->peer for the caller to release.
 */
struct ol_tx_frms_queue_t *
ol_tx_classify(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_tx_frms_queue_t *txq = NULL;
	A_UINT8 *dest_addr;
	A_UINT8 tid;
	u_int8_t peer_id;

	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
	/* Multicast/broadcast frames (and everything in OCB mode) go to
	 * the vdev-level mcast/bcast queue rather than a peer queue.
	 */
	if ((IEEE80211_IS_MULTICAST(dest_addr)) ||
	    (vdev->opmode == wlan_op_mode_ocb)) {
		txq = &vdev->txqs[OL_TX_VDEV_MCAST_BCAST];
		tx_msdu_info->htt.info.ext_tid =
			HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * The STA sends a frame with a broadcast
			 * dest addr (DA) as a
			 * unicast frame to the AP's receive addr (RA).
			 * Find the peer object that represents the AP
			 * that the STA is associated with.
			 */
			peer = ol_txrx_assoc_peer_find(vdev);
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Error: STA %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast DA tx data frame w/o association\n",
					  vdev,
					  vdev->mac_addr.raw[0],
					  vdev->mac_addr.raw[1],
					  vdev->mac_addr.raw[2],
					  vdev->mac_addr.raw[3],
					  vdev->mac_addr.raw[4],
					  vdev->mac_addr.raw[5]);
				return NULL; /* error */
			} else if ((peer->security[
					OL_TXRX_PEER_SECURITY_MULTICAST].sec_type
					!= htt_sec_type_wapi) &&
				   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
				if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
					QDF_NBUF_CB_GET_PACKET_TYPE(
								tx_nbuf)) {
					/* DHCP frame to go with
					 * voice priority
					 */
					txq = &peer->txqs[TX_DHCP_TID];
					tx_msdu_info->htt.info.ext_tid =
								TX_DHCP_TID;
				}
			}
			/*
			 * The following line assumes each peer object has a
			 * single ID. This is currently true, and is expected
			 * to remain true.
			 */
			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		} else if (vdev->opmode == wlan_op_mode_ocb) {
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
			/*
			 * In OCB mode, don't worry about the peer.
			 * We don't need it.
			 */
			peer = NULL;
		} else {
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
			/*
			 * Look up the vdev's BSS peer, so that the
			 * classify_extension function can check whether to
			 * encrypt multicast / broadcast frames.
			 */
			peer = ol_txrx_peer_find_hash_find(pdev,
							   vdev->mac_addr.raw,
							   0, 1);
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Error: vdev %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast/mcast, but no self-peer found\n",
					  vdev,
					  vdev->mac_addr.raw[0],
					  vdev->mac_addr.raw[1],
					  vdev->mac_addr.raw[2],
					  vdev->mac_addr.raw[3],
					  vdev->mac_addr.raw[4],
					  vdev->mac_addr.raw[5]);
				return NULL; /* error */
			}
		}
		tx_msdu_info->htt.info.is_unicast = false;
	} else {
		/* tid would be overwritten for non QoS case*/
		tid = ol_tx_tid(pdev, tx_nbuf, tx_msdu_info);
		if ((HTT_TX_EXT_TID_INVALID == tid) ||
		    (tid >= OL_TX_NUM_TIDS)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s Error: could not classify packet into valid TID(%d).\n",
				  __func__, tid);
			return NULL;
		}
#ifdef ATH_SUPPORT_WAPI
		/* Check to see if a frame is a WAI frame */
		if (tx_msdu_info->htt.info.ethertype == ETHERTYPE_WAI) {
			/* WAI frames should not be encrypted */
			tx_msdu_info->htt.action.do_encrypt = 0;
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "Tx Frame is a WAI frame\n");
		}
#endif /* ATH_SUPPORT_WAPI */

		/*
		 * Find the peer and increment its reference count.
		 * If this vdev is an AP, use the dest addr (DA) to determine
		 * which peer STA this unicast data frame is for.
		 * If this vdev is a STA, the unicast data frame is for the
		 * AP the STA is associated with.
		 */
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * TO DO:
			 * To support TDLS, first check if there is a TDLS
			 * peer STA,
			 * and if so, check if the DA matches the TDLS peer
			 * STA's MAC address. If there is no peer TDLS STA,
			 * or if the DA is not the TDLS STA's address,
			 * then the frame is either for the AP itself, or is
			 * supposed to be sent to the AP for forwarding.
			 */
			peer = ol_tx_tdls_peer_find(pdev, vdev, &peer_id);
		} else {
			peer = ol_txrx_peer_find_hash_find(pdev, dest_addr,
							   0, 1);
		}
		tx_msdu_info->htt.info.is_unicast = true;
		if (!peer) {
			/*
			 * Unicast data xfer can only happen to an
			 * associated peer. It is illegitimate to send unicast
			 * data if there is no peer to send it to.
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "Error: vdev %p (%02x:%02x:%02x:%02x:%02x:%02x) trying to send unicast tx data frame to an unknown peer\n",
				  vdev,
				  vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
				  vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
				  vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
			return NULL; /* error */
		}
		TX_SCHED_DEBUG_PRINT("Peer found\n");
		if (!peer->qos_capable) {
			/* Non-QoS peers always use the non-QoS TID. */
			tid = OL_TX_NON_QOS_TID;
		} else if ((peer->security[
				OL_TXRX_PEER_SECURITY_UNICAST].sec_type
				!= htt_sec_type_wapi) &&
			   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
			if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
			    QDF_NBUF_CB_GET_PACKET_TYPE(tx_nbuf))
				/* DHCP frame to go with voice priority */
				tid = TX_DHCP_TID;
		}

		/* Only allow encryption when in authenticated state */
		if (OL_TXRX_PEER_STATE_AUTH != peer->state)
			tx_msdu_info->htt.action.do_encrypt = 0;

		txq = &peer->txqs[tid];
		tx_msdu_info->htt.info.ext_tid = tid;
		/*
		 * The following line assumes each peer object has a single ID.
		 * This is currently true, and is expected to remain true.
		 */
		tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		/*
		 * WORKAROUND - check that the peer ID is valid.
		 * If tx data is provided before ol_rx_peer_map_handler is
		 * called to record the peer ID specified by the target,
		 * then we could end up here with an invalid peer ID.
		 * TO DO: rather than dropping the tx frame, pause the txq it
		 * goes into, then fill in the peer ID for the entries in the
		 * txq when the peer_map event provides the peer ID, and then
		 * unpause the txq.
		 */
		if (tx_msdu_info->htt.info.peer_id == HTT_INVALID_PEER_ID) {
			if (peer) {
				ol_txrx_info(
					"%s: remove the peer for invalid peer_id %p\n",
					__func__, peer);
				/* remove the peer reference added above */
				ol_txrx_peer_unref_delete(peer);
				tx_msdu_info->peer = NULL;
			}
			return NULL;
		}
	}
	tx_msdu_info->peer = peer;
	/* NOTE(review): this early return does not release the peer
	 * reference taken above; presumably the caller cleans up via
	 * tx_msdu_info->peer — confirm against callers.
	 */
	if (ol_if_tx_bad_peer_txq_overflow(pdev, peer, txq))
		return NULL;
	/*
	 * If relevant, do a deeper inspection to determine additional
	 * characteristics of the tx frame.
	 * If the frame is invalid, then the txq will be set to NULL to
	 * indicate an error.
	 */
	OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, tx_nbuf, tx_msdu_info, txq);
	if (IEEE80211_IS_MULTICAST(dest_addr) && vdev->opmode !=
	    wlan_op_mode_sta && tx_msdu_info->peer !=
	    NULL) {
		ol_txrx_dbg(
			"%s: remove the peer reference %p\n",
			__func__, peer);
		/* remove the peer reference added above */
		ol_txrx_peer_unref_delete(tx_msdu_info->peer);
		/* Making peer NULL in case if multicast non STA mode */
		tx_msdu_info->peer = NULL;
	}

	/* Whether this frame can download though HTT2 data pipe or not. */
	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);

	/* Update Tx Queue info */
	tx_desc->txq = txq;

	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	return txq;
}
616
/*
 * ol_tx_classify_mgmt() - classify a management frame: pick its tx queue
 * and destination peer, and fill in the HTT metadata.
 * @vdev: virtual device transmitting the frame
 * @tx_desc: tx descriptor; tx_desc->txq is updated
 * @tx_nbuf: the network buffer holding the management frame
 * @tx_msdu_info: per-MSDU metadata filled in by this function
 *
 * Multicast mgmt frames and unicast mgmt frames for which no peer exists
 * yet (probe/assoc exchanges, preauth) use the vdev default mgmt queue;
 * otherwise the peer's MGMT-TID queue is used and a peer reference is
 * held in tx_msdu_info->peer.
 *
 * Return: the tx frames queue for this frame (never NULL here; the
 * classify-mgmt extension may still NULL it out on error).
 */
struct ol_tx_frms_queue_t *
ol_tx_classify_mgmt(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_tx_frms_queue_t *txq = NULL;
	A_UINT8 *dest_addr;
	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;

	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
	if (IEEE80211_IS_MULTICAST(dest_addr)) {
		/*
		 * AP: beacons are broadcast,
		 * public action frames (e.g. extended channel
		 * switch announce) may be broadcast
		 * STA: probe requests can be either broadcast or unicast
		 */
		txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
		tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
		tx_msdu_info->peer = NULL;
		tx_msdu_info->htt.info.is_unicast = 0;
	} else {
		/*
		 * Find the peer and increment its reference count.
		 * If this vdev is an AP, use the receiver addr (RA) to
		 * determine which peer STA this unicast mgmt frame is for.
		 * If this vdev is a STA, the unicast mgmt frame is for the
		 * AP the STA is associated with.
		 * Probe request / response and Assoc request / response are
		 * sent before the peer exists - in this case, use the
		 * vdev's default tx queue.
		 */
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * TO DO:
			 * To support TDLS, first check if there is a TDLS
			 * peer STA, and if so, check if the DA matches
			 * the TDLS peer STA's MAC address.
			 */
			peer = ol_txrx_assoc_peer_find(vdev);
			/*
			 * Some special case(preauth for example) needs to send
			 * unicast mgmt frame to unassociated AP. In such case,
			 * we need to check if dest addr match the associated
			 * peer addr. If not, we set peer as NULL to queue this
			 * frame to vdev queue.
			 */
			if (peer) {
				int rcnt;

				/* Copy to an aligned buffer before the
				 * MAC-address comparison.
				 */
				qdf_mem_copy(
					&local_mac_addr_aligned.raw[0],
					dest_addr, OL_TXRX_MAC_ADDR_LEN);
				mac_addr = &local_mac_addr_aligned;
				if (ol_txrx_peer_find_mac_addr_cmp(
						mac_addr,
						&peer->mac_addr) != 0) {
					/* DA is not the associated AP:
					 * drop the reference and fall back
					 * to the vdev default mgmt queue.
					 */
					rcnt = ol_txrx_peer_unref_delete(peer);
					QDF_TRACE(QDF_MODULE_ID_TXRX,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "%s: peer %p peer->ref_cnt %d",
						  __func__, peer, rcnt);
					peer = NULL;
				}
			}
		} else {
			/* find the peer and increment its reference count */
			peer = ol_txrx_peer_find_hash_find(pdev, dest_addr,
							   0, 1);
		}
		tx_msdu_info->peer = peer;
		if (!peer) {
			txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
		} else {
			txq = &peer->txqs[HTT_TX_EXT_TID_MGMT];
			tx_msdu_info->htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
			/*
			 * The following line assumes each peer object has a
			 * single ID. This is currently true, and is expected
			 * to remain true.
			 */
			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		}
		tx_msdu_info->htt.info.is_unicast = 1;
	}
	/*
	 * If relevant, do a deeper inspection to determine additional
	 * characteristics of the tx frame.
	 * If the frame is invalid, then the txq will be set to NULL to
	 * indicate an error.
	 */
	OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, tx_nbuf,
				      tx_msdu_info, txq);

	/* Whether this frame can download though HTT2 data pipe or not. */
	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);

	/* Update Tx Queue info */
	tx_desc->txq = txq;

	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	return txq;
}
726
/*
 * ol_tx_classify_extension() - deeper inspection of a classified data
 * frame: derive ethertype/l3_hdr_offset for non-unicast frames and
 * finalize the encrypt / tx-complete actions.
 * @vdev: virtual device transmitting the frame
 * @tx_desc: tx descriptor (unused here, part of the extension contract)
 * @tx_msdu: the network buffer holding the frame
 * @msdu_info: per-MSDU metadata, partially filled by ol_tx_classify()
 *
 * Return: A_OK always.
 */
A_STATUS
ol_tx_classify_extension(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_msdu,
	struct ol_txrx_msdu_info_t *msdu_info)
{
	A_UINT8 *datap = qdf_nbuf_data(tx_msdu);
	struct ol_txrx_peer_t *peer;
	int which_key;

	/*
	 * The following msdu_info fields were already filled in by the
	 * ol_tx entry function or the regular ol_tx_classify function:
	 *     htt.info.vdev_id          (ol_tx_hl or ol_tx_non_std_hl)
	 *     htt.info.ext_tid          (ol_tx_non_std_hl or ol_tx_classify)
	 *     htt.info.frame_type       (ol_tx_hl or ol_tx_non_std_hl)
	 *     htt.info.l2_hdr_type      (ol_tx_hl or ol_tx_non_std_hl)
	 *     htt.info.is_unicast       (ol_tx_classify)
	 *     htt.info.peer_id          (ol_tx_classify)
	 *     peer                      (ol_tx_classify)
	 *     if (is_unicast) {
	 *         htt.info.ethertype    (ol_tx_classify)
	 *         htt.info.l3_hdr_offset (ol_tx_classify)
	 *     }
	 * The following fields need to be filled in by this function:
	 *     if (!is_unicast) {
	 *         htt.info.ethertype
	 *         htt.info.l3_hdr_offset
	 *     }
	 *     htt.action.band (NOT CURRENTLY USED)
	 *     htt.action.do_encrypt
	 *     htt.action.do_tx_complete
	 * The following fields are not needed for data frames, and can
	 * be left uninitialized:
	 *     htt.info.frame_subtype
	 */

	if (!msdu_info->htt.info.is_unicast) {
		int l2_hdr_size;
		A_UINT16 ethertype;

		if (msdu_info->htt.info.l2_hdr_type == htt_pkt_type_ethernet) {
			struct ethernet_hdr_t *eh;

			eh = (struct ethernet_hdr_t *)datap;
			l2_hdr_size = sizeof(*eh);
			ethertype = (eh->ethertype[0] << 8) | eh->ethertype[1];

			/* Skip an optional 802.1Q VLAN tag. */
			if (ethertype == ETHERTYPE_VLAN) {
				struct ethernet_vlan_hdr_t *evh;

				evh = (struct ethernet_vlan_hdr_t *)datap;
				l2_hdr_size = sizeof(*evh);
				ethertype = (evh->ethertype[0] << 8) |
					evh->ethertype[1];
			}

			if (!IS_ETHERTYPE(ethertype)) {
				/* 802.3 header*/
				/* Field is a length: the real ethertype
				 * is in the LLC/SNAP header.
				 */
				struct llc_snap_hdr_t *llc =
					(struct llc_snap_hdr_t *)(datap +
							l2_hdr_size);
				ethertype = (llc->ethertype[0] << 8) |
					llc->ethertype[1];
				l2_hdr_size += sizeof(*llc);
			}
			/* NOTE(review): these two assignments are repeated
			 * unconditionally after the if/else below, so this
			 * pair appears redundant — confirm before removing.
			 */
			msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
			msdu_info->htt.info.ethertype = ethertype;
		} else { /* 802.11 */
			struct llc_snap_hdr_t *llc;

			l2_hdr_size = ol_txrx_ieee80211_hdrsize(datap);
			llc = (struct llc_snap_hdr_t *)(datap + l2_hdr_size);
			ethertype = (llc->ethertype[0] << 8) |
				llc->ethertype[1];
			/*
			 * Don't include the LLC/SNAP header in l2_hdr_size,
			 * because l3_hdr_offset is actually supposed to refer
			 * to the header after the 802.3 or 802.11 header,
			 * which could be a LLC/SNAP header rather
			 * than the L3 header.
			 */
		}
		msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
		msdu_info->htt.info.ethertype = ethertype;
		which_key = txrx_sec_mcast;
	} else {
		which_key = txrx_sec_ucast;
	}
	peer = msdu_info->peer;
	/*
	 * msdu_info->htt.action.do_encrypt is initially set in ol_tx_desc_hl.
	 * Add more check here.
	 */
	/* Disable encryption when there is no peer or the selected key's
	 * security type is "none"; otherwise keep the earlier decision.
	 */
	msdu_info->htt.action.do_encrypt = (!peer) ? 0 :
		(peer->security[which_key].sec_type == htt_sec_type_none) ? 0 :
		msdu_info->htt.action.do_encrypt;
	/*
	 * For systems that have a frame by frame spec for whether to receive
	 * a tx completion notification, use the tx completion notification
	 * only for certain management frames, not for data frames.
	 * (In the future, this may be changed slightly, e.g. to request a
	 * tx completion notification for the final EAPOL message sent by a
	 * STA during the key delivery handshake.)
	 */
	msdu_info->htt.action.do_tx_complete = 0;

	return A_OK;
}
836
837A_STATUS
838ol_tx_classify_mgmt_extension(
839 struct ol_txrx_vdev_t *vdev,
840 struct ol_tx_desc_t *tx_desc,
841 qdf_nbuf_t tx_msdu,
842 struct ol_txrx_msdu_info_t *msdu_info)
843{
844 struct ieee80211_frame *wh;
845
846 /*
847 * The following msdu_info fields were already filled in by the
848 * ol_tx entry function or the regular ol_tx_classify_mgmt function:
849 * htt.info.vdev_id (ol_txrx_mgmt_send)
850 * htt.info.frame_type (ol_txrx_mgmt_send)
851 * htt.info.l2_hdr_type (ol_txrx_mgmt_send)
852 * htt.action.do_tx_complete (ol_txrx_mgmt_send)
853 * htt.info.peer_id (ol_tx_classify_mgmt)
854 * htt.info.ext_tid (ol_tx_classify_mgmt)
855 * htt.info.is_unicast (ol_tx_classify_mgmt)
856 * peer (ol_tx_classify_mgmt)
857 * The following fields need to be filled in by this function:
858 * htt.info.frame_subtype
859 * htt.info.l3_hdr_offset
860 * htt.action.band (NOT CURRENTLY USED)
861 * The following fields are not needed for mgmt frames, and can
862 * be left uninitialized:
863 * htt.info.ethertype
864 * htt.action.do_encrypt
865 * (This will be filled in by other SW, which knows whether
866 * the peer has robust-managment-frames enabled.)
867 */
868 wh = (struct ieee80211_frame *)qdf_nbuf_data(tx_msdu);
869 msdu_info->htt.info.frame_subtype =
870 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
871 IEEE80211_FC0_SUBTYPE_SHIFT;
872 msdu_info->htt.info.l3_hdr_offset = sizeof(struct ieee80211_frame);
873
874 return A_OK;
875}
876
877#endif /* defined(CONFIG_HL_SUPPORT) */