blob: d770ee381d66207df487d8130e8818948472ce4a [file] [log] [blame]
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301/*
Yu Wang053d3e72017-02-08 18:48:24 +08002 * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
29#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
30#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
31#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
32#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
33#include <ol_txrx.h>
34#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
35#include <ol_txrx_types.h> /* pdev stats */
36#include <ol_tx_desc.h> /* ol_tx_desc */
37#include <ol_tx_send.h> /* ol_tx_send */
38#include <ol_txrx_peer_find.h>
39#include <ol_tx_classify.h>
40#include <ol_tx_queue.h>
41#include <ipv4.h>
42#include <ipv6_defs.h>
43#include <ip_prot.h>
44#include <enet.h> /* ETHERTYPE_VLAN, etc. */
45#include <cds_ieee80211_common.h> /* ieee80211_frame */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080046#include <cdp_txrx_handle.h>
Siddarth Poddarb2011f62016-04-27 20:45:42 +053047/*
48 * In theory, this tx classify code could be used on the host or in the target.
49 * Thus, this code uses generic OS primitives, that can be aliased to either
50 * the host's OS primitives or the target's OS primitives.
51 * For now, the following #defines set up these host-specific or
52 * target-specific aliases.
53 */
54
55#if defined(CONFIG_HL_SUPPORT)
56
57#define OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
58#define OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
59
60#ifdef QCA_TX_HTT2_SUPPORT
61static void
62ol_tx_classify_htt2_frm(
63 struct ol_txrx_vdev_t *vdev,
64 qdf_nbuf_t tx_nbuf,
65 struct ol_txrx_msdu_info_t *tx_msdu_info)
66{
67 struct htt_msdu_info_t *htt = &tx_msdu_info->htt;
68 A_UINT8 candi_frm = 0;
69
70 /*
71 * Offload the frame re-order to L3 protocol and ONLY support
72 * TCP protocol now.
73 */
74 if ((htt->info.l2_hdr_type == htt_pkt_type_ethernet) &&
75 (htt->info.frame_type == htt_frm_type_data) &&
76 htt->info.is_unicast &&
77 (htt->info.ethertype == ETHERTYPE_IPV4)) {
78 struct ipv4_hdr_t *ipHdr;
79
80 ipHdr = (struct ipv4_hdr_t *)(qdf_nbuf_data(tx_nbuf) +
81 htt->info.l3_hdr_offset);
82 if (ipHdr->protocol == IP_PROTOCOL_TCP)
83 candi_frm = 1;
84 }
85
86 qdf_nbuf_set_tx_parallel_dnload_frm(tx_nbuf, candi_frm);
87}
88
89#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) \
Yun Park04097e82017-04-05 13:59:13 -070090 ol_tx_classify_htt2_frm(vdev, netbuf, msdu_info)
Siddarth Poddarb2011f62016-04-27 20:45:42 +053091#else
92#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) /* no-op */
93#endif /* QCA_TX_HTT2_SUPPORT */
94/* DHCP go with voice priority; WMM_AC_VO_TID1();*/
95#define TX_DHCP_TID 6
96
97#if defined(QCA_BAD_PEER_TX_FLOW_CL)
98static inline A_BOOL
99ol_if_tx_bad_peer_txq_overflow(
100 struct ol_txrx_pdev_t *pdev,
101 struct ol_txrx_peer_t *peer,
102 struct ol_tx_frms_queue_t *txq)
103{
104 if (peer && pdev && txq && (peer->tx_limit_flag) &&
105 (txq->frms >= pdev->tx_peer_bal.peer_bal_txq_limit))
106 return true;
107 else
108 return false;
109}
110#else
/* Stub used when QCA_BAD_PEER_TX_FLOW_CL is compiled out: bad-peer
 * flow control is disabled, so never report a queue overflow.
 */
static inline A_BOOL ol_if_tx_bad_peer_txq_overflow(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_peer_t *peer,
	struct ol_tx_frms_queue_t *txq)
{
	return false;
}
118#endif
119
120/* EAPOL go with voice priority: WMM_AC_TO_TID1(WMM_AC_VO);*/
121#define TX_EAPOL_TID 6
122
123/* ARP go with voice priority: WMM_AC_TO_TID1(pdev->arp_ac_override)*/
124#define TX_ARP_TID 6
125
126/* For non-IP case, use default TID */
127#define TX_DEFAULT_TID 0
128
129/*
130 * Determine IP TOS priority
131 * IP Tos format :
132 * (Refer Pg 57 WMM-test-plan-v1.2)
133 * IP-TOS - 8bits
134 * : DSCP(6-bits) ECN(2-bits)
135 * : DSCP - P2 P1 P0 X X X
136 * where (P2 P1 P0) form 802.1D
137 */
138static inline A_UINT8
139ol_tx_tid_by_ipv4(A_UINT8 *pkt)
140{
141 A_UINT8 ipPri, tid;
142 struct ipv4_hdr_t *ipHdr = (struct ipv4_hdr_t *)pkt;
143
144 ipPri = ipHdr->tos >> 5;
145 tid = ipPri & 0x7;
146
147 return tid;
148}
149
150static inline A_UINT8
151ol_tx_tid_by_ipv6(A_UINT8 *pkt)
152{
153 return (ipv6_traffic_class((struct ipv6_hdr_t *)pkt) >> 5) & 0x7;
154}
155
/*
 * ol_tx_set_ether_type() - locate the L3 payload and record its ethertype
 * @datap: start of the frame (802.11 raw or 802.3/ethernet, per l2_hdr_type)
 * @tx_msdu_info: classification state; fills htt.info.l3_hdr_offset and
 *	htt.info.ethertype
 *
 * For raw (802.11) frames the ethertype is read from the LLC/SNAP header
 * following the (QoS or plain) 802.11 data header.  For ethernet frames,
 * VLAN tags and 802.3 length-type (LLC/SNAP) encapsulation are skipped
 * to find the real ethertype.  Only data frames are valid input in the
 * raw case; anything else hits TXRX_ASSERT2.
 */
static inline void
ol_tx_set_ether_type(
	A_UINT8 *datap,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	A_UINT16 typeorlength;
	A_UINT8 *ptr;
	A_UINT8 *l3_data_ptr;

	if (tx_msdu_info->htt.info.l2_hdr_type == htt_pkt_type_raw) {
		/* adjust hdr_ptr to RA */
		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;

		if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
					IEEE80211_FC0_TYPE_DATA) {
			struct llc_snap_hdr_t *llc;
			/* dot11 encapsulated frame */
			struct ieee80211_qosframe *whqos =
					(struct ieee80211_qosframe *)datap;
			/* QoS data headers are 2 bytes longer than plain
			 * data headers, so pick the right L3 offset.
			 */
			if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_qosframe);
			} else {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_frame);
			}
			llc = (struct llc_snap_hdr_t *)
				(datap + tx_msdu_info->htt.info.l3_hdr_offset);
			tx_msdu_info->htt.info.ethertype =
				(llc->ethertype[0] << 8) | llc->ethertype[1];
			/*
			 * l3_hdr_offset refers to the end of the 802.3 or
			 * 802.11 header, which may be a LLC/SNAP header rather
			 * than the IP header.
			 * Thus, don't increment l3_hdr_offset += sizeof(*llc);
			 * rather, leave it as is.
			 */
		} else {
			/*
			 * This function should only be applied to data frames.
			 * For management frames, we already know to use
			 * HTT_TX_EXT_TID_MGMT.
			 */
			TXRX_ASSERT2(0);
		}
	} else if (tx_msdu_info->htt.info.l2_hdr_type ==
					htt_pkt_type_ethernet) {
		/* The ethertype/length field sits after the two MAC
		 * addresses.
		 */
		ptr = (datap + ETHERNET_ADDR_LEN * 2);
		typeorlength = (ptr[0] << 8) | ptr[1];
		/*ETHERNET_HDR_LEN;*/
		l3_data_ptr = datap + sizeof(struct ethernet_hdr_t);

		if (typeorlength == ETHERTYPE_VLAN) {
			/* skip the 4-byte 802.1Q tag to the inner type */
			ptr = (datap + ETHERNET_ADDR_LEN * 2
					+ ETHERTYPE_VLAN_LEN);
			typeorlength = (ptr[0] << 8) | ptr[1];
			l3_data_ptr += ETHERTYPE_VLAN_LEN;
		}

		if (!IS_ETHERTYPE(typeorlength)) {
			/* 802.3 header: the field was a length, so the
			 * real ethertype is in the LLC/SNAP header.
			 */
			struct llc_snap_hdr_t *llc_hdr =
				(struct llc_snap_hdr_t *)l3_data_ptr;
			typeorlength = (llc_hdr->ethertype[0] << 8) |
					llc_hdr->ethertype[1];
			l3_data_ptr += sizeof(struct llc_snap_hdr_t);
		}

		tx_msdu_info->htt.info.l3_hdr_offset = (A_UINT8)(l3_data_ptr -
								 datap);
		tx_msdu_info->htt.info.ethertype = typeorlength;
	}
}
229
230static inline A_UINT8
231ol_tx_tid_by_ether_type(
232 A_UINT8 *datap,
233 struct ol_txrx_msdu_info_t *tx_msdu_info)
234{
235 A_UINT8 tid;
236 A_UINT8 *l3_data_ptr;
237 A_UINT16 typeorlength;
238
239 l3_data_ptr = datap + tx_msdu_info->htt.info.l3_hdr_offset;
240 typeorlength = tx_msdu_info->htt.info.ethertype;
241
242 /* IP packet, do packet inspection for TID */
243 if (typeorlength == ETHERTYPE_IPV4) {
244 tid = ol_tx_tid_by_ipv4(l3_data_ptr);
245 } else if (typeorlength == ETHERTYPE_IPV6) {
246 tid = ol_tx_tid_by_ipv6(l3_data_ptr);
247 } else if (ETHERTYPE_IS_EAPOL_WAPI(typeorlength)) {
248 /* EAPOL go with voice priority*/
249 tid = TX_EAPOL_TID;
250 } else if (typeorlength == ETHERTYPE_ARP) {
251 tid = TX_ARP_TID;
252 } else {
253 /* For non-IP case, use default TID */
254 tid = TX_DEFAULT_TID;
255 }
256 return tid;
257}
258
259static inline A_UINT8
260ol_tx_tid_by_raw_type(
261 A_UINT8 *datap,
262 struct ol_txrx_msdu_info_t *tx_msdu_info)
263{
264 A_UINT8 tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
265
266 /* adjust hdr_ptr to RA */
267 struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
268
269 /* FIXME: This code does not handle 4 address formats. The QOS field
270 * is not at usual location.
271 */
272 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
273 IEEE80211_FC0_TYPE_DATA) {
274 /* dot11 encapsulated frame */
275 struct ieee80211_qosframe *whqos =
276 (struct ieee80211_qosframe *)datap;
277 if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
278 tid = whqos->i_qos[0] & IEEE80211_QOS_TID;
279 else
280 tid = HTT_NON_QOS_TID;
281 } else {
282 /*
283 * This function should only be applied to data frames.
284 * For management frames, we already know to use
285 * HTT_TX_EXT_TID_MGMT.
286 */
287 qdf_assert(0);
288 }
289 return tid;
290}
291
292static A_UINT8
293ol_tx_tid(
294 struct ol_txrx_pdev_t *pdev,
295 qdf_nbuf_t tx_nbuf,
296 struct ol_txrx_msdu_info_t *tx_msdu_info)
297{
298 A_UINT8 *datap = qdf_nbuf_data(tx_nbuf);
299 A_UINT8 tid;
300
301 if (pdev->frame_format == wlan_frm_fmt_raw) {
302 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_raw;
303
304 ol_tx_set_ether_type(datap, tx_msdu_info);
305 tid = tx_msdu_info->htt.info.ext_tid ==
306 QDF_NBUF_TX_EXT_TID_INVALID ?
307 ol_tx_tid_by_raw_type(datap, tx_msdu_info) :
308 tx_msdu_info->htt.info.ext_tid;
309 } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
310 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_ethernet;
311
312 ol_tx_set_ether_type(datap, tx_msdu_info);
313 tid =
314 tx_msdu_info->htt.info.ext_tid ==
315 QDF_NBUF_TX_EXT_TID_INVALID ?
316 ol_tx_tid_by_ether_type(datap, tx_msdu_info) :
317 tx_msdu_info->htt.info.ext_tid;
318 } else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
319 struct llc_snap_hdr_t *llc;
320
321 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
322 tx_msdu_info->htt.info.l3_hdr_offset =
323 sizeof(struct ieee80211_frame);
324 llc = (struct llc_snap_hdr_t *)
325 (datap + tx_msdu_info->htt.info.l3_hdr_offset);
326 tx_msdu_info->htt.info.ethertype =
327 (llc->ethertype[0] << 8) | llc->ethertype[1];
328 /*
329 * Native WiFi is a special case of "raw" 802.11 header format.
330 * However, we expect that for all cases that use native WiFi,
331 * the TID will be directly specified out of band.
332 */
333 tid = tx_msdu_info->htt.info.ext_tid;
334 } else {
335 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
336 "Invalid standard frame type: %d\n",
337 pdev->frame_format);
338 qdf_assert(0);
339 tid = HTT_TX_EXT_TID_INVALID;
340 }
341 return tid;
342}
343
344#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
345static inline
346struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
347 struct ol_txrx_vdev_t *vdev,
348 uint8_t *peer_id)
349{
350 struct ol_txrx_peer_t *peer = NULL;
351
352 if (vdev->hlTdlsFlag) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800353 peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530354 vdev->hl_tdls_ap_mac_addr.raw,
355 peer_id);
356 if (peer && (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
357 peer = NULL;
358 } else {
359 if (peer)
Mohit Khannab04dfcd2017-02-13 18:54:35 -0800360 OL_TXRX_PEER_INC_REF_CNT(peer);
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530361 }
362 }
363 if (!peer)
364 peer = ol_txrx_assoc_peer_find(vdev);
365
366 return peer;
367}
368
369#else
370struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
371 struct ol_txrx_vdev_t *vdev,
372 uint8_t *peer_id)
373{
374 struct ol_txrx_peer_t *peer = NULL;
Yun Park04097e82017-04-05 13:59:13 -0700375
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530376 peer = ol_txrx_assoc_peer_find(vdev);
377
378 return peer;
379}
380
381
382#endif
383
/*
 * ol_tx_classify() - classify a data frame into a tx queue
 * @vdev: virtual device transmitting the frame
 * @tx_desc: tx descriptor; its txq field is updated on success
 * @tx_nbuf: the frame being transmitted
 * @tx_msdu_info: classification state filled in here (peer, peer_id,
 *	ext_tid, is_unicast, and via ol_tx_tid the L2/L3 header info)
 *
 * Multicast/broadcast (and all OCB) frames go to the vdev mcast/bcast
 * queue; unicast frames are assigned a TID and queued on the peer's
 * per-TID queue.  On success, a reference is held on the peer recorded
 * in tx_msdu_info->peer (NULL for mcast non-STA and OCB cases); the
 * caller owns that reference.  Returns the selected tx queue, or NULL
 * on error (unknown peer, invalid TID, bad-peer queue overflow, ...).
 */
struct ol_tx_frms_queue_t *
ol_tx_classify(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_tx_frms_queue_t *txq = NULL;
	A_UINT8 *dest_addr;
	A_UINT8 tid;
	u_int8_t peer_id;

	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
	if (unlikely(NULL == dest_addr)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_ERROR,
			  "Error: dest_addr is NULL.\n");
		return NULL; /*error*/
	}
	if ((IEEE80211_IS_MULTICAST(dest_addr)) ||
	    (vdev->opmode == wlan_op_mode_ocb)) {
		txq = &vdev->txqs[OL_TX_VDEV_MCAST_BCAST];
		tx_msdu_info->htt.info.ext_tid =
					HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * The STA sends a frame with a broadcast
			 * dest addr (DA) as a
			 * unicast frame to the AP's receive addr (RA).
			 * Find the peer object that represents the AP
			 * that the STA is associated with.
			 */
			peer = ol_txrx_assoc_peer_find(vdev);
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Error: STA %pK (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast DA tx data frame w/o association\n",
					  vdev,
					  vdev->mac_addr.raw[0],
					  vdev->mac_addr.raw[1],
					  vdev->mac_addr.raw[2],
					  vdev->mac_addr.raw[3],
					  vdev->mac_addr.raw[4],
					  vdev->mac_addr.raw[5]);
				return NULL; /* error */
			} else if ((peer->security[
				OL_TXRX_PEER_SECURITY_MULTICAST].sec_type
					!= htt_sec_type_wapi) &&
				   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
				/* non-WAPI IPv4: re-classify DHCP frames */
				if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
				    QDF_NBUF_CB_GET_PACKET_TYPE(
								tx_nbuf)) {
					/* DHCP frame to go with
					 * voice priority
					 */
					txq = &peer->txqs[TX_DHCP_TID];
					tx_msdu_info->htt.info.ext_tid =
								TX_DHCP_TID;
				}
			}
			/*
			 * The following line assumes each peer object has a
			 * single ID. This is currently true, and is expected
			 * to remain true.
			 */
			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		} else if (vdev->opmode == wlan_op_mode_ocb) {
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
			/*
			 * In OCB mode, don't worry about the peer.
			 * We don't need it.
			 */
			peer = NULL;
		} else {
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
			/*
			 * Look up the vdev's BSS peer, so that the
			 * classify_extension function can check whether to
			 * encrypt multicast / broadcast frames.
			 */
			peer = ol_txrx_peer_find_hash_find_inc_ref(pdev,
							vdev->mac_addr.raw,
							0, 1);
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Error: vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast/mcast, but no self-peer found\n",
					  vdev,
					  vdev->mac_addr.raw[0],
					  vdev->mac_addr.raw[1],
					  vdev->mac_addr.raw[2],
					  vdev->mac_addr.raw[3],
					  vdev->mac_addr.raw[4],
					  vdev->mac_addr.raw[5]);
				return NULL; /* error */
			}
		}
		tx_msdu_info->htt.info.is_unicast = false;
	} else {
		/* tid would be overwritten for non QoS case*/
		tid = ol_tx_tid(pdev, tx_nbuf, tx_msdu_info);
		if ((HTT_TX_EXT_TID_INVALID == tid) ||
		    (tid >= OL_TX_NUM_TIDS)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s Error: could not classify packet into valid TID(%d).\n",
				  __func__, tid);
			return NULL;
		}
#ifdef ATH_SUPPORT_WAPI
		/* Check to see if a frame is a WAI frame */
		if (tx_msdu_info->htt.info.ethertype == ETHERTYPE_WAI) {
			/* WAI frames should not be encrypted */
			tx_msdu_info->htt.action.do_encrypt = 0;
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "Tx Frame is a WAI frame\n");
		}
#endif /* ATH_SUPPORT_WAPI */

		/*
		 * Find the peer and increment its reference count.
		 * If this vdev is an AP, use the dest addr (DA) to determine
		 * which peer STA this unicast data frame is for.
		 * If this vdev is a STA, the unicast data frame is for the
		 * AP the STA is associated with.
		 */
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * TO DO:
			 * To support TDLS, first check if there is a TDLS
			 * peer STA,
			 * and if so, check if the DA matches the TDLS peer
			 * STA's MAC address. If there is no peer TDLS STA,
			 * or if the DA is not the TDLS STA's address,
			 * then the frame is either for the AP itself, or is
			 * supposed to be sent to the AP for forwarding.
			 */
			peer = ol_tx_tdls_peer_find(pdev, vdev, &peer_id);
		} else {
			peer = ol_txrx_peer_find_hash_find_inc_ref(pdev,
								   dest_addr,
								   0, 1);
		}
		tx_msdu_info->htt.info.is_unicast = true;
		if (!peer) {
			/*
			 * Unicast data xfer can only happen to an
			 * associated peer. It is illegitimate to send unicast
			 * data if there is no peer to send it to.
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "Error: vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x) trying to send unicast tx data frame to an unknown peer\n",
				  vdev,
				  vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
				  vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
				  vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
			return NULL; /* error */
		}
		TX_SCHED_DEBUG_PRINT("Peer found\n");
		if (!peer->qos_capable) {
			tid = OL_TX_NON_QOS_TID;
		} else if ((peer->security[
				OL_TXRX_PEER_SECURITY_UNICAST].sec_type
					!= htt_sec_type_wapi) &&
			   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
			if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
			    QDF_NBUF_CB_GET_PACKET_TYPE(tx_nbuf))
				/* DHCP frame to go with voice priority */
				tid = TX_DHCP_TID;
		}

		/* Only allow encryption when in authenticated state */
		if (OL_TXRX_PEER_STATE_AUTH != peer->state)
			tx_msdu_info->htt.action.do_encrypt = 0;

		txq = &peer->txqs[tid];
		tx_msdu_info->htt.info.ext_tid = tid;
		/*
		 * The following line assumes each peer object has a single ID.
		 * This is currently true, and is expected to remain true.
		 */
		tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		/*
		 * WORKAROUND - check that the peer ID is valid.
		 * If tx data is provided before ol_rx_peer_map_handler is
		 * called to record the peer ID specified by the target,
		 * then we could end up here with an invalid peer ID.
		 * TO DO: rather than dropping the tx frame, pause the txq it
		 * goes into, then fill in the peer ID for the entries in the
		 * txq when the peer_map event provides the peer ID, and then
		 * unpause the txq.
		 */
		if (tx_msdu_info->htt.info.peer_id == HTT_INVALID_PEER_ID) {
			if (peer) {
				ol_txrx_info(
					"%s: remove the peer for invalid peer_id %pK\n",
					__func__, peer);
				/* remove the peer reference added above */
				OL_TXRX_PEER_UNREF_DELETE(peer);
				tx_msdu_info->peer = NULL;
			}
			return NULL;
		}
	}
	tx_msdu_info->peer = peer;
	/*
	 * NOTE(review): this early return does not drop the peer reference
	 * taken above, unlike the invalid-peer-id path — looks like a
	 * possible reference leak; confirm against the unref conventions
	 * used elsewhere in this file.
	 */
	if (ol_if_tx_bad_peer_txq_overflow(pdev, peer, txq))
		return NULL;
	/*
	 * If relevant, do a deeper inspection to determine additional
	 * characteristics of the tx frame.
	 * If the frame is invalid, then the txq will be set to NULL to
	 * indicate an error.
	 */
	OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, tx_nbuf, tx_msdu_info, txq);
	if (IEEE80211_IS_MULTICAST(dest_addr) && vdev->opmode !=
				wlan_op_mode_sta && tx_msdu_info->peer !=
								NULL) {
		ol_txrx_dbg(
			"%s: remove the peer reference %pK\n",
			__func__, peer);
		/* remove the peer reference added above */
		OL_TXRX_PEER_UNREF_DELETE(tx_msdu_info->peer);
		/* Making peer NULL in case if multicast non STA mode */
		tx_msdu_info->peer = NULL;
	}

	/* Whether this frame can download though HTT2 data pipe or not. */
	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);

	/* Update Tx Queue info */
	tx_desc->txq = txq;

	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	return txq;
}
623
624struct ol_tx_frms_queue_t *
625ol_tx_classify_mgmt(
626 struct ol_txrx_vdev_t *vdev,
627 struct ol_tx_desc_t *tx_desc,
628 qdf_nbuf_t tx_nbuf,
629 struct ol_txrx_msdu_info_t *tx_msdu_info)
630{
631 struct ol_txrx_pdev_t *pdev = vdev->pdev;
632 struct ol_txrx_peer_t *peer = NULL;
633 struct ol_tx_frms_queue_t *txq = NULL;
634 A_UINT8 *dest_addr;
635 union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
636
637 TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
638 dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
jge35cba622017-06-12 17:15:15 +0800639 if (unlikely(NULL == dest_addr)) {
640 QDF_TRACE(QDF_MODULE_ID_TXRX,
641 QDF_TRACE_LEVEL_ERROR,
642 "Error: dest_addr is NULL.\n");
643 return NULL; /*error*/
644 }
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530645 if (IEEE80211_IS_MULTICAST(dest_addr)) {
646 /*
647 * AP: beacons are broadcast,
648 * public action frames (e.g. extended channel
649 * switch announce) may be broadcast
650 * STA: probe requests can be either broadcast or unicast
651 */
652 txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
653 tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
654 tx_msdu_info->peer = NULL;
655 tx_msdu_info->htt.info.is_unicast = 0;
656 } else {
657 /*
658 * Find the peer and increment its reference count.
659 * If this vdev is an AP, use the receiver addr (RA) to
660 * determine which peer STA this unicast mgmt frame is for.
661 * If this vdev is a STA, the unicast mgmt frame is for the
662 * AP the STA is associated with.
663 * Probe request / response and Assoc request / response are
664 * sent before the peer exists - in this case, use the
665 * vdev's default tx queue.
666 */
667 if (vdev->opmode == wlan_op_mode_sta) {
668 /*
669 * TO DO:
670 * To support TDLS, first check if there is a TDLS
671 * peer STA, and if so, check if the DA matches
672 * the TDLS peer STA's MAC address.
673 */
674 peer = ol_txrx_assoc_peer_find(vdev);
675 /*
676 * Some special case(preauth for example) needs to send
677 * unicast mgmt frame to unassociated AP. In such case,
678 * we need to check if dest addr match the associated
679 * peer addr. If not, we set peer as NULL to queue this
680 * frame to vdev queue.
681 */
682 if (peer) {
Orhan K AKYILDIZ0278cd02016-12-08 18:19:58 -0800683 int rcnt;
684
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530685 qdf_mem_copy(
686 &local_mac_addr_aligned.raw[0],
687 dest_addr, OL_TXRX_MAC_ADDR_LEN);
688 mac_addr = &local_mac_addr_aligned;
689 if (ol_txrx_peer_find_mac_addr_cmp(
690 mac_addr,
691 &peer->mac_addr) != 0) {
Mohit Khannab04dfcd2017-02-13 18:54:35 -0800692 rcnt = OL_TXRX_PEER_UNREF_DELETE(peer);
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530693 peer = NULL;
694 }
695 }
696 } else {
697 /* find the peer and increment its reference count */
Mohit Khannababadb82017-02-21 18:54:19 -0800698 peer = ol_txrx_peer_find_hash_find_inc_ref(pdev,
699 dest_addr,
700 0, 1);
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530701 }
702 tx_msdu_info->peer = peer;
703 if (!peer) {
704 txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
705 tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
706 } else {
707 txq = &peer->txqs[HTT_TX_EXT_TID_MGMT];
708 tx_msdu_info->htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
709 /*
710 * The following line assumes each peer object has a
711 * single ID. This is currently true, and is expected
712 * to remain true.
713 */
714 tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
715 }
716 tx_msdu_info->htt.info.is_unicast = 1;
717 }
718 /*
719 * If relevant, do a deeper inspection to determine additional
720 * characteristics of the tx frame.
721 * If the frame is invalid, then the txq will be set to NULL to
722 * indicate an error.
723 */
724 OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, tx_nbuf,
725 tx_msdu_info, txq);
726
727 /* Whether this frame can download though HTT2 data pipe or not. */
728 OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);
729
730 /* Update Tx Queue info */
731 tx_desc->txq = txq;
732
733 TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
734 return txq;
735}
736
737A_STATUS
738ol_tx_classify_extension(
739 struct ol_txrx_vdev_t *vdev,
740 struct ol_tx_desc_t *tx_desc,
741 qdf_nbuf_t tx_msdu,
742 struct ol_txrx_msdu_info_t *msdu_info)
743{
744 A_UINT8 *datap = qdf_nbuf_data(tx_msdu);
745 struct ol_txrx_peer_t *peer;
746 int which_key;
747
748 /*
749 * The following msdu_info fields were already filled in by the
750 * ol_tx entry function or the regular ol_tx_classify function:
751 * htt.info.vdev_id (ol_tx_hl or ol_tx_non_std_hl)
752 * htt.info.ext_tid (ol_tx_non_std_hl or ol_tx_classify)
753 * htt.info.frame_type (ol_tx_hl or ol_tx_non_std_hl)
754 * htt.info.l2_hdr_type (ol_tx_hl or ol_tx_non_std_hl)
755 * htt.info.is_unicast (ol_tx_classify)
756 * htt.info.peer_id (ol_tx_classify)
757 * peer (ol_tx_classify)
758 * if (is_unicast) {
759 * htt.info.ethertype (ol_tx_classify)
760 * htt.info.l3_hdr_offset (ol_tx_classify)
761 * }
762 * The following fields need to be filled in by this function:
763 * if (!is_unicast) {
764 * htt.info.ethertype
765 * htt.info.l3_hdr_offset
766 * }
767 * htt.action.band (NOT CURRENTLY USED)
768 * htt.action.do_encrypt
769 * htt.action.do_tx_complete
770 * The following fields are not needed for data frames, and can
771 * be left uninitialized:
772 * htt.info.frame_subtype
773 */
774
775 if (!msdu_info->htt.info.is_unicast) {
776 int l2_hdr_size;
777 A_UINT16 ethertype;
778
779 if (msdu_info->htt.info.l2_hdr_type == htt_pkt_type_ethernet) {
780 struct ethernet_hdr_t *eh;
781
782 eh = (struct ethernet_hdr_t *)datap;
783 l2_hdr_size = sizeof(*eh);
784 ethertype = (eh->ethertype[0] << 8) | eh->ethertype[1];
785
786 if (ethertype == ETHERTYPE_VLAN) {
787 struct ethernet_vlan_hdr_t *evh;
788
789 evh = (struct ethernet_vlan_hdr_t *)datap;
790 l2_hdr_size = sizeof(*evh);
791 ethertype = (evh->ethertype[0] << 8) |
792 evh->ethertype[1];
793 }
794
795 if (!IS_ETHERTYPE(ethertype)) {
796 /* 802.3 header*/
797 struct llc_snap_hdr_t *llc =
798 (struct llc_snap_hdr_t *)(datap +
799 l2_hdr_size);
800 ethertype = (llc->ethertype[0] << 8) |
801 llc->ethertype[1];
802 l2_hdr_size += sizeof(*llc);
803 }
804 msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
805 msdu_info->htt.info.ethertype = ethertype;
806 } else { /* 802.11 */
807 struct llc_snap_hdr_t *llc;
808 l2_hdr_size = ol_txrx_ieee80211_hdrsize(datap);
809 llc = (struct llc_snap_hdr_t *)(datap + l2_hdr_size);
810 ethertype = (llc->ethertype[0] << 8) |
811 llc->ethertype[1];
812 /*
813 * Don't include the LLC/SNAP header in l2_hdr_size,
814 * because l3_hdr_offset is actually supposed to refer
815 * to the header after the 802.3 or 802.11 header,
816 * which could be a LLC/SNAP header rather
817 * than the L3 header.
818 */
819 }
820 msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
821 msdu_info->htt.info.ethertype = ethertype;
822 which_key = txrx_sec_mcast;
823 } else {
824 which_key = txrx_sec_ucast;
825 }
826 peer = msdu_info->peer;
827 /*
828 * msdu_info->htt.action.do_encrypt is initially set in ol_tx_desc_hl.
829 * Add more check here.
830 */
831 msdu_info->htt.action.do_encrypt = (!peer) ? 0 :
832 (peer->security[which_key].sec_type == htt_sec_type_none) ? 0 :
833 msdu_info->htt.action.do_encrypt;
834 /*
835 * For systems that have a frame by frame spec for whether to receive
836 * a tx completion notification, use the tx completion notification
837 * only for certain management frames, not for data frames.
838 * (In the future, this may be changed slightly, e.g. to request a
839 * tx completion notification for the final EAPOL message sent by a
840 * STA during the key delivery handshake.)
841 */
842 msdu_info->htt.action.do_tx_complete = 0;
843
844 return A_OK;
845}
846
847A_STATUS
848ol_tx_classify_mgmt_extension(
849 struct ol_txrx_vdev_t *vdev,
850 struct ol_tx_desc_t *tx_desc,
851 qdf_nbuf_t tx_msdu,
852 struct ol_txrx_msdu_info_t *msdu_info)
853{
854 struct ieee80211_frame *wh;
855
856 /*
857 * The following msdu_info fields were already filled in by the
858 * ol_tx entry function or the regular ol_tx_classify_mgmt function:
859 * htt.info.vdev_id (ol_txrx_mgmt_send)
860 * htt.info.frame_type (ol_txrx_mgmt_send)
861 * htt.info.l2_hdr_type (ol_txrx_mgmt_send)
862 * htt.action.do_tx_complete (ol_txrx_mgmt_send)
863 * htt.info.peer_id (ol_tx_classify_mgmt)
864 * htt.info.ext_tid (ol_tx_classify_mgmt)
865 * htt.info.is_unicast (ol_tx_classify_mgmt)
866 * peer (ol_tx_classify_mgmt)
867 * The following fields need to be filled in by this function:
868 * htt.info.frame_subtype
869 * htt.info.l3_hdr_offset
870 * htt.action.band (NOT CURRENTLY USED)
871 * The following fields are not needed for mgmt frames, and can
872 * be left uninitialized:
873 * htt.info.ethertype
874 * htt.action.do_encrypt
875 * (This will be filled in by other SW, which knows whether
876 * the peer has robust-managment-frames enabled.)
877 */
878 wh = (struct ieee80211_frame *)qdf_nbuf_data(tx_msdu);
879 msdu_info->htt.info.frame_subtype =
880 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
881 IEEE80211_FC0_SUBTYPE_SHIFT;
882 msdu_info->htt.info.l3_hdr_offset = sizeof(struct ieee80211_frame);
883
884 return A_OK;
885}
886
887#endif /* defined(CONFIG_HL_SUPPORT) */