blob: 25c62c7124917e5c1c8a45954fc3e92f49abe185 [file] [log] [blame]
Siddarth Poddarb2011f62016-04-27 20:45:42 +05301/*
Frank Liu4362e462018-01-16 11:51:55 +08002 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
Siddarth Poddarb2011f62016-04-27 20:45:42 +05303 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28#include <qdf_nbuf.h> /* qdf_nbuf_t, etc. */
29#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
30#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
31#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
32#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
33#include <ol_txrx.h>
34#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
35#include <ol_txrx_types.h> /* pdev stats */
36#include <ol_tx_desc.h> /* ol_tx_desc */
37#include <ol_tx_send.h> /* ol_tx_send */
38#include <ol_txrx_peer_find.h>
39#include <ol_tx_classify.h>
40#include <ol_tx_queue.h>
41#include <ipv4.h>
42#include <ipv6_defs.h>
43#include <ip_prot.h>
44#include <enet.h> /* ETHERTYPE_VLAN, etc. */
45#include <cds_ieee80211_common.h> /* ieee80211_frame */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080046#include <cdp_txrx_handle.h>
Siddarth Poddarb2011f62016-04-27 20:45:42 +053047/*
48 * In theory, this tx classify code could be used on the host or in the target.
49 * Thus, this code uses generic OS primitives, that can be aliased to either
50 * the host's OS primitives or the target's OS primitives.
51 * For now, the following #defines set up these host-specific or
52 * target-specific aliases.
53 */
54
55#if defined(CONFIG_HL_SUPPORT)
56
57#define OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
58#define OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, netbuf, msdu_info, txq)
59
60#ifdef QCA_TX_HTT2_SUPPORT
61static void
62ol_tx_classify_htt2_frm(
63 struct ol_txrx_vdev_t *vdev,
64 qdf_nbuf_t tx_nbuf,
65 struct ol_txrx_msdu_info_t *tx_msdu_info)
66{
67 struct htt_msdu_info_t *htt = &tx_msdu_info->htt;
68 A_UINT8 candi_frm = 0;
69
70 /*
71 * Offload the frame re-order to L3 protocol and ONLY support
72 * TCP protocol now.
73 */
74 if ((htt->info.l2_hdr_type == htt_pkt_type_ethernet) &&
75 (htt->info.frame_type == htt_frm_type_data) &&
76 htt->info.is_unicast &&
77 (htt->info.ethertype == ETHERTYPE_IPV4)) {
78 struct ipv4_hdr_t *ipHdr;
79
80 ipHdr = (struct ipv4_hdr_t *)(qdf_nbuf_data(tx_nbuf) +
81 htt->info.l3_hdr_offset);
82 if (ipHdr->protocol == IP_PROTOCOL_TCP)
83 candi_frm = 1;
84 }
85
86 qdf_nbuf_set_tx_parallel_dnload_frm(tx_nbuf, candi_frm);
87}
88
89#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) \
Yun Park04097e82017-04-05 13:59:13 -070090 ol_tx_classify_htt2_frm(vdev, netbuf, msdu_info)
Siddarth Poddarb2011f62016-04-27 20:45:42 +053091#else
92#define OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, netbuf, msdu_info) /* no-op */
93#endif /* QCA_TX_HTT2_SUPPORT */
94/* DHCP go with voice priority; WMM_AC_VO_TID1();*/
95#define TX_DHCP_TID 6
96
97#if defined(QCA_BAD_PEER_TX_FLOW_CL)
98static inline A_BOOL
99ol_if_tx_bad_peer_txq_overflow(
100 struct ol_txrx_pdev_t *pdev,
101 struct ol_txrx_peer_t *peer,
102 struct ol_tx_frms_queue_t *txq)
103{
104 if (peer && pdev && txq && (peer->tx_limit_flag) &&
105 (txq->frms >= pdev->tx_peer_bal.peer_bal_txq_limit))
106 return true;
107 else
108 return false;
109}
110#else
111static inline A_BOOL ol_if_tx_bad_peer_txq_overflow(
112 struct ol_txrx_pdev_t *pdev,
113 struct ol_txrx_peer_t *peer,
114 struct ol_tx_frms_queue_t *txq)
115{
116 return false;
117}
118#endif
119
120/* EAPOL go with voice priority: WMM_AC_TO_TID1(WMM_AC_VO);*/
121#define TX_EAPOL_TID 6
122
123/* ARP go with voice priority: WMM_AC_TO_TID1(pdev->arp_ac_override)*/
124#define TX_ARP_TID 6
125
126/* For non-IP case, use default TID */
127#define TX_DEFAULT_TID 0
128
129/*
130 * Determine IP TOS priority
131 * IP Tos format :
132 * (Refer Pg 57 WMM-test-plan-v1.2)
133 * IP-TOS - 8bits
134 * : DSCP(6-bits) ECN(2-bits)
135 * : DSCP - P2 P1 P0 X X X
136 * where (P2 P1 P0) form 802.1D
137 */
138static inline A_UINT8
139ol_tx_tid_by_ipv4(A_UINT8 *pkt)
140{
141 A_UINT8 ipPri, tid;
142 struct ipv4_hdr_t *ipHdr = (struct ipv4_hdr_t *)pkt;
143
144 ipPri = ipHdr->tos >> 5;
145 tid = ipPri & 0x7;
146
147 return tid;
148}
149
150static inline A_UINT8
151ol_tx_tid_by_ipv6(A_UINT8 *pkt)
152{
153 return (ipv6_traffic_class((struct ipv6_hdr_t *)pkt) >> 5) & 0x7;
154}
155
/*
 * Parse the frame's L2 header and record the ethertype and the offset of
 * the first byte after the L2 header into tx_msdu_info->htt.info.
 *
 * For raw (802.11) frames: the QoS / non-QoS header length is used as the
 * offset and the ethertype is read from the LLC/SNAP header that follows.
 * For ethernet frames: VLAN tags and 802.3 length-field (LLC/SNAP) framing
 * are skipped before the ethertype is read.
 *
 * NOTE: l3_hdr_offset names the end of the L2 header; for raw frames it may
 * point at a LLC/SNAP header rather than the IP header itself.
 */
static inline void
ol_tx_set_ether_type(
	A_UINT8 *datap,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	A_UINT16 typeorlength;
	A_UINT8 *ptr;
	A_UINT8 *l3_data_ptr;

	if (tx_msdu_info->htt.info.l2_hdr_type == htt_pkt_type_raw) {
		/* adjust hdr_ptr to RA */
		struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;

		if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
		    IEEE80211_FC0_TYPE_DATA) {
			struct llc_snap_hdr_t *llc;
			/* dot11 encapsulated frame */
			struct ieee80211_qosframe *whqos =
				(struct ieee80211_qosframe *)datap;

			/* QoS data frames carry a 2-byte QoS control field */
			if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_qosframe);
			} else {
				tx_msdu_info->htt.info.l3_hdr_offset =
					sizeof(struct ieee80211_frame);
			}
			llc = (struct llc_snap_hdr_t *)
				(datap + tx_msdu_info->htt.info.l3_hdr_offset);
			tx_msdu_info->htt.info.ethertype =
				(llc->ethertype[0] << 8) | llc->ethertype[1];
			/*
			 * l3_hdr_offset refers to the end of the 802.3 or
			 * 802.11 header, which may be a LLC/SNAP header rather
			 * than the IP header.
			 * Thus, don't increment l3_hdr_offset += sizeof(*llc);
			 * rather, leave it as is.
			 */
		} else {
			/*
			 * This function should only be applied to data frames.
			 * For management frames, we already know to use
			 * HTT_TX_EXT_TID_MGMT.
			 */
			TXRX_ASSERT2(0);
		}
	} else if (tx_msdu_info->htt.info.l2_hdr_type ==
					htt_pkt_type_ethernet) {
		/* ethertype/length field sits after the two MAC addresses */
		ptr = (datap + ETHERNET_ADDR_LEN * 2);
		typeorlength = (ptr[0] << 8) | ptr[1];
		/*ETHERNET_HDR_LEN;*/
		l3_data_ptr = datap + sizeof(struct ethernet_hdr_t);

		/* skip an 802.1Q VLAN tag if present */
		if (typeorlength == ETHERTYPE_VLAN) {
			ptr = (datap + ETHERNET_ADDR_LEN * 2
					+ ETHERTYPE_VLAN_LEN);
			typeorlength = (ptr[0] << 8) | ptr[1];
			l3_data_ptr += ETHERTYPE_VLAN_LEN;
		}

		/* value below 0x600 is an 802.3 length, not an ethertype */
		if (!IS_ETHERTYPE(typeorlength)) {
			/* 802.3 header*/
			struct llc_snap_hdr_t *llc_hdr =
				(struct llc_snap_hdr_t *)l3_data_ptr;

			typeorlength = (llc_hdr->ethertype[0] << 8) |
						llc_hdr->ethertype[1];
			l3_data_ptr += sizeof(struct llc_snap_hdr_t);
		}

		tx_msdu_info->htt.info.l3_hdr_offset = (A_UINT8)(l3_data_ptr -
									datap);
		tx_msdu_info->htt.info.ethertype = typeorlength;
	}
}
229
230static inline A_UINT8
231ol_tx_tid_by_ether_type(
232 A_UINT8 *datap,
233 struct ol_txrx_msdu_info_t *tx_msdu_info)
234{
235 A_UINT8 tid;
236 A_UINT8 *l3_data_ptr;
237 A_UINT16 typeorlength;
238
239 l3_data_ptr = datap + tx_msdu_info->htt.info.l3_hdr_offset;
240 typeorlength = tx_msdu_info->htt.info.ethertype;
241
242 /* IP packet, do packet inspection for TID */
243 if (typeorlength == ETHERTYPE_IPV4) {
244 tid = ol_tx_tid_by_ipv4(l3_data_ptr);
245 } else if (typeorlength == ETHERTYPE_IPV6) {
246 tid = ol_tx_tid_by_ipv6(l3_data_ptr);
247 } else if (ETHERTYPE_IS_EAPOL_WAPI(typeorlength)) {
248 /* EAPOL go with voice priority*/
249 tid = TX_EAPOL_TID;
250 } else if (typeorlength == ETHERTYPE_ARP) {
251 tid = TX_ARP_TID;
252 } else {
253 /* For non-IP case, use default TID */
254 tid = TX_DEFAULT_TID;
255 }
256 return tid;
257}
258
259static inline A_UINT8
260ol_tx_tid_by_raw_type(
261 A_UINT8 *datap,
262 struct ol_txrx_msdu_info_t *tx_msdu_info)
263{
264 A_UINT8 tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
265
266 /* adjust hdr_ptr to RA */
267 struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
268
269 /* FIXME: This code does not handle 4 address formats. The QOS field
270 * is not at usual location.
271 */
272 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
273 IEEE80211_FC0_TYPE_DATA) {
274 /* dot11 encapsulated frame */
275 struct ieee80211_qosframe *whqos =
276 (struct ieee80211_qosframe *)datap;
277 if (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
278 tid = whqos->i_qos[0] & IEEE80211_QOS_TID;
279 else
280 tid = HTT_NON_QOS_TID;
281 } else {
282 /*
283 * This function should only be applied to data frames.
284 * For management frames, we already know to use
285 * HTT_TX_EXT_TID_MGMT.
286 */
287 qdf_assert(0);
288 }
289 return tid;
290}
291
292static A_UINT8
293ol_tx_tid(
294 struct ol_txrx_pdev_t *pdev,
295 qdf_nbuf_t tx_nbuf,
296 struct ol_txrx_msdu_info_t *tx_msdu_info)
297{
298 A_UINT8 *datap = qdf_nbuf_data(tx_nbuf);
299 A_UINT8 tid;
300
301 if (pdev->frame_format == wlan_frm_fmt_raw) {
302 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_raw;
303
304 ol_tx_set_ether_type(datap, tx_msdu_info);
305 tid = tx_msdu_info->htt.info.ext_tid ==
306 QDF_NBUF_TX_EXT_TID_INVALID ?
307 ol_tx_tid_by_raw_type(datap, tx_msdu_info) :
308 tx_msdu_info->htt.info.ext_tid;
309 } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
310 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_ethernet;
311
312 ol_tx_set_ether_type(datap, tx_msdu_info);
313 tid =
314 tx_msdu_info->htt.info.ext_tid ==
315 QDF_NBUF_TX_EXT_TID_INVALID ?
316 ol_tx_tid_by_ether_type(datap, tx_msdu_info) :
317 tx_msdu_info->htt.info.ext_tid;
318 } else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
319 struct llc_snap_hdr_t *llc;
320
321 tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
322 tx_msdu_info->htt.info.l3_hdr_offset =
323 sizeof(struct ieee80211_frame);
324 llc = (struct llc_snap_hdr_t *)
325 (datap + tx_msdu_info->htt.info.l3_hdr_offset);
326 tx_msdu_info->htt.info.ethertype =
327 (llc->ethertype[0] << 8) | llc->ethertype[1];
328 /*
329 * Native WiFi is a special case of "raw" 802.11 header format.
330 * However, we expect that for all cases that use native WiFi,
331 * the TID will be directly specified out of band.
332 */
333 tid = tx_msdu_info->htt.info.ext_tid;
334 } else {
335 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
336 "Invalid standard frame type: %d\n",
337 pdev->frame_format);
338 qdf_assert(0);
339 tid = HTT_TX_EXT_TID_INVALID;
340 }
341 return tid;
342}
343
#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
/*
 * Find the peer a STA vdev should transmit to, preferring the TDLS AP
 * peer when the vdev's TDLS flag is set.
 *
 * On success the returned peer holds a reference taken under
 * PEER_DEBUG_ID_OL_INTERNAL; the caller is responsible for releasing it.
 * If the TDLS peer exists but its peer ID has not yet been mapped by the
 * target, the reference is dropped and the associated-AP peer is used
 * instead. *peer_id is written only when a valid TDLS peer is found.
 */
static inline
struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
						struct ol_txrx_vdev_t *vdev,
						uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer = NULL;

	if (vdev->hlTdlsFlag) {
		peer = ol_txrx_peer_find_hash_find_get_ref(pdev,
					vdev->hl_tdls_ap_mac_addr.raw, 0, 1,
					PEER_DEBUG_ID_OL_INTERNAL);

		if (peer && (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
			/* target has not mapped this peer yet; fall back */
			ol_txrx_peer_release_ref(peer,
						 PEER_DEBUG_ID_OL_INTERNAL);
			peer = NULL;
		} else {
			if (peer)
				*peer_id = peer->local_id;
		}
	}
	if (!peer)
		peer = ol_txrx_assoc_peer_find(vdev);

	return peer;
}

#else
/*
 * TDLS disabled: always resolve to the AP the STA is associated with.
 * peer_id is left untouched.
 */
static struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
						struct ol_txrx_vdev_t *vdev,
						uint8_t *peer_id)
{
	struct ol_txrx_peer_t *peer = NULL;

	peer = ol_txrx_assoc_peer_find(vdev);

	return peer;
}


#endif
386
/**
 * ol_tx_classify() - classify a tx data frame and select its tx queue
 * @vdev: virtual device the frame is sent on
 * @tx_desc: tx descriptor; its txq field is updated with the result
 * @tx_nbuf: the tx frame
 * @tx_msdu_info: per-MSDU metadata; peer, peer_id, ext_tid, is_unicast
 *                (and, via ol_tx_tid, the L2/L3 header info) are filled in
 *
 * Multicast/broadcast frames (and all OCB frames) go to the vdev's
 * mcast/bcast queue; unicast frames are classified to a per-peer, per-TID
 * queue. For unicast frames (and for non-STA/non-OCB mcast), a peer
 * reference is taken and left in tx_msdu_info->peer for the caller to
 * release; for non-STA-mode multicast the reference is dropped before
 * returning and tx_msdu_info->peer is NULL.
 *
 * Return: the selected tx queue, or NULL on error (frame must be dropped).
 */
struct ol_tx_frms_queue_t *
ol_tx_classify(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_tx_frms_queue_t *txq = NULL;
	A_UINT8 *dest_addr;
	A_UINT8 tid;
	u_int8_t peer_id;

	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
	if (unlikely(NULL == dest_addr)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_ERROR,
			  "Error: dest_addr is NULL.\n");
		return NULL; /*error*/
	}
	if ((IEEE80211_IS_MULTICAST(dest_addr)) ||
	    (vdev->opmode == wlan_op_mode_ocb)) {
		txq = &vdev->txqs[OL_TX_VDEV_MCAST_BCAST];
		tx_msdu_info->htt.info.ext_tid =
					HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * The STA sends a frame with a broadcast
			 * dest addr (DA) as a
			 * unicast frame to the AP's receive addr (RA).
			 * Find the peer object that represents the AP
			 * that the STA is associated with.
			 */
			peer = ol_txrx_assoc_peer_find(vdev);
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Error: STA %pK (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast DA tx data frame w/o association\n",
					  vdev,
					  vdev->mac_addr.raw[0],
					  vdev->mac_addr.raw[1],
					  vdev->mac_addr.raw[2],
					  vdev->mac_addr.raw[3],
					  vdev->mac_addr.raw[4],
					  vdev->mac_addr.raw[5]);
				return NULL; /* error */
			} else if ((peer->security[
				OL_TXRX_PEER_SECURITY_MULTICAST].sec_type
					!= htt_sec_type_wapi) &&
				   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
				if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
					QDF_NBUF_CB_GET_PACKET_TYPE(
								tx_nbuf)) {
					/* DHCP frame to go with
					 * voice priority
					 */
					txq = &peer->txqs[TX_DHCP_TID];
					tx_msdu_info->htt.info.ext_tid =
								TX_DHCP_TID;
				}
			}
			/*
			 * The following line assumes each peer object has a
			 * single ID. This is currently true, and is expected
			 * to remain true.
			 */
			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		} else if (vdev->opmode == wlan_op_mode_ocb) {
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
			/*
			 * In OCB mode, don't worry about the peer.
			 * We don't need it.
			 */
			peer = NULL;
		} else {
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
			/*
			 * Look up the vdev's BSS peer, so that the
			 * classify_extension function can check whether to
			 * encrypt multicast / broadcast frames.
			 */
			peer = ol_txrx_peer_find_hash_find_get_ref
						(pdev,
						 vdev->mac_addr.raw,
						 0, 1,
						 PEER_DEBUG_ID_OL_INTERNAL);
			if (!peer) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_ERROR,
					  "Error: vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x) trying to send bcast/mcast, but no self-peer found\n",
					  vdev,
					  vdev->mac_addr.raw[0],
					  vdev->mac_addr.raw[1],
					  vdev->mac_addr.raw[2],
					  vdev->mac_addr.raw[3],
					  vdev->mac_addr.raw[4],
					  vdev->mac_addr.raw[5]);
				return NULL; /* error */
			}
		}
		tx_msdu_info->htt.info.is_unicast = false;
	} else {
		/* tid would be overwritten for non QoS case*/
		tid = ol_tx_tid(pdev, tx_nbuf, tx_msdu_info);
		if ((HTT_TX_EXT_TID_INVALID == tid) ||
		    (tid >= OL_TX_NUM_TIDS)) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s Error: could not classify packet into valid TID(%d).\n",
				  __func__, tid);
			return NULL;
		}
#ifdef ATH_SUPPORT_WAPI
		/* Check to see if a frame is a WAI frame */
		if (tx_msdu_info->htt.info.ethertype == ETHERTYPE_WAI) {
			/* WAI frames should not be encrypted */
			tx_msdu_info->htt.action.do_encrypt = 0;
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
				  "Tx Frame is a WAI frame\n");
		}
#endif /* ATH_SUPPORT_WAPI */

		/*
		 * Find the peer and increment its reference count.
		 * If this vdev is an AP, use the dest addr (DA) to determine
		 * which peer STA this unicast data frame is for.
		 * If this vdev is a STA, the unicast data frame is for the
		 * AP the STA is associated with.
		 */
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * TO DO:
			 * To support TDLS, first check if there is a TDLS
			 * peer STA,
			 * and if so, check if the DA matches the TDLS peer
			 * STA's MAC address. If there is no peer TDLS STA,
			 * or if the DA is not the TDLS STA's address,
			 * then the frame is either for the AP itself, or is
			 * supposed to be sent to the AP for forwarding.
			 */
			peer = ol_tx_tdls_peer_find(pdev, vdev, &peer_id);
		} else {
			peer = ol_txrx_peer_find_hash_find_get_ref(pdev,
						dest_addr,
						0, 1,
						PEER_DEBUG_ID_OL_INTERNAL);
		}
		tx_msdu_info->htt.info.is_unicast = true;
		if (!peer) {
			/*
			 * Unicast data xfer can only happen to an
			 * associated peer. It is illegitimate to send unicast
			 * data if there is no peer to send it to.
			 */
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "Error: vdev %pK (%02x:%02x:%02x:%02x:%02x:%02x) trying to send unicast tx data frame to an unknown peer\n",
				  vdev,
				  vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
				  vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
				  vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
			return NULL; /* error */
		}
		TX_SCHED_DEBUG_PRINT("Peer found\n");
		if (!peer->qos_capable) {
			tid = OL_TX_NON_QOS_TID;
		} else if ((peer->security[
				OL_TXRX_PEER_SECURITY_UNICAST].sec_type
					!= htt_sec_type_wapi) &&
			   (qdf_nbuf_is_ipv4_pkt(tx_nbuf) == true)) {
			if (QDF_NBUF_CB_PACKET_TYPE_DHCP ==
			    QDF_NBUF_CB_GET_PACKET_TYPE(tx_nbuf))
				/* DHCP frame to go with voice priority */
				tid = TX_DHCP_TID;
		}

		/* Only allow encryption when in authenticated state */
		if (OL_TXRX_PEER_STATE_AUTH != peer->state)
			tx_msdu_info->htt.action.do_encrypt = 0;

		txq = &peer->txqs[tid];
		tx_msdu_info->htt.info.ext_tid = tid;
		/*
		 * The following line assumes each peer object has a single ID.
		 * This is currently true, and is expected to remain true.
		 */
		tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		/*
		 * WORKAROUND - check that the peer ID is valid.
		 * If tx data is provided before ol_rx_peer_map_handler is
		 * called to record the peer ID specified by the target,
		 * then we could end up here with an invalid peer ID.
		 * TO DO: rather than dropping the tx frame, pause the txq it
		 * goes into, then fill in the peer ID for the entries in the
		 * txq when the peer_map event provides the peer ID, and then
		 * unpause the txq.
		 */
		if (tx_msdu_info->htt.info.peer_id == HTT_INVALID_PEER_ID) {
			if (peer) {
				ol_txrx_info(
					"%s: remove the peer for invalid peer_id %pK\n",
					__func__, peer);
				/* remove the peer reference added above */
				ol_txrx_peer_release_ref
						(peer,
						 PEER_DEBUG_ID_OL_INTERNAL);
				tx_msdu_info->peer = NULL;
			}
			return NULL;
		}
	}
	tx_msdu_info->peer = peer;
	if (ol_if_tx_bad_peer_txq_overflow(pdev, peer, txq))
		return NULL;
	/*
	 * If relevant, do a deeper inspection to determine additional
	 * characteristics of the tx frame.
	 * If the frame is invalid, then the txq will be set to NULL to
	 * indicate an error.
	 */
	OL_TX_CLASSIFY_EXTENSION(vdev, tx_desc, tx_nbuf, tx_msdu_info, txq);
	if (IEEE80211_IS_MULTICAST(dest_addr) && vdev->opmode !=
			wlan_op_mode_sta && tx_msdu_info->peer !=
						NULL) {
		ol_txrx_dbg(
			"%s: remove the peer reference %pK\n",
			__func__, peer);
		/* remove the peer reference added above */
		ol_txrx_peer_release_ref(tx_msdu_info->peer,
					 PEER_DEBUG_ID_OL_INTERNAL);
		/* Making peer NULL in case if multicast non STA mode */
		tx_msdu_info->peer = NULL;
	}

	/* Whether this frame can download though HTT2 data pipe or not. */
	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);

	/* Update Tx Queue info */
	tx_desc->txq = txq;

	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	return txq;
}
632
/**
 * ol_tx_classify_mgmt() - classify a tx management frame, select its queue
 * @vdev: virtual device the frame is sent on
 * @tx_desc: tx descriptor; its txq field is updated with the result
 * @tx_nbuf: the tx management frame
 * @tx_msdu_info: per-MSDU metadata; peer, peer_id, ext_tid and is_unicast
 *                are filled in
 *
 * Multicast mgmt frames (beacons, broadcast probe requests, etc.) use the
 * vdev's default mgmt queue with no peer. Unicast mgmt frames use the
 * peer's HTT_TX_EXT_TID_MGMT queue when a matching peer exists (a peer
 * reference is taken and left in tx_msdu_info->peer for the caller to
 * release); pre-association frames fall back to the vdev's default queue.
 *
 * Return: the selected tx queue, or NULL on error.
 */
struct ol_tx_frms_queue_t *
ol_tx_classify_mgmt(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_nbuf,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_tx_frms_queue_t *txq = NULL;
	A_UINT8 *dest_addr;
	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;

	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
	if (unlikely(NULL == dest_addr)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_ERROR,
			  "Error: dest_addr is NULL.\n");
		return NULL; /*error*/
	}
	if (IEEE80211_IS_MULTICAST(dest_addr)) {
		/*
		 * AP: beacons are broadcast,
		 * public action frames (e.g. extended channel
		 * switch announce) may be broadcast
		 * STA: probe requests can be either broadcast or unicast
		 */
		txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
		tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
		tx_msdu_info->peer = NULL;
		tx_msdu_info->htt.info.is_unicast = 0;
	} else {
		/*
		 * Find the peer and increment its reference count.
		 * If this vdev is an AP, use the receiver addr (RA) to
		 * determine which peer STA this unicast mgmt frame is for.
		 * If this vdev is a STA, the unicast mgmt frame is for the
		 * AP the STA is associated with.
		 * Probe request / response and Assoc request / response are
		 * sent before the peer exists - in this case, use the
		 * vdev's default tx queue.
		 */
		if (vdev->opmode == wlan_op_mode_sta) {
			/*
			 * TO DO:
			 * To support TDLS, first check if there is a TDLS
			 * peer STA, and if so, check if the DA matches
			 * the TDLS peer STA's MAC address.
			 */
			peer = ol_txrx_assoc_peer_find(vdev);
			/*
			 * Some special case(preauth for example) needs to send
			 * unicast mgmt frame to unassociated AP. In such case,
			 * we need to check if dest addr match the associated
			 * peer addr. If not, we set peer as NULL to queue this
			 * frame to vdev queue.
			 */
			if (peer) {

				qdf_mem_copy(
					&local_mac_addr_aligned.raw[0],
					dest_addr, OL_TXRX_MAC_ADDR_LEN);
				mac_addr = &local_mac_addr_aligned;
				/* DA differs from the associated AP: drop the
				 * reference and fall back to the vdev queue
				 */
				if (ol_txrx_peer_find_mac_addr_cmp
						(mac_addr,
						 &peer->mac_addr) != 0) {
					ol_txrx_peer_release_ref
						(peer,
						 PEER_DEBUG_ID_OL_INTERNAL);
					peer = NULL;
				}
			}
		} else {
			/* find the peer and increment its reference count */
			peer = ol_txrx_peer_find_hash_find_get_ref(pdev,
						dest_addr,
						0, 1,
						PEER_DEBUG_ID_OL_INTERNAL);
		}
		tx_msdu_info->peer = peer;
		if (!peer) {
			txq = &vdev->txqs[OL_TX_VDEV_DEFAULT_MGMT];
			tx_msdu_info->htt.info.peer_id = HTT_INVALID_PEER_ID;
		} else {
			txq = &peer->txqs[HTT_TX_EXT_TID_MGMT];
			tx_msdu_info->htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
			/*
			 * The following line assumes each peer object has a
			 * single ID. This is currently true, and is expected
			 * to remain true.
			 */
			tx_msdu_info->htt.info.peer_id = peer->peer_ids[0];
		}
		tx_msdu_info->htt.info.is_unicast = 1;
	}
	/*
	 * If relevant, do a deeper inspection to determine additional
	 * characteristics of the tx frame.
	 * If the frame is invalid, then the txq will be set to NULL to
	 * indicate an error.
	 */
	OL_TX_CLASSIFY_MGMT_EXTENSION(vdev, tx_desc, tx_nbuf,
				      tx_msdu_info, txq);

	/* Whether this frame can download though HTT2 data pipe or not. */
	OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);

	/* Update Tx Queue info */
	tx_desc->txq = txq;

	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	return txq;
}
747
Nirav Shaheb017be2018-02-15 11:20:58 +0530748#ifdef currently_unused
/*
 * Deeper inspection of a classified tx data frame: fill in the L2/L3
 * header info for non-unicast frames, and decide the encrypt /
 * tx-complete actions based on the peer's security settings.
 * (Compiled out via #ifdef currently_unused.)
 */
QDF_STATUS
ol_tx_classify_extension(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_msdu,
	struct ol_txrx_msdu_info_t *msdu_info)
{
	u8 *datap = qdf_nbuf_data(tx_msdu);
	struct ol_txrx_peer_t *peer;
	int which_key;

	/*
	 * The following msdu_info fields were already filled in by the
	 * ol_tx entry function or the regular ol_tx_classify function:
	 * htt.info.vdev_id (ol_tx_hl or ol_tx_non_std_hl)
	 * htt.info.ext_tid (ol_tx_non_std_hl or ol_tx_classify)
	 * htt.info.frame_type (ol_tx_hl or ol_tx_non_std_hl)
	 * htt.info.l2_hdr_type (ol_tx_hl or ol_tx_non_std_hl)
	 * htt.info.is_unicast (ol_tx_classify)
	 * htt.info.peer_id (ol_tx_classify)
	 * peer (ol_tx_classify)
	 * if (is_unicast) {
	 *	htt.info.ethertype (ol_tx_classify)
	 *	htt.info.l3_hdr_offset (ol_tx_classify)
	 * }
	 * The following fields need to be filled in by this function:
	 * if (!is_unicast) {
	 *	htt.info.ethertype
	 *	htt.info.l3_hdr_offset
	 * }
	 * htt.action.band (NOT CURRENTLY USED)
	 * htt.action.do_encrypt
	 * htt.action.do_tx_complete
	 * The following fields are not needed for data frames, and can
	 * be left uninitialized:
	 * htt.info.frame_subtype
	 */

	if (!msdu_info->htt.info.is_unicast) {
		int l2_hdr_size;
		u16 ethertype;

		if (msdu_info->htt.info.l2_hdr_type == htt_pkt_type_ethernet) {
			struct ethernet_hdr_t *eh;

			eh = (struct ethernet_hdr_t *)datap;
			l2_hdr_size = sizeof(*eh);
			ethertype = (eh->ethertype[0] << 8) | eh->ethertype[1];

			/* skip an 802.1Q VLAN tag if present */
			if (ethertype == ETHERTYPE_VLAN) {
				struct ethernet_vlan_hdr_t *evh;

				evh = (struct ethernet_vlan_hdr_t *)datap;
				l2_hdr_size = sizeof(*evh);
				ethertype = (evh->ethertype[0] << 8) |
							evh->ethertype[1];
			}

			/* value below 0x600 is an 802.3 length field */
			if (!IS_ETHERTYPE(ethertype)) {
				/* 802.3 header*/
				struct llc_snap_hdr_t *llc =
					(struct llc_snap_hdr_t *)(datap +
							l2_hdr_size);
				ethertype = (llc->ethertype[0] << 8) |
							llc->ethertype[1];
				l2_hdr_size += sizeof(*llc);
			}
			/*
			 * NOTE(review): these two assignments are repeated
			 * unconditionally after the if/else below, so they
			 * are redundant here — harmless, kept as-is.
			 */
			msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
			msdu_info->htt.info.ethertype = ethertype;
		} else { /* 802.11 */
			struct llc_snap_hdr_t *llc;

			l2_hdr_size = ol_txrx_ieee80211_hdrsize(datap);
			llc = (struct llc_snap_hdr_t *)(datap + l2_hdr_size);
			ethertype = (llc->ethertype[0] << 8) |
							llc->ethertype[1];
			/*
			 * Don't include the LLC/SNAP header in l2_hdr_size,
			 * because l3_hdr_offset is actually supposed to refer
			 * to the header after the 802.3 or 802.11 header,
			 * which could be a LLC/SNAP header rather
			 * than the L3 header.
			 */
		}
		msdu_info->htt.info.l3_hdr_offset = l2_hdr_size;
		msdu_info->htt.info.ethertype = ethertype;
		which_key = txrx_sec_mcast;
	} else {
		which_key = txrx_sec_ucast;
	}
	peer = msdu_info->peer;
	/*
	 * msdu_info->htt.action.do_encrypt is initially set in ol_tx_desc_hl.
	 * Add more check here.
	 */
	msdu_info->htt.action.do_encrypt = (!peer) ? 0 :
		(peer->security[which_key].sec_type == htt_sec_type_none) ? 0 :
		msdu_info->htt.action.do_encrypt;
	/*
	 * For systems that have a frame by frame spec for whether to receive
	 * a tx completion notification, use the tx completion notification
	 * only for certain management frames, not for data frames.
	 * (In the future, this may be changed slightly, e.g. to request a
	 * tx completion notification for the final EAPOL message sent by a
	 * STA during the key delivery handshake.)
	 */
	msdu_info->htt.action.do_tx_complete = 0;

	return QDF_STATUS_SUCCESS;
}
859
Nirav Shaheb017be2018-02-15 11:20:58 +0530860QDF_STATUS
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530861ol_tx_classify_mgmt_extension(
862 struct ol_txrx_vdev_t *vdev,
863 struct ol_tx_desc_t *tx_desc,
864 qdf_nbuf_t tx_msdu,
865 struct ol_txrx_msdu_info_t *msdu_info)
866{
867 struct ieee80211_frame *wh;
868
869 /*
870 * The following msdu_info fields were already filled in by the
871 * ol_tx entry function or the regular ol_tx_classify_mgmt function:
872 * htt.info.vdev_id (ol_txrx_mgmt_send)
873 * htt.info.frame_type (ol_txrx_mgmt_send)
874 * htt.info.l2_hdr_type (ol_txrx_mgmt_send)
875 * htt.action.do_tx_complete (ol_txrx_mgmt_send)
876 * htt.info.peer_id (ol_tx_classify_mgmt)
877 * htt.info.ext_tid (ol_tx_classify_mgmt)
878 * htt.info.is_unicast (ol_tx_classify_mgmt)
879 * peer (ol_tx_classify_mgmt)
880 * The following fields need to be filled in by this function:
881 * htt.info.frame_subtype
882 * htt.info.l3_hdr_offset
883 * htt.action.band (NOT CURRENTLY USED)
884 * The following fields are not needed for mgmt frames, and can
885 * be left uninitialized:
886 * htt.info.ethertype
887 * htt.action.do_encrypt
888 * (This will be filled in by other SW, which knows whether
889 * the peer has robust-managment-frames enabled.)
890 */
891 wh = (struct ieee80211_frame *)qdf_nbuf_data(tx_msdu);
892 msdu_info->htt.info.frame_subtype =
893 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
894 IEEE80211_FC0_SUBTYPE_SHIFT;
895 msdu_info->htt.info.l3_hdr_offset = sizeof(struct ieee80211_frame);
896
Nirav Shaheb017be2018-02-15 11:20:58 +0530897 return QDF_STATUS_SUCCESS;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530898}
Nirav Shaheb017be2018-02-15 11:20:58 +0530899#endif
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530900#endif /* defined(CONFIG_HL_SUPPORT) */