blob: 825d57ebc8aaddd54de47dd0e71c5d38aa888f7b [file] [log] [blame]
Subash Abhinov Kasiviswanathan2139ce8a2016-10-14 11:01:48 -06001/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * RMNET Data ingress/egress handler
13 */
14
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17#include <linux/module.h>
18#include <linux/rmnet_data.h>
19#include <linux/net_map.h>
20#include <linux/netdev_features.h>
21#include <linux/ip.h>
22#include <linux/ipv6.h>
23#include <net/rmnet_config.h>
24#include "rmnet_data_private.h"
25#include "rmnet_data_config.h"
26#include "rmnet_data_vnd.h"
27#include "rmnet_map.h"
28#include "rmnet_data_stats.h"
29#include "rmnet_data_trace.h"
30#include "rmnet_data_handlers.h"
31
/* Register this translation unit's log mask with the rmnet_data
 * logging framework.
 */
RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_HANDLER);

#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
/* Number of bytes of each received packet to hex-dump; 0 disables dumping. */
unsigned int dump_pkt_rx;
module_param(dump_pkt_rx, uint, 0644);
MODULE_PARM_DESC(dump_pkt_rx, "Dump packets entering ingress handler");

/* Number of bytes of each transmitted packet to hex-dump; 0 disables dumping. */
unsigned int dump_pkt_tx;
module_param(dump_pkt_tx, uint, 0644);
MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler");
#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */

/* Time in nano seconds. This number must be less than a second.
 * (rmnet_optional_gro_flush() compares it against the tv_nsec component
 * only, so larger values would never trigger a flush by themselves.)
 * Setting it to 0 disables the periodic GRO flush entirely.
 */
long gro_flush_time __read_mostly = 10000L;
module_param(gro_flush_time, long, 0644);
MODULE_PARM_DESC(gro_flush_time, "Flush GRO when spaced more than this");

/* Value of the IP version nibble (upper 4 bits of the first byte)
 * for IPv4 and IPv6 packets respectively.
 */
#define RMNET_DATA_IP_VERSION_4 0x40
#define RMNET_DATA_IP_VERSION_6 0x60

/* Return values of rmnet_check_skb_can_gro() */
#define RMNET_DATA_GRO_RCV_FAIL 0
#define RMNET_DATA_GRO_RCV_PASS 1
54
55/* Helper Functions */
56
57/* __rmnet_data_set_skb_proto() - Set skb->protocol field
58 * @skb: packet being modified
59 *
60 * Peek at the first byte of the packet and set the protocol. There is not
61 * good way to determine if a packet has a MAP header. As of writing this,
62 * the reserved bit in the MAP frame will prevent it from overlapping with
63 * IPv4/IPv6 frames. This could change in the future!
64 */
65static inline void __rmnet_data_set_skb_proto(struct sk_buff *skb)
66{
67 switch (skb->data[0] & 0xF0) {
68 case RMNET_DATA_IP_VERSION_4:
69 skb->protocol = htons(ETH_P_IP);
70 break;
71 case RMNET_DATA_IP_VERSION_6:
72 skb->protocol = htons(ETH_P_IPV6);
73 break;
74 default:
75 skb->protocol = htons(ETH_P_MAP);
76 break;
77 }
78}
79
#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
/* rmnet_print_packet() - Print packet / diagnostics
 * @skb: Packet to print
 * @printlen: Number of bytes to print
 * @dev: Name of interface
 * @dir: Character representing direction (e.g.. 'r' for receive)
 *
 * This function prints out raw bytes in an SKB. Use of this will have major
 * performance impacts and may even trigger watchdog resets if too much is being
 * printed. Hence, this should always be compiled out unless absolutely needed.
 */
void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
{
	char buffer[200];
	unsigned int len, printlen;
	int i, buffloc = 0;

	/* Dump length comes from the module parameter matching the traffic
	 * direction; an unknown direction (or a parameter of 0) dumps nothing.
	 */
	switch (dir) {
	case 'r':
		printlen = dump_pkt_rx;
		break;

	case 't':
		printlen = dump_pkt_tx;
		break;

	default:
		printlen = 0;
		break;
	}

	if (!printlen)
		return;

	pr_err("[%s][%c] - PKT skb->len=%d skb->head=%pK skb->data=%pK\n",
	       dev, dir, skb->len, (void *)skb->head, (void *)skb->data);
	pr_err("[%s][%c] - PKT skb->tail=%pK skb->end=%pK\n",
	       dev, dir, skb_tail_pointer(skb), skb_end_pointer(skb));

	/* If skb->len is 0, fall back to the distance between data and end
	 * so something useful is still printed for empty skbs.
	 */
	if (skb->len > 0)
		len = skb->len;
	else
		len = ((unsigned int)(uintptr_t)skb->end) -
		      ((unsigned int)(uintptr_t)skb->data);

	pr_err("[%s][%c] - PKT len: %d, printing first %d bytes\n",
	       dev, dir, len, printlen);

	/* Emit the dump 16 bytes per line, each line prefixed with the
	 * offset of its first byte. Note the first iteration flushes an
	 * empty buffer, producing one blank "PKT" line before the dump.
	 */
	memset(buffer, 0, sizeof(buffer));
	for (i = 0; (i < printlen) && (i < len); i++) {
		if ((i % 16) == 0) {
			pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
			memset(buffer, 0, sizeof(buffer));
			buffloc = 0;
			buffloc += snprintf(&buffer[buffloc],
					    sizeof(buffer) - buffloc, "%04X:",
					    i);
		}

		buffloc += snprintf(&buffer[buffloc], sizeof(buffer) - buffloc,
				    " %02x", skb->data[i]);
	}
	/* Flush the final (possibly partial) line */
	pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
}
#else
/* Stub used when packet dumping is compiled out. */
void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
{
}
#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */
149
150/* Generic handler */
151
152/* rmnet_bridge_handler() - Bridge related functionality
153 *
154 * Return:
155 * - RX_HANDLER_CONSUMED in all cases
156 */
157static rx_handler_result_t rmnet_bridge_handler
158 (struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
159{
160 if (!ep->egress_dev) {
161 LOGD("Missing egress device for packet arriving on %s",
162 skb->dev->name);
163 rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_BRDG_NO_EGRESS);
164 } else {
165 rmnet_egress_handler(skb, ep);
166 }
167
168 return RX_HANDLER_CONSUMED;
169}
170
#ifdef NET_SKBUFF_DATA_USES_OFFSET
/* rmnet_reset_mac_header() - Clear the MAC header of an skb
 * @skb: packet being modified
 *
 * Offset-based skb layout: a mac_header offset of 0 marks "no MAC
 * header"; mac_len is cleared to match.
 */
static void rmnet_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = 0;
	skb->mac_len = 0;
}
#else
/* Pointer-based skb layout: point the MAC header at the network header
 * (i.e. a zero-length MAC header) and clear mac_len.
 */
static void rmnet_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
	skb->mac_len = 0;
}
#endif /*NET_SKBUFF_DATA_USES_OFFSET*/
184
185/* rmnet_check_skb_can_gro() - Check is skb can be passed through GRO handler
186 *
187 * Determines whether to pass the skb to the GRO handler napi_gro_receive() or
188 * handle normally by passing to netif_receive_skb().
189 *
190 * Warning:
191 * This assumes that only TCP packets can be coalesced by the GRO handler which
192 * is not true in general. We lose the ability to use GRO for cases like UDP
193 * encapsulation protocols.
194 *
195 * Return:
196 * - RMNET_DATA_GRO_RCV_FAIL if packet is sent to netif_receive_skb()
197 * - RMNET_DATA_GRO_RCV_PASS if packet is sent to napi_gro_receive()
198 */
199static int rmnet_check_skb_can_gro(struct sk_buff *skb)
200{
201 switch (skb->data[0] & 0xF0) {
202 case RMNET_DATA_IP_VERSION_4:
203 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
204 return RMNET_DATA_GRO_RCV_PASS;
205 break;
206 case RMNET_DATA_IP_VERSION_6:
207 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
208 return RMNET_DATA_GRO_RCV_PASS;
209 /* Fall through */
210 }
211
212 return RMNET_DATA_GRO_RCV_FAIL;
213}
214
215/* rmnet_optional_gro_flush() - Check if GRO handler needs to flush now
216 *
217 * Determines whether GRO handler needs to flush packets which it has
218 * coalesced so far.
219 *
220 * Tuning this parameter will trade TCP slow start performance for GRO coalesce
221 * ratio.
222 */
223static void rmnet_optional_gro_flush(struct napi_struct *napi,
224 struct rmnet_logical_ep_conf_s *ep)
225{
226 struct timespec curr_time, diff;
227
228 if (!gro_flush_time)
229 return;
230
231 if (unlikely(ep->flush_time.tv_sec == 0)) {
232 getnstimeofday(&ep->flush_time);
233 } else {
234 getnstimeofday(&(curr_time));
235 diff = timespec_sub(curr_time, ep->flush_time);
236 if ((diff.tv_sec > 0) || (diff.tv_nsec > gro_flush_time)) {
237 napi_gro_flush(napi, false);
238 getnstimeofday(&ep->flush_time);
239 }
240 }
241}
242
243/* __rmnet_deliver_skb() - Deliver skb
244 *
245 * Determines where to deliver skb. Options are: consume by network stack,
246 * pass to bridge handler, or pass to virtual network device
247 *
248 * Return:
249 * - RX_HANDLER_CONSUMED if packet forwarded or dropped
250 * - RX_HANDLER_PASS if packet is to be consumed by network stack as-is
251 */
252static rx_handler_result_t __rmnet_deliver_skb
253 (struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
254{
255 struct napi_struct *napi = NULL;
256 gro_result_t gro_res;
257
258 trace___rmnet_deliver_skb(skb);
259 switch (ep->rmnet_mode) {
260 case RMNET_EPMODE_NONE:
261 return RX_HANDLER_PASS;
262
263 case RMNET_EPMODE_BRIDGE:
264 return rmnet_bridge_handler(skb, ep);
265
266 case RMNET_EPMODE_VND:
267 skb_reset_transport_header(skb);
268 skb_reset_network_header(skb);
269 switch (rmnet_vnd_rx_fixup(skb, skb->dev)) {
270 case RX_HANDLER_CONSUMED:
271 return RX_HANDLER_CONSUMED;
272
273 case RX_HANDLER_PASS:
274 skb->pkt_type = PACKET_HOST;
275 rmnet_reset_mac_header(skb);
276 if (rmnet_check_skb_can_gro(skb) &&
277 (skb->dev->features & NETIF_F_GRO)) {
278 napi = get_current_napi_context();
279 if (napi) {
280 gro_res = napi_gro_receive(napi, skb);
281 trace_rmnet_gro_downlink(gro_res);
282 rmnet_optional_gro_flush(napi, ep);
283 } else {
284 WARN_ONCE(1, "current napi is NULL\n");
285 netif_receive_skb(skb);
286 }
287 } else {
288 netif_receive_skb(skb);
289 }
290 return RX_HANDLER_CONSUMED;
291 }
292 return RX_HANDLER_PASS;
293
294 default:
295 LOGD("Unknown ep mode %d", ep->rmnet_mode);
296 rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_DELIVER_NO_EP);
297 return RX_HANDLER_CONSUMED;
298 }
299}
300
301/* rmnet_ingress_deliver_packet() - Ingress handler for raw IP and bridged
302 * MAP packets.
303 * @skb: Packet needing a destination.
304 * @config: Physical end point configuration that the packet arrived on.
305 *
306 * Return:
307 * - RX_HANDLER_CONSUMED if packet forwarded/dropped
308 * - RX_HANDLER_PASS if packet should be passed up the stack by caller
309 */
310static rx_handler_result_t rmnet_ingress_deliver_packet
311 (struct sk_buff *skb, struct rmnet_phys_ep_config *config)
312{
313 if (!config) {
314 LOGD("%s", "NULL physical EP provided");
315 kfree_skb(skb);
316 return RX_HANDLER_CONSUMED;
317 }
318
319 if (!(config->local_ep.refcount)) {
320 LOGD("Packet on %s has no local endpoint configuration",
321 skb->dev->name);
322 rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_IPINGRESS_NO_EP);
323 return RX_HANDLER_CONSUMED;
324 }
325
326 skb->dev = config->local_ep.egress_dev;
327
328 return __rmnet_deliver_skb(skb, &config->local_ep);
329}
330
331/* MAP handler */
332
333/* _rmnet_map_ingress_handler() - Actual MAP ingress handler
334 * @skb: Packet being received
335 * @config: Physical endpoint configuration for the ingress device
336 *
337 * Most MAP ingress functions are processed here. Packets are processed
338 * individually; aggregated packets should use rmnet_map_ingress_handler()
339 *
340 * Return:
341 * - RX_HANDLER_CONSUMED if packet is dropped
342 * - result of __rmnet_deliver_skb() for all other cases
343 */
344static rx_handler_result_t _rmnet_map_ingress_handler
345 (struct sk_buff *skb, struct rmnet_phys_ep_config *config)
346{
347 struct rmnet_logical_ep_conf_s *ep;
348 u8 mux_id;
349 u16 len;
350 int ckresult;
351
352 if (RMNET_MAP_GET_CD_BIT(skb)) {
353 if (config->ingress_data_format
354 & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
355 return rmnet_map_command(skb, config);
356
357 LOGM("MAP command packet on %s; %s", skb->dev->name,
358 "Not configured for MAP commands");
359 rmnet_kfree_skb(skb,
360 RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC);
361 return RX_HANDLER_CONSUMED;
362 }
363
364 mux_id = RMNET_MAP_GET_MUX_ID(skb);
365 len = RMNET_MAP_GET_LENGTH(skb)
366 - RMNET_MAP_GET_PAD(skb)
367 - config->tail_spacing;
368
369 if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
370 LOGD("Got packet on %s with bad mux id %d",
371 skb->dev->name, mux_id);
372 rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX);
373 return RX_HANDLER_CONSUMED;
374 }
375
376 ep = &config->muxed_ep[mux_id];
377
378 if (!ep->refcount) {
379 LOGD("Packet on %s:%d; has no logical endpoint config",
380 skb->dev->name, mux_id);
381
382 rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
383 return RX_HANDLER_CONSUMED;
384 }
385
386 if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
387 skb->dev = ep->egress_dev;
388
389 if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
390 (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
391 ckresult = rmnet_map_checksum_downlink_packet(skb);
392 trace_rmnet_map_checksum_downlink_packet(skb, ckresult);
393 rmnet_stats_dl_checksum(ckresult);
394 if (likely((ckresult == RMNET_MAP_CHECKSUM_OK) ||
395 (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
396 skb->ip_summed |= CHECKSUM_UNNECESSARY;
397 else if (ckresult !=
398 RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION &&
399 ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT &&
400 ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET &&
401 ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
402 rmnet_kfree_skb
403 (skb, RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
404 return RX_HANDLER_CONSUMED;
405 }
406 }
407
408 /* Subtract MAP header */
409 skb_pull(skb, sizeof(struct rmnet_map_header_s));
410 skb_trim(skb, len);
411 __rmnet_data_set_skb_proto(skb);
412 return __rmnet_deliver_skb(skb, ep);
413}
414
415/* rmnet_map_ingress_handler() - MAP ingress handler
416 * @skb: Packet being received
417 * @config: Physical endpoint configuration for the ingress device
418 *
419 * Called if and only if MAP is configured in the ingress device's ingress data
420 * format. Deaggregation is done here, actual MAP processing is done in
421 * _rmnet_map_ingress_handler().
422 *
423 * Return:
424 * - RX_HANDLER_CONSUMED for aggregated packets
425 * - RX_HANDLER_CONSUMED for dropped packets
426 * - result of _rmnet_map_ingress_handler() for all other cases
427 */
428static rx_handler_result_t rmnet_map_ingress_handler
429 (struct sk_buff *skb, struct rmnet_phys_ep_config *config)
430{
431 struct sk_buff *skbn;
432 int rc, co = 0;
433
434 if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
435 trace_rmnet_start_deaggregation(skb);
436 while ((skbn = rmnet_map_deaggregate(skb, config)) != 0) {
437 _rmnet_map_ingress_handler(skbn, config);
438 co++;
439 }
440 trace_rmnet_end_deaggregation(skb, co);
441 LOGD("De-aggregated %d packets", co);
442 rmnet_stats_deagg_pkts(co);
443 rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);
444 rc = RX_HANDLER_CONSUMED;
445 } else {
446 rc = _rmnet_map_ingress_handler(skb, config);
447 }
448
449 return rc;
450}
451
452/* rmnet_map_egress_handler() - MAP egress handler
453 * @skb: Packet being sent
454 * @config: Physical endpoint configuration for the egress device
455 * @ep: logical endpoint configuration of the packet originator
456 * (e.g.. RmNet virtual network device)
457 * @orig_dev: The originator vnd device
458 *
459 * Called if and only if MAP is configured in the egress device's egress data
460 * format. Will expand skb if there is insufficient headroom for MAP protocol.
461 * Note: headroomexpansion will incur a performance penalty.
462 *
463 * Return:
464 * - 0 on success
465 * - 1 on failure
466 */
467static int rmnet_map_egress_handler(struct sk_buff *skb,
468 struct rmnet_phys_ep_config *config,
469 struct rmnet_logical_ep_conf_s *ep,
470 struct net_device *orig_dev)
471{
472 int required_headroom, additional_header_length, ckresult;
473 struct rmnet_map_header_s *map_header;
474
475 additional_header_length = 0;
476
477 required_headroom = sizeof(struct rmnet_map_header_s);
478 if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
479 (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
480 required_headroom +=
481 sizeof(struct rmnet_map_ul_checksum_header_s);
482 additional_header_length +=
483 sizeof(struct rmnet_map_ul_checksum_header_s);
484 }
485
486 LOGD("headroom of %d bytes", required_headroom);
487
488 if (skb_headroom(skb) < required_headroom) {
489 if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) {
490 LOGD("Failed to add headroom of %d bytes",
491 required_headroom);
492 return 1;
493 }
494 }
495
496 if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
497 (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
498 ckresult = rmnet_map_checksum_uplink_packet
499 (skb, orig_dev, config->egress_data_format);
500 trace_rmnet_map_checksum_uplink_packet(orig_dev, ckresult);
501 rmnet_stats_ul_checksum(ckresult);
502 }
503
504 if ((!(config->egress_data_format &
505 RMNET_EGRESS_FORMAT_AGGREGATION)) ||
506 ((orig_dev->features & NETIF_F_GSO) && skb_is_nonlinear(skb)))
507 map_header = rmnet_map_add_map_header
508 (skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
509 else
510 map_header = rmnet_map_add_map_header
511 (skb, additional_header_length, RMNET_MAP_ADD_PAD_BYTES);
512
513 if (!map_header) {
514 LOGD("%s", "Failed to add MAP header to egress packet");
515 return 1;
516 }
517
518 if (config->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
519 if (ep->mux_id == 0xff)
520 map_header->mux_id = 0;
521 else
522 map_header->mux_id = ep->mux_id;
523 }
524
525 skb->protocol = htons(ETH_P_MAP);
526
527 if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
528 rmnet_map_aggregate(skb, config);
529 return RMNET_MAP_CONSUMED;
530 }
531
532 return RMNET_MAP_SUCCESS;
533}
534
/* Ingress / Egress Entry Points */

/* rmnet_ingress_handler() - Ingress handler entry point
 * @skb: Packet being received
 *
 * Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 *
 * Return:
 *      - RX_HANDLER_PASS if packet is not processed by handler (caller must
 *        deal with the packet)
 *      - RX_HANDLER_CONSUMED if packet is forwarded or processed by MAP
 */
rx_handler_result_t rmnet_ingress_handler(struct sk_buff *skb)
{
	struct rmnet_phys_ep_config *config;
	struct net_device *dev;
	int rc;

	if (!skb)
		return RX_HANDLER_CONSUMED;

	dev = skb->dev;
	trace_rmnet_ingress_handler(skb);
	rmnet_print_packet(skb, dev->name, 'r');

	/* Packets on devices not associated with rmnet_data are dropped. */
	config = _rmnet_get_phys_ep_config(skb->dev);

	if (!config) {
		LOGD("%s is not associated with rmnet_data", skb->dev->name);
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	/* Sometimes devices operate in ethernet mode even though there is no
	 * ethernet header. This causes the skb->protocol to contain a bogus
	 * value and the skb->data pointer to be off by 14 bytes. Fix it if
	 * configured to do so
	 */
	if (config->ingress_data_format & RMNET_INGRESS_FIX_ETHERNET) {
		skb_push(skb, RMNET_ETHERNET_HEADER_LENGTH);
		__rmnet_data_set_skb_proto(skb);
	}

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
		rc = rmnet_map_ingress_handler(skb, config);
	} else {
		/* Non-MAP ingress: route by the skb's own protocol field. */
		switch (ntohs(skb->protocol)) {
		case ETH_P_MAP:
			/* A raw MAP packet is only valid here when the local
			 * endpoint bridges it; otherwise drop and account.
			 */
			if (config->local_ep.rmnet_mode ==
				RMNET_EPMODE_BRIDGE) {
				rc = rmnet_ingress_deliver_packet(skb, config);
			} else {
				LOGD("MAP packet on %s; MAP not set",
				     dev->name);
				rmnet_kfree_skb
				(skb,
				 RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD);
				rc = RX_HANDLER_CONSUMED;
			}
			break;

		case ETH_P_ARP:
		case ETH_P_IP:
		case ETH_P_IPV6:
			rc = rmnet_ingress_deliver_packet(skb, config);
			break;

		default:
			/* Unrecognized protocol: let the stack deal with it. */
			LOGD("Unknown skb->proto 0x%04X\n",
			     ntohs(skb->protocol) & 0xFFFF);
			rc = RX_HANDLER_PASS;
		}
	}

	return rc;
}
613
614/* rmnet_rx_handler() - Rx handler callback registered with kernel
615 * @pskb: Packet to be processed by rx handler
616 *
617 * Standard kernel-expected footprint for rx handlers. Calls
618 * rmnet_ingress_handler with correctly formatted arguments
619 *
620 * Return:
621 * - Whatever rmnet_ingress_handler() returns
622 */
623rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
624{
625 return rmnet_ingress_handler(*pskb);
626}
627
628/* rmnet_egress_handler() - Egress handler entry point
629 * @skb: packet to transmit
630 * @ep: logical endpoint configuration of the packet originator
631 * (e.g.. RmNet virtual network device)
632 *
633 * Modifies packet as per logical endpoint configuration and egress data format
634 * for egress device configured in logical endpoint. Packet is then transmitted
635 * on the egress device.
636 */
637void rmnet_egress_handler(struct sk_buff *skb,
638 struct rmnet_logical_ep_conf_s *ep)
639{
640 struct rmnet_phys_ep_config *config;
641 struct net_device *orig_dev;
642 int rc;
643
644 orig_dev = skb->dev;
645 skb->dev = ep->egress_dev;
646
647 config = _rmnet_get_phys_ep_config(skb->dev);
648
649 if (!config) {
650 LOGD("%s is not associated with rmnet_data", skb->dev->name);
651 kfree_skb(skb);
652 return;
653 }
654
655 LOGD("Packet going out on %s with egress format 0x%08X",
656 skb->dev->name, config->egress_data_format);
657
658 if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
659 switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
660 case RMNET_MAP_CONSUMED:
661 LOGD("%s", "MAP process consumed packet");
662 return;
663
664 case RMNET_MAP_SUCCESS:
665 break;
666
667 default:
668 LOGD("MAP egress failed on packet on %s",
669 skb->dev->name);
670 rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_EGR_MAPFAIL);
671 return;
672 }
673 }
674
675 if (ep->rmnet_mode == RMNET_EPMODE_VND)
676 rmnet_vnd_tx_fixup(skb, orig_dev);
677
678 rmnet_print_packet(skb, skb->dev->name, 't');
679 trace_rmnet_egress_handler(skb);
680 rc = dev_queue_xmit(skb);
681 if (rc != 0) {
682 LOGD("Failed to queue packet for transmission on [%s]",
683 skb->dev->name);
684 }
685 rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_EGRESS);
686}