/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/rmnet_data.h>
#include <linux/net_map.h>
#include <linux/netdev_features.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/rmnet_config.h>
#include "rmnet_data_private.h"
#include "rmnet_data_config.h"
#include "rmnet_data_vnd.h"
#include "rmnet_map.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_trace.h"
#include "rmnet_data_handlers.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_HANDLER);

#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
unsigned int dump_pkt_rx;
module_param(dump_pkt_rx, uint, 0644);
MODULE_PARM_DESC(dump_pkt_rx, "Dump packets entering ingress handler");

unsigned int dump_pkt_tx;
module_param(dump_pkt_tx, uint, 0644);
MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler");
#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */
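
/* Runtime usage sketch (assuming this file builds into a module named
 * rmnet_data and CONFIG_RMNET_DATA_DEBUG_PKT is set): the 0644 permissions
 * above expose these knobs through sysfs, e.g. dump the first 32 bytes of
 * every ingress packet with:
 *
 *	echo 32 > /sys/module/rmnet_data/parameters/dump_pkt_rx
 */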

/* Time in nanoseconds. This number must be less than a second. */
long gro_flush_time __read_mostly = 10000L;
module_param(gro_flush_time, long, 0644);
MODULE_PARM_DESC(gro_flush_time, "Flush GRO when spaced more than this");

unsigned int gro_min_byte_thresh __read_mostly = 7500;
module_param(gro_min_byte_thresh, uint, 0644);
MODULE_PARM_DESC(gro_min_byte_thresh, "Min byte thresh to change flush time");

unsigned int dynamic_gro_on __read_mostly = 1;
module_param(dynamic_gro_on, uint, 0644);
MODULE_PARM_DESC(dynamic_gro_on, "Toggle to turn on dynamic gro logic");

unsigned int upper_flush_time __read_mostly = 15000;
module_param(upper_flush_time, uint, 0644);
MODULE_PARM_DESC(upper_flush_time, "Upper limit on flush time");

unsigned int upper_byte_limit __read_mostly = 10500;
module_param(upper_byte_limit, uint, 0644);
MODULE_PARM_DESC(upper_byte_limit, "Upper byte limit");
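
/* Tuning sketch (illustrative values; paths assume a module named
 * rmnet_data): pin a fixed GRO flush window by disabling the adaptive
 * logic in rmnet_optional_gro_flush() below:
 *
 *	echo 0 > /sys/module/rmnet_data/parameters/dynamic_gro_on
 *	echo 15000 > /sys/module/rmnet_data/parameters/gro_flush_time
 */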

#define RMNET_DATA_IP_VERSION_4 0x40
#define RMNET_DATA_IP_VERSION_6 0x60

#define RMNET_DATA_GRO_RCV_FAIL 0
#define RMNET_DATA_GRO_RCV_PASS 1

/* Helper Functions */

/* __rmnet_data_set_skb_proto() - Set skb->protocol field
 * @skb: packet being modified
 *
 * Peek at the first byte of the packet and set the protocol. There is no
 * good way to determine if a packet has a MAP header. As of this writing,
 * the reserved bit in the MAP frame will prevent it from overlapping with
 * IPv4/IPv6 frames. This could change in the future!
 */
static inline void __rmnet_data_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_DATA_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_DATA_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}
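
/* Why the default case above is safe today (assumed MAPv1 bit layout): the
 * first byte of a MAP frame is [CD:1][reserved:1][pad_len:6] with the
 * reserved bit always zero, so it can never read as 0x4x or 0x6x and be
 * mistaken for an IPv4/IPv6 version nibble.
 */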

#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
/* rmnet_print_packet() - Print packet / diagnostics
 * @skb: Packet to print
 * @printlen: Number of bytes to print
 * @dev: Name of interface
 * @dir: Character representing direction (e.g., 'r' for receive)
 *
 * This function prints out raw bytes in an SKB. Use of this will have major
 * performance impacts and may even trigger watchdog resets if too much is
 * being printed. Hence, this should always be compiled out unless absolutely
 * needed.
 */
void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
{
	char buffer[200];
	unsigned int len, printlen;
	int i, buffloc = 0;

	switch (dir) {
	case 'r':
		printlen = dump_pkt_rx;
		break;

	case 't':
		printlen = dump_pkt_tx;
		break;

	default:
		printlen = 0;
		break;
	}

	if (!printlen)
		return;

	pr_err("[%s][%c] - PKT skb->len=%d skb->head=%pK skb->data=%pK\n",
	       dev, dir, skb->len, (void *)skb->head, (void *)skb->data);
	pr_err("[%s][%c] - PKT skb->tail=%pK skb->end=%pK\n",
	       dev, dir, skb_tail_pointer(skb), skb_end_pointer(skb));

	if (skb->len > 0)
		len = skb->len;
	else
		len = ((unsigned int)(uintptr_t)skb->end) -
		      ((unsigned int)(uintptr_t)skb->data);

	pr_err("[%s][%c] - PKT len: %d, printing first %d bytes\n",
	       dev, dir, len, printlen);

	memset(buffer, 0, sizeof(buffer));
	for (i = 0; (i < printlen) && (i < len); i++) {
		if ((i % 16) == 0) {
			pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
			memset(buffer, 0, sizeof(buffer));
			buffloc = 0;
			buffloc += snprintf(&buffer[buffloc],
					    sizeof(buffer) - buffloc, "%04X:",
					    i);
		}

		buffloc += snprintf(&buffer[buffloc], sizeof(buffer) - buffloc,
				    " %02x", skb->data[i]);
	}
	pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
}
#else
void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
{
}
#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */

/* Generic handler */

/* rmnet_bridge_handler() - Bridge related functionality
 *
 * Return:
 * - RX_HANDLER_CONSUMED in all cases
 */
static rx_handler_result_t rmnet_bridge_handler
	(struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
{
	if (!ep->egress_dev) {
		LOGD("Missing egress device for packet arriving on %s",
		     skb->dev->name);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_BRDG_NO_EGRESS);
	} else {
		rmnet_egress_handler(skb, ep);
	}

	return RX_HANDLER_CONSUMED;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static void rmnet_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = 0;
	skb->mac_len = 0;
}
#else
static void rmnet_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
	skb->mac_len = 0;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/* rmnet_check_skb_can_gro() - Check if skb can be passed through GRO handler
 *
 * Determines whether to pass the skb to the GRO handler napi_gro_receive() or
 * handle normally by passing to netif_receive_skb().
 *
 * Warning:
 * This assumes that only TCP packets can be coalesced by the GRO handler,
 * which is not true in general. We lose the ability to use GRO for cases
 * like UDP encapsulation protocols.
 *
 * Return:
 * - RMNET_DATA_GRO_RCV_FAIL if packet is sent to netif_receive_skb()
 * - RMNET_DATA_GRO_RCV_PASS if packet is sent to napi_gro_receive()
 */
static int rmnet_check_skb_can_gro(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_DATA_IP_VERSION_4:
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			return RMNET_DATA_GRO_RCV_PASS;
		break;
	case RMNET_DATA_IP_VERSION_6:
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			return RMNET_DATA_GRO_RCV_PASS;
		/* Fall through */
	}

	return RMNET_DATA_GRO_RCV_FAIL;
}
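
/* Note: ip_hdr()/ipv6_hdr() above assume the network header already points
 * at skb->data; __rmnet_deliver_skb() guarantees this by calling
 * skb_reset_network_header() before checking GRO eligibility. The IPv6 case
 * also does not walk extension headers, so TCP behind extension headers
 * falls back to netif_receive_skb().
 */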

/* rmnet_optional_gro_flush() - Check if GRO handler needs to flush now
 *
 * Determines whether the GRO handler needs to flush packets which it has
 * coalesced so far.
 *
 * Tuning gro_flush_time will trade TCP slow start performance for GRO
 * coalesce ratio.
 */
static void rmnet_optional_gro_flush(struct napi_struct *napi,
				     struct rmnet_logical_ep_conf_s *ep,
				     unsigned int skb_size)
{
	struct timespec curr_time, diff;

	if (!gro_flush_time)
		return;

	if (unlikely(ep->flush_time.tv_sec == 0)) {
		getnstimeofday(&ep->flush_time);
		ep->flush_byte_count = 0;
	} else {
		getnstimeofday(&curr_time);
		diff = timespec_sub(curr_time, ep->flush_time);
		ep->flush_byte_count += skb_size;

		if (dynamic_gro_on) {
			if ((!(diff.tv_sec > 0) || diff.tv_nsec <=
			     gro_flush_time) &&
			    ep->flush_byte_count >= gro_min_byte_thresh) {
				/* Processed many bytes in a small time window.
				 * No longer need to flush so often and we can
				 * increase our byte limit
				 */
				gro_flush_time = upper_flush_time;
				gro_min_byte_thresh = upper_byte_limit;
			} else if ((diff.tv_sec > 0 ||
				    diff.tv_nsec > gro_flush_time) &&
				   ep->flush_byte_count <
				   gro_min_byte_thresh) {
				/* The time limit expired without receiving
				 * many bytes. Demote ourselves to the lowest
				 * limits and flush
				 */
				napi_gro_flush(napi, false);
				getnstimeofday(&ep->flush_time);
				ep->flush_byte_count = 0;
				gro_flush_time = 10000L;
				gro_min_byte_thresh = 7500L;
			} else if ((diff.tv_sec > 0 ||
				    diff.tv_nsec > gro_flush_time) &&
				   ep->flush_byte_count >=
				   gro_min_byte_thresh) {
				/* Above byte and time limit, therefore we can
				 * move/maintain our limits to be the max
				 * and flush
				 */
				napi_gro_flush(napi, false);
				getnstimeofday(&ep->flush_time);
				ep->flush_byte_count = 0;
				gro_flush_time = upper_flush_time;
				gro_min_byte_thresh = upper_byte_limit;
			}
			/* else, below time limit and below
			 * byte thresh, so change nothing
			 */
		} else if (diff.tv_sec > 0 ||
			   diff.tv_nsec >= gro_flush_time) {
			napi_gro_flush(napi, false);
			getnstimeofday(&ep->flush_time);
			ep->flush_byte_count = 0;
		}
	}
}
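
/* Decision summary for the dynamic branch in rmnet_optional_gro_flush():
 *
 *	within window && bytes >= thresh -> raise limits, keep coalescing
 *	past window   && bytes <  thresh -> flush, reset limits to defaults
 *	past window   && bytes >= thresh -> flush, keep the raised limits
 *	within window && bytes <  thresh -> no change
 */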

/* __rmnet_deliver_skb() - Deliver skb
 *
 * Determines where to deliver skb. Options are: consume by network stack,
 * pass to bridge handler, or pass to virtual network device
 *
 * Return:
 * - RX_HANDLER_CONSUMED if packet forwarded or dropped
 * - RX_HANDLER_PASS if packet is to be consumed by network stack as-is
 */
static rx_handler_result_t __rmnet_deliver_skb
	(struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
{
	struct napi_struct *napi = NULL;
	gro_result_t gro_res;
	unsigned int skb_size;

	trace___rmnet_deliver_skb(skb);
	switch (ep->rmnet_mode) {
	case RMNET_EPMODE_NONE:
		return RX_HANDLER_PASS;

	case RMNET_EPMODE_BRIDGE:
		return rmnet_bridge_handler(skb, ep);

	case RMNET_EPMODE_VND:
		skb_reset_transport_header(skb);
		skb_reset_network_header(skb);
		switch (rmnet_vnd_rx_fixup(skb, skb->dev)) {
		case RX_HANDLER_CONSUMED:
			return RX_HANDLER_CONSUMED;

		case RX_HANDLER_PASS:
			skb->pkt_type = PACKET_HOST;
			rmnet_reset_mac_header(skb);
			if (rmnet_check_skb_can_gro(skb) &&
			    (skb->dev->features & NETIF_F_GRO)) {
				napi = get_current_napi_context();
				if (napi) {
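					/* Snapshot the length now, as
					 * napi_gro_receive() may merge or
					 * free this skb
					 */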
					skb_size = skb->len;
					gro_res = napi_gro_receive(napi, skb);
					trace_rmnet_gro_downlink(gro_res);
					rmnet_optional_gro_flush(napi, ep,
								 skb_size);
				} else {
					WARN_ONCE(1, "current napi is NULL\n");
					netif_receive_skb(skb);
				}
			} else {
				netif_receive_skb(skb);
			}
			return RX_HANDLER_CONSUMED;
		}
		return RX_HANDLER_PASS;

	default:
		LOGD("Unknown ep mode %d", ep->rmnet_mode);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_DELIVER_NO_EP);
		return RX_HANDLER_CONSUMED;
	}
}

/* rmnet_ingress_deliver_packet() - Ingress handler for raw IP and bridged
 *				    MAP packets.
 * @skb: Packet needing a destination.
 * @config: Physical end point configuration that the packet arrived on.
 *
 * Return:
 * - RX_HANDLER_CONSUMED if packet forwarded/dropped
 * - RX_HANDLER_PASS if packet should be passed up the stack by caller
 */
static rx_handler_result_t rmnet_ingress_deliver_packet
	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
	if (!config) {
		LOGD("%s", "NULL physical EP provided");
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	if (!(config->local_ep.refcount)) {
		LOGD("Packet on %s has no local endpoint configuration",
		     skb->dev->name);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_IPINGRESS_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	skb->dev = config->local_ep.egress_dev;

	return __rmnet_deliver_skb(skb, &config->local_ep);
}

/* MAP handler */

/* _rmnet_map_ingress_handler() - Actual MAP ingress handler
 * @skb: Packet being received
 * @config: Physical endpoint configuration for the ingress device
 *
 * Most MAP ingress functions are processed here. Packets are processed
 * individually; aggregated packets should use rmnet_map_ingress_handler()
 *
 * Return:
 * - RX_HANDLER_CONSUMED if packet is dropped
 * - result of __rmnet_deliver_skb() for all other cases
 */
static rx_handler_result_t _rmnet_map_ingress_handler
	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
	struct rmnet_logical_ep_conf_s *ep;
	u8 mux_id;
	u16 len;
	int ckresult;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (config->ingress_data_format
		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
			return rmnet_map_command(skb, config);

		LOGM("MAP command packet on %s; %s", skb->dev->name,
		     "Not configured for MAP commands");
		rmnet_kfree_skb(skb,
				RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC);
		return RX_HANDLER_CONSUMED;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	len = RMNET_MAP_GET_LENGTH(skb)
	      - RMNET_MAP_GET_PAD(skb)
	      - config->tail_spacing;

	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
		LOGD("Got packet on %s with bad mux id %d",
		     skb->dev->name, mux_id);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX);
		return RX_HANDLER_CONSUMED;
	}

	ep = &config->muxed_ep[mux_id];

	if (!ep->refcount) {
		LOGD("Packet on %s:%d; has no logical endpoint config",
		     skb->dev->name, mux_id);

		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
		skb->dev = ep->egress_dev;

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
		ckresult = rmnet_map_checksum_downlink_packet(skb);
		trace_rmnet_map_checksum_downlink_packet(skb, ckresult);
		rmnet_stats_dl_checksum(ckresult);
		if (likely((ckresult == RMNET_MAP_CHECKSUM_OK) ||
			   (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
			skb->ip_summed |= CHECKSUM_UNNECESSARY;
		else if (ckresult !=
			 RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION &&
			 ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT &&
			 ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET &&
			 ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
			rmnet_kfree_skb
			(skb, RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
			return RX_HANDLER_CONSUMED;
		}
	}

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header_s));
	skb_trim(skb, len);
	__rmnet_data_set_skb_proto(skb);
	return __rmnet_deliver_skb(skb, ep);
}
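
/* Assumed MAPv1 frame layout parsed above (struct rmnet_map_header_s):
 *
 *	+------+--------+-----------+--------+---------------+---------+
 *	| CD:1 | rsvd:1 | pad_len:6 | mux_id | pkt_len (2 B) | payload |
 *	+------+--------+-----------+--------+---------------+---------+
 *
 * pkt_len covers the payload plus trailing pad bytes, hence the trim above
 * to RMNET_MAP_GET_LENGTH() - RMNET_MAP_GET_PAD() - tail_spacing.
 */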

/* rmnet_map_ingress_handler() - MAP ingress handler
 * @skb: Packet being received
 * @config: Physical endpoint configuration for the ingress device
 *
 * Called if and only if MAP is configured in the ingress device's ingress data
 * format. Deaggregation is done here; actual MAP processing is done in
 * _rmnet_map_ingress_handler().
 *
 * Return:
 * - RX_HANDLER_CONSUMED for aggregated packets
 * - RX_HANDLER_CONSUMED for dropped packets
 * - result of _rmnet_map_ingress_handler() for all other cases
 */
static rx_handler_result_t rmnet_map_ingress_handler
	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
	struct sk_buff *skbn;
	int rc, co = 0;

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
		trace_rmnet_start_deaggregation(skb);
		while ((skbn = rmnet_map_deaggregate(skb, config)) != NULL) {
			_rmnet_map_ingress_handler(skbn, config);
			co++;
		}
		trace_rmnet_end_deaggregation(skb, co);
		LOGD("De-aggregated %d packets", co);
		rmnet_stats_deagg_pkts(co);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);
		rc = RX_HANDLER_CONSUMED;
	} else {
		rc = _rmnet_map_ingress_handler(skb, config);
	}

	return rc;
}

/* rmnet_map_egress_handler() - MAP egress handler
 * @skb: Packet being sent
 * @config: Physical endpoint configuration for the egress device
 * @ep: logical endpoint configuration of the packet originator
 *	(e.g., RmNet virtual network device)
 * @orig_dev: The originator vnd device
 *
 * Called if and only if MAP is configured in the egress device's egress data
 * format. Packets with insufficient headroom for the MAP protocol are
 * dropped.
 *
 * Return:
 * - RMNET_MAP_SUCCESS if the caller should transmit the packet
 * - RMNET_MAP_CONSUMED if the packet was aggregated and consumed here
 * - 1 on failure; the packet is freed
 */
static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_phys_ep_config *config,
				    struct rmnet_logical_ep_conf_s *ep,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_length, ckresult;
	struct rmnet_map_header_s *map_header;
	int non_linear_skb;

	additional_header_length = 0;

	required_headroom = sizeof(struct rmnet_map_header_s);
	if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
		required_headroom +=
			sizeof(struct rmnet_map_ul_checksum_header_s);
		additional_header_length +=
			sizeof(struct rmnet_map_ul_checksum_header_s);
	}

	LOGD("headroom of %d bytes", required_headroom);

	if (skb_headroom(skb) < required_headroom) {
		LOGE("Not enough headroom for %d bytes", required_headroom);
		kfree_skb(skb);
		return 1;
	}

	if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
		ckresult = rmnet_map_checksum_uplink_packet
				(skb, orig_dev, config->egress_data_format);
		trace_rmnet_map_checksum_uplink_packet(orig_dev, ckresult);
		rmnet_stats_ul_checksum(ckresult);
	}

	non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
			 skb_is_nonlinear(skb);

	if ((!(config->egress_data_format &
	       RMNET_EGRESS_FORMAT_AGGREGATION)) || non_linear_skb)
		map_header = rmnet_map_add_map_header
			(skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
	else
		map_header = rmnet_map_add_map_header
			(skb, additional_header_length, RMNET_MAP_ADD_PAD_BYTES);

	if (!map_header) {
		LOGD("%s", "Failed to add MAP header to egress packet");
		kfree_skb(skb);
		return 1;
	}

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
		if (ep->mux_id == 0xff)
			map_header->mux_id = 0;
		else
			map_header->mux_id = ep->mux_id;
	}

	skb->protocol = htons(ETH_P_MAP);

	if ((config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) &&
	    !non_linear_skb) {
		rmnet_map_aggregate(skb, config);
		return RMNET_MAP_CONSUMED;
	}

	return RMNET_MAP_SUCCESS;
}
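
/* Design note: non-linear GSO skbs bypass aggregation above, presumably
 * because rmnet_map_aggregate() copies packet data into a linear
 * aggregation buffer; they get an unpadded MAP header and are transmitted
 * immediately instead.
 */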

/* Ingress / Egress Entry Points */

/* rmnet_ingress_handler() - Ingress handler entry point
 * @skb: Packet being received
 *
 * Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 *
 * Return:
 * - RX_HANDLER_PASS if packet is not processed by handler (caller must
 *   deal with the packet)
 * - RX_HANDLER_CONSUMED if packet is forwarded or processed by MAP
 */
rx_handler_result_t rmnet_ingress_handler(struct sk_buff *skb)
{
	struct rmnet_phys_ep_config *config;
	struct net_device *dev;
	int rc;

	if (!skb)
		return RX_HANDLER_CONSUMED;

	dev = skb->dev;
	trace_rmnet_ingress_handler(skb);
	rmnet_print_packet(skb, dev->name, 'r');

	config = _rmnet_get_phys_ep_config(skb->dev);

	if (!config) {
		LOGD("%s is not associated with rmnet_data", skb->dev->name);
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	/* Sometimes devices operate in ethernet mode even though there is no
	 * ethernet header. This causes the skb->protocol to contain a bogus
	 * value and the skb->data pointer to be off by 14 bytes. Fix it if
	 * configured to do so
	 */
	if (config->ingress_data_format & RMNET_INGRESS_FIX_ETHERNET) {
		skb_push(skb, RMNET_ETHERNET_HEADER_LENGTH);
		__rmnet_data_set_skb_proto(skb);
	}

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
		rc = rmnet_map_ingress_handler(skb, config);
	} else {
		switch (ntohs(skb->protocol)) {
		case ETH_P_MAP:
			if (config->local_ep.rmnet_mode ==
			    RMNET_EPMODE_BRIDGE) {
				rc = rmnet_ingress_deliver_packet(skb, config);
			} else {
				LOGD("MAP packet on %s; MAP not set",
				     dev->name);
				rmnet_kfree_skb
				(skb,
				 RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD);
				rc = RX_HANDLER_CONSUMED;
			}
			break;

		case ETH_P_ARP:
		case ETH_P_IP:
		case ETH_P_IPV6:
			rc = rmnet_ingress_deliver_packet(skb, config);
			break;

		default:
			LOGD("Unknown skb->proto 0x%04X\n",
			     ntohs(skb->protocol) & 0xFFFF);
			rc = RX_HANDLER_PASS;
		}
	}

	return rc;
}

/* rmnet_rx_handler() - Rx handler callback registered with kernel
 * @pskb: Packet to be processed by rx handler
 *
 * Standard kernel-expected footprint for rx handlers. Calls
 * rmnet_ingress_handler() with correctly formatted arguments
 *
 * Return:
 * - Whatever rmnet_ingress_handler() returns
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	return rmnet_ingress_handler(*pskb);
}

/* rmnet_egress_handler() - Egress handler entry point
 * @skb: packet to transmit
 * @ep: logical endpoint configuration of the packet originator
 *	(e.g., RmNet virtual network device)
 *
 * Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb,
			  struct rmnet_logical_ep_conf_s *ep)
{
	struct rmnet_phys_ep_config *config;
	struct net_device *orig_dev;
	int rc;

	orig_dev = skb->dev;
	skb->dev = ep->egress_dev;

	config = _rmnet_get_phys_ep_config(skb->dev);

	if (!config) {
		LOGD("%s is not associated with rmnet_data", skb->dev->name);
		kfree_skb(skb);
		return;
	}

	LOGD("Packet going out on %s with egress format 0x%08X",
	     skb->dev->name, config->egress_data_format);

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
		switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
		case RMNET_MAP_CONSUMED:
			LOGD("%s", "MAP process consumed packet");
			return;

		case RMNET_MAP_SUCCESS:
			break;

		default:
			LOGD("MAP egress failed on packet on %s",
			     skb->dev->name);
			rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_EGR_MAPFAIL);
			return;
		}
	}

	if (ep->rmnet_mode == RMNET_EPMODE_VND)
		rmnet_vnd_tx_fixup(skb, orig_dev);

	rmnet_print_packet(skb, skb->dev->name, 't');
	trace_rmnet_egress_handler(skb);
	rc = dev_queue_xmit(skb);
	if (rc != 0) {
		LOGD("Failed to queue packet for transmission on [%s]",
		     skb->dev->name);
	}
	rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_EGRESS);
}
755}