/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/rmnet_data.h>
#include <linux/net_map.h>
#include <linux/netdev_features.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/rmnet_config.h>
#include "rmnet_data_private.h"
#include "rmnet_data_config.h"
#include "rmnet_data_vnd.h"
#include "rmnet_map.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_trace.h"
#include "rmnet_data_handlers.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_HANDLER);

#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
unsigned int dump_pkt_rx;
module_param(dump_pkt_rx, uint, 0644);
MODULE_PARM_DESC(dump_pkt_rx, "Dump packets entering ingress handler");

unsigned int dump_pkt_tx;
module_param(dump_pkt_tx, uint, 0644);
MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler");
#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */

static bool gro_flush_logic_on __read_mostly = 1;
module_param(gro_flush_logic_on, bool, 0644);
MODULE_PARM_DESC(gro_flush_logic_on, "If off, let GRO determine flushing");

static bool dynamic_gro_on __read_mostly = 1;
module_param(dynamic_gro_on, bool, 0644);
MODULE_PARM_DESC(dynamic_gro_on, "Toggle to turn on dynamic gro logic");

/* Time in nanoseconds. This value must be less than one second. */
static long lower_flush_time __read_mostly = 10000L;
module_param(lower_flush_time, long, 0644);
MODULE_PARM_DESC(lower_flush_time, "Min time value for flushing GRO");

static unsigned int lower_byte_limit __read_mostly = 7500;
module_param(lower_byte_limit, uint, 0644);
MODULE_PARM_DESC(lower_byte_limit, "Min byte count for flushing GRO");

unsigned int upper_flush_time __read_mostly = 15000;
module_param(upper_flush_time, uint, 0644);
MODULE_PARM_DESC(upper_flush_time, "Max time value for flushing GRO");

unsigned int upper_byte_limit __read_mostly = 10500;
module_param(upper_byte_limit, uint, 0644);
MODULE_PARM_DESC(upper_byte_limit, "Max byte count for flushing GRO");

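/* With the defaults above, the dynamic flush logic in
 * rmnet_optional_gro_flush() (below) behaves roughly as follows for each
 * logical endpoint -- an illustrative walk-through, not a guarantee:
 *
 *   - Start at the lower limits: 10000 ns window, 7500 byte threshold.
 *   - If >= 7500 bytes arrive inside the window: promote to the upper
 *     limits (15000 ns / 10500 bytes) and keep coalescing.
 *   - If the window elapses with fewer bytes: flush GRO and demote back
 *     to the lower limits.
 *   - If the window elapses with the byte threshold met: flush GRO and
 *     stay at the upper limits.
 *
 * With dynamic_gro_on disabled (but gro_flush_logic_on set), GRO is
 * simply flushed every lower_flush_time nanoseconds.
 */
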
#define RMNET_DATA_IP_VERSION_4 0x40
#define RMNET_DATA_IP_VERSION_6 0x60

#define RMNET_DATA_GRO_RCV_FAIL 0
#define RMNET_DATA_GRO_RCV_PASS 1

/* Helper Functions */

/* __rmnet_data_set_skb_proto() - Set skb->protocol field
 * @skb: packet being modified
 *
 * Peek at the first byte of the packet and set the protocol accordingly.
 * There is no good way to determine whether a packet has a MAP header. As of
 * this writing, the reserved bit in the MAP frame prevents it from
 * overlapping with IPv4/IPv6 frames. This could change in the future!
 */
static inline void __rmnet_data_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_DATA_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_DATA_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}
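
/* Worked example (illustrative): an IPv4 packet typically begins with
 * 0x45 (version 4, IHL 5), so 0x45 & 0xF0 == 0x40 -> ETH_P_IP. An IPv6
 * packet begins with 0x6X -> ETH_P_IPV6. The first byte of a MAP frame
 * carries the command/data, reserved, and pad-length bits, which (per
 * the comment above) keeps its top nibble from reading as 4 or 6, so
 * everything else falls through to ETH_P_MAP.
 */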

#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
/* rmnet_print_packet() - Print packet / diagnostics
 * @skb: Packet to print
 * @printlen: Number of bytes to print
 * @dev: Name of interface
 * @dir: Character representing direction (e.g. 'r' for receive)
 *
 * This function prints out raw bytes in an SKB. Use of this will have major
 * performance impacts and may even trigger watchdog resets if too much is
 * being printed. Hence, this should always be compiled out unless absolutely
 * needed.
 */
void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
{
	char buffer[200];
	unsigned int len, printlen;
	int i, buffloc = 0;

	switch (dir) {
	case 'r':
		printlen = dump_pkt_rx;
		break;

	case 't':
		printlen = dump_pkt_tx;
		break;

	default:
		printlen = 0;
		break;
	}

	if (!printlen)
		return;

	pr_err("[%s][%c] - PKT skb->len=%d skb->head=%pK skb->data=%pK\n",
	       dev, dir, skb->len, (void *)skb->head, (void *)skb->data);
	pr_err("[%s][%c] - PKT skb->tail=%pK skb->end=%pK\n",
	       dev, dir, skb_tail_pointer(skb), skb_end_pointer(skb));

	if (skb->len > 0)
		len = skb->len;
	else
		len = ((unsigned int)(uintptr_t)skb->end) -
		      ((unsigned int)(uintptr_t)skb->data);

	pr_err("[%s][%c] - PKT len: %d, printing first %d bytes\n",
	       dev, dir, len, printlen);

	memset(buffer, 0, sizeof(buffer));
	for (i = 0; (i < printlen) && (i < len); i++) {
		if ((i % 16) == 0) {
			pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
			memset(buffer, 0, sizeof(buffer));
			buffloc = 0;
			buffloc += snprintf(&buffer[buffloc],
					    sizeof(buffer) - buffloc, "%04X:",
					    i);
		}

		buffloc += snprintf(&buffer[buffloc], sizeof(buffer) - buffloc,
				    " %02x", skb->data[i]);
	}
	pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
}
#else
void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
{
}
#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */

/* Generic handler */

/* rmnet_bridge_handler() - Bridge related functionality
 *
 * Return:
 * - RX_HANDLER_CONSUMED in all cases
 */
static rx_handler_result_t rmnet_bridge_handler
	(struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
{
	if (!ep->egress_dev) {
		LOGD("Missing egress device for packet arriving on %s",
		     skb->dev->name);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_BRDG_NO_EGRESS);
	} else {
		rmnet_egress_handler(skb, ep);
	}

	return RX_HANDLER_CONSUMED;
}

/* RX/TX Fixup */

/* rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
 * @skb: Socket buffer ("packet") to modify
 * @dev: Virtual network device
 *
 * Additional VND specific packet processing for ingress packets
 *
 * Return: void
 */
static void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}

/* rmnet_vnd_tx_fixup() - Virtual Network Device transmit fixup hook
 * @skb: Socket buffer ("packet") to modify
 * @dev: Virtual network device
 *
 * Additional VND specific packet processing for egress packets
 *
 * Return: void
 */
static void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
}

/* rmnet_check_skb_can_gro() - Check whether an skb can be passed to GRO
 *
 * Determines whether to pass the skb to the GRO handler napi_gro_receive() or
 * to handle it normally by passing it to netif_receive_skb().
 *
 * Warning:
 * This assumes that only TCP packets can be coalesced by the GRO handler which
 * is not true in general. We lose the ability to use GRO for cases like UDP
 * encapsulation protocols.
 *
 * Return:
 * - RMNET_DATA_GRO_RCV_FAIL if packet is sent to netif_receive_skb()
 * - RMNET_DATA_GRO_RCV_PASS if packet is sent to napi_gro_receive()
 */
static int rmnet_check_skb_can_gro(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_DATA_IP_VERSION_4:
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			return RMNET_DATA_GRO_RCV_PASS;
		break;
	case RMNET_DATA_IP_VERSION_6:
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			return RMNET_DATA_GRO_RCV_PASS;
		/* Fall through */
	}

	return RMNET_DATA_GRO_RCV_FAIL;
}
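
/* For example, a TCP segment in a plain IPv4 packet returns
 * RMNET_DATA_GRO_RCV_PASS and is handed to napi_gro_receive(), while a
 * UDP datagram (e.g. QUIC) returns RMNET_DATA_GRO_RCV_FAIL and goes
 * straight to netif_receive_skb() -- the trade-off called out in the
 * warning above. IPv6 extension headers likewise defeat the nexthdr
 * check and fall back to the non-GRO path.
 */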

/* rmnet_optional_gro_flush() - Check if GRO handler needs to flush now
 *
 * Determines whether the GRO handler needs to flush the packets it has
 * coalesced so far.
 *
 * Tuning these parameters trades TCP slow start performance against the GRO
 * coalesce ratio.
 */
static void rmnet_optional_gro_flush(struct napi_struct *napi,
				     struct rmnet_logical_ep_conf_s *ep,
				     unsigned int skb_size)
{
	struct timespec curr_time, diff;

	if (!gro_flush_logic_on)
		return;

	if (unlikely(ep->last_flush_time.tv_sec == 0)) {
		getnstimeofday(&ep->last_flush_time);
		ep->flush_byte_count = 0;
		ep->curr_time_limit = lower_flush_time;
		ep->curr_byte_threshold = lower_byte_limit;
	} else {
		getnstimeofday(&curr_time);
		diff = timespec_sub(curr_time, ep->last_flush_time);
		ep->flush_byte_count += skb_size;

		if (dynamic_gro_on) {
			if ((!(diff.tv_sec > 0) || diff.tv_nsec <=
			     ep->curr_time_limit) &&
			    ep->flush_byte_count >=
			    ep->curr_byte_threshold) {
				/* Processed many bytes in a small time window.
				 * No longer need to flush so often and we can
				 * increase our byte limit
				 */
				ep->curr_time_limit = upper_flush_time;
				ep->curr_byte_threshold = upper_byte_limit;
			} else if ((diff.tv_sec > 0 ||
				    diff.tv_nsec > ep->curr_time_limit) &&
				   ep->flush_byte_count <
				   ep->curr_byte_threshold) {
				/* We have hit our time limit without
				 * receiving many bytes. Demote ourselves to
				 * the lowest limits and flush
				 */
				napi_gro_flush(napi, false);
				ep->last_flush_time = curr_time;
				ep->flush_byte_count = 0;
				ep->curr_time_limit = lower_flush_time;
				ep->curr_byte_threshold = lower_byte_limit;
			} else if ((diff.tv_sec > 0 ||
				    diff.tv_nsec > ep->curr_time_limit) &&
				   ep->flush_byte_count >=
				   ep->curr_byte_threshold) {
				/* Above byte and time limit, therefore we can
				 * move/maintain our limits to be the max
				 * and flush
				 */
				napi_gro_flush(napi, false);
				ep->last_flush_time = curr_time;
				ep->flush_byte_count = 0;
				ep->curr_time_limit = upper_flush_time;
				ep->curr_byte_threshold = upper_byte_limit;
			}
			/* else, below time limit and below
			 * byte thresh, so change nothing
			 */
		} else if (diff.tv_sec > 0 ||
			   diff.tv_nsec >= lower_flush_time) {
			napi_gro_flush(napi, false);
			ep->last_flush_time = curr_time;
			ep->flush_byte_count = 0;
		}
	}
}

/* __rmnet_deliver_skb() - Deliver skb
 *
 * Determines where to deliver skb. Options are: consume by network stack,
 * pass to bridge handler, or pass to virtual network device
 *
 * Return:
 * - RX_HANDLER_CONSUMED if packet forwarded or dropped
 * - RX_HANDLER_PASS if packet is to be consumed by network stack as-is
 */
static rx_handler_result_t __rmnet_deliver_skb
	(struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
{
	struct napi_struct *napi = NULL;
	gro_result_t gro_res;
	unsigned int skb_size;

	trace___rmnet_deliver_skb(skb);
	switch (ep->rmnet_mode) {
	case RMNET_EPMODE_VND:
		skb_reset_transport_header(skb);
		skb_reset_network_header(skb);
		rmnet_vnd_rx_fixup(skb, skb->dev);

		skb->pkt_type = PACKET_HOST;
		skb_set_mac_header(skb, 0);

		if (rmnet_check_skb_can_gro(skb) &&
		    (skb->dev->features & NETIF_F_GRO)) {
			napi = get_current_napi_context();

			skb_size = skb->len;
			skb_get_hash(skb);
			gro_res = napi_gro_receive(napi, skb);
			trace_rmnet_gro_downlink(gro_res);
			rmnet_optional_gro_flush(napi, ep, skb_size);
		} else {
			netif_receive_skb(skb);
		}
		return RX_HANDLER_CONSUMED;

	case RMNET_EPMODE_NONE:
		return RX_HANDLER_PASS;

	case RMNET_EPMODE_BRIDGE:
		return rmnet_bridge_handler(skb, ep);

	default:
		LOGD("Unknown ep mode %d", ep->rmnet_mode);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_DELIVER_NO_EP);
		return RX_HANDLER_CONSUMED;
	}
}

/* rmnet_ingress_deliver_packet() - Ingress handler for raw IP and bridged
 *				    MAP packets.
 * @skb: Packet needing a destination.
 * @config: Physical end point configuration that the packet arrived on.
 *
 * Return:
 * - RX_HANDLER_CONSUMED if packet forwarded/dropped
 * - RX_HANDLER_PASS if packet should be passed up the stack by caller
 */
static rx_handler_result_t rmnet_ingress_deliver_packet
	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
	if (!config) {
		LOGD("%s", "NULL physical EP provided");
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	if (!(config->local_ep.refcount)) {
		LOGD("Packet on %s has no local endpoint configuration",
		     skb->dev->name);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_IPINGRESS_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	skb->dev = config->local_ep.egress_dev;

	return __rmnet_deliver_skb(skb, &config->local_ep);
}

/* MAP handler */

/* _rmnet_map_ingress_handler() - Actual MAP ingress handler
 * @skb: Packet being received
 * @config: Physical endpoint configuration for the ingress device
 *
 * Most MAP ingress functions are processed here. Packets are processed
 * individually; aggregated packets should use rmnet_map_ingress_handler()
 *
 * Return:
 * - RX_HANDLER_CONSUMED if packet is dropped
 * - result of __rmnet_deliver_skb() for all other cases
 */
static rx_handler_result_t _rmnet_map_ingress_handler
	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
	struct rmnet_logical_ep_conf_s *ep;
	u8 mux_id;
	u16 len;
	int ckresult;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (config->ingress_data_format
		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
			return rmnet_map_command(skb, config);

		LOGM("MAP command packet on %s; %s", skb->dev->name,
		     "Not configured for MAP commands");
		rmnet_kfree_skb(skb,
				RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC);
		return RX_HANDLER_CONSUMED;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	len = RMNET_MAP_GET_LENGTH(skb)
	      - RMNET_MAP_GET_PAD(skb)
	      - config->tail_spacing;

	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
		LOGD("Got packet on %s with bad mux id %d",
		     skb->dev->name, mux_id);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX);
		return RX_HANDLER_CONSUMED;
	}

	ep = &config->muxed_ep[mux_id];
	if (!ep->refcount) {
		LOGD("Packet on %s:%d; has no logical endpoint config",
		     skb->dev->name, mux_id);

		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	skb->dev = ep->egress_dev;

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
		ckresult = rmnet_map_checksum_downlink_packet(skb);
		trace_rmnet_map_checksum_downlink_packet(skb, ckresult);
		rmnet_stats_dl_checksum(ckresult);
		if (likely((ckresult == RMNET_MAP_CHECKSUM_OK) ||
			   (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
			skb->ip_summed |= CHECKSUM_UNNECESSARY;
		else if (ckresult !=
			 RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION &&
			 ckresult != RMNET_MAP_CHECKSUM_VALIDATION_FAILED &&
			 ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT &&
			 ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET &&
			 ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
			rmnet_kfree_skb
			(skb, RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
			return RX_HANDLER_CONSUMED;
		}
	}

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header_s));
	skb_trim(skb, len);
	__rmnet_data_set_skb_proto(skb);
	return __rmnet_deliver_skb(skb, ep);
}
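
/* Worked example of the trim math above (illustrative; it assumes the
 * MAP length field counts payload plus padding, which is what the
 * arithmetic implies): a 1398 byte IP packet padded with 2 bytes
 * arrives with pkt_len = 1400, pad = 2, tail_spacing = 0, so
 * len = 1400 - 2 - 0 = 1398. skb_pull() then drops the MAP header
 * (sizeof(struct rmnet_map_header_s)) and skb_trim() cuts the skb back
 * to the 1398 byte payload, discarding the padding.
 */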

/* rmnet_map_ingress_handler() - MAP ingress handler
 * @skb: Packet being received
 * @config: Physical endpoint configuration for the ingress device
 *
 * Called if and only if MAP is configured in the ingress device's ingress data
 * format. Deaggregation is done here; actual MAP processing is done in
 * _rmnet_map_ingress_handler().
 *
 * Return:
 * - RX_HANDLER_CONSUMED for aggregated packets
 * - RX_HANDLER_CONSUMED for dropped packets
 * - result of _rmnet_map_ingress_handler() for all other cases
 */
static rx_handler_result_t rmnet_map_ingress_handler
	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
	struct sk_buff *skbn;
	int rc;

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
		trace_rmnet_start_deaggregation(skb);
		while ((skbn = rmnet_map_deaggregate(skb, config)) != NULL)
			_rmnet_map_ingress_handler(skbn, config);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);
		rc = RX_HANDLER_CONSUMED;
	} else {
		rc = _rmnet_map_ingress_handler(skb, config);
	}

	return rc;
}
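
/* A minimal sketch of the deaggregation contract assumed above:
 * rmnet_map_deaggregate() is expected to carve one MAP frame per call
 * out of the aggregate skb (allocating a fresh skb for it) and return
 * NULL once the aggregate is exhausted, at which point the parent skb
 * is freed here via rmnet_kfree_skb().
 */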

/* rmnet_map_egress_handler() - MAP egress handler
 * @skb: Packet being sent
 * @config: Physical endpoint configuration for the egress device
 * @ep: logical endpoint configuration of the packet originator
 *	(e.g. RmNet virtual network device)
 * @orig_dev: The originator vnd device
 *
 * Called if and only if MAP is configured in the egress device's egress data
 * format. Drops the packet if there is insufficient headroom for the MAP
 * protocol headers.
 *
 * Return:
 * - RMNET_MAP_SUCCESS if the packet is ready to be transmitted
 * - RMNET_MAP_CONSUMED if the packet was absorbed by aggregation
 * - 1 on failure (the skb is freed)
 */
static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_phys_ep_config *config,
				    struct rmnet_logical_ep_conf_s *ep,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_length, ckresult;
	struct rmnet_map_header_s *map_header;
	int non_linear_skb;
	int csum_required = (config->egress_data_format &
			     RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
			    (config->egress_data_format &
			     RMNET_EGRESS_FORMAT_MAP_CKSUMV4);

	additional_header_length = 0;

	required_headroom = sizeof(struct rmnet_map_header_s);
	if (csum_required) {
		required_headroom +=
			sizeof(struct rmnet_map_ul_checksum_header_s);
		additional_header_length +=
			sizeof(struct rmnet_map_ul_checksum_header_s);
	}

	LOGD("headroom of %d bytes", required_headroom);

	if (skb_headroom(skb) < required_headroom) {
		LOGE("Not enough headroom for %d bytes", required_headroom);
		kfree_skb(skb);
		return 1;
	}

	if (csum_required) {
		ckresult = rmnet_map_checksum_uplink_packet
				(skb, orig_dev, config->egress_data_format);
		trace_rmnet_map_checksum_uplink_packet(orig_dev, ckresult);
		rmnet_stats_ul_checksum(ckresult);
	}

	non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
			 skb_is_nonlinear(skb);

	if ((!(config->egress_data_format &
	       RMNET_EGRESS_FORMAT_AGGREGATION)) || csum_required ||
	    non_linear_skb)
		map_header = rmnet_map_add_map_header
			(skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
	else
		map_header = rmnet_map_add_map_header
			(skb, additional_header_length, RMNET_MAP_ADD_PAD_BYTES);

	if (!map_header) {
		LOGD("%s", "Failed to add MAP header to egress packet");
		kfree_skb(skb);
		return 1;
	}

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
		if (ep->mux_id == 0xff)
			map_header->mux_id = 0;
		else
			map_header->mux_id = ep->mux_id;
	}

	skb->protocol = htons(ETH_P_MAP);

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
		if (rmnet_ul_aggregation_skip(skb, required_headroom))
			return RMNET_MAP_SUCCESS;

		if (non_linear_skb)
			if (unlikely(__skb_linearize(skb)))
				return RMNET_MAP_SUCCESS;

		rmnet_map_aggregate(skb, config);
		return RMNET_MAP_CONSUMED;
	}

	return RMNET_MAP_SUCCESS;
}
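
/* Egress header-placement summary (restating the logic above): frames
 * that skip aggregation -- because aggregation is disabled, uplink
 * checksum offload is on, or the skb is non-linear with GSO -- get a
 * MAP header with no pad bytes; aggregation-eligible frames get a
 * padded MAP header and may be absorbed into an aggregate by
 * rmnet_map_aggregate(), in which case RMNET_MAP_CONSUMED tells the
 * caller not to transmit the skb itself.
 */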

/* Ingress / Egress Entry Points */

/* rmnet_ingress_handler() - Ingress handler entry point
 * @skb: Packet being received
 *
 * Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 *
 * Return:
 * - RX_HANDLER_PASS if packet is not processed by handler (caller must
 *   deal with the packet)
 * - RX_HANDLER_CONSUMED if packet is forwarded or processed by MAP
 */
rx_handler_result_t rmnet_ingress_handler(struct sk_buff *skb)
{
	struct rmnet_phys_ep_config *config;
	struct net_device *dev;
	int rc;

	if (!skb)
		return RX_HANDLER_CONSUMED;

	dev = skb->dev;
	trace_rmnet_ingress_handler(skb);
	rmnet_print_packet(skb, dev->name, 'r');

	config = _rmnet_get_phys_ep_config(skb->dev);

	if (!config) {
		LOGD("%s is not associated with rmnet_data", skb->dev->name);
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	/* Sometimes devices operate in ethernet mode even though there is no
	 * ethernet header. This causes the skb->protocol to contain a bogus
	 * value and the skb->data pointer to be off by 14 bytes. Fix it if
	 * configured to do so.
	 */
	if (config->ingress_data_format & RMNET_INGRESS_FIX_ETHERNET) {
		skb_push(skb, RMNET_ETHERNET_HEADER_LENGTH);
		__rmnet_data_set_skb_proto(skb);
	}

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
		rc = rmnet_map_ingress_handler(skb, config);
	} else {
		switch (ntohs(skb->protocol)) {
		case ETH_P_MAP:
			if (config->local_ep.rmnet_mode ==
			    RMNET_EPMODE_BRIDGE) {
				rc = rmnet_ingress_deliver_packet(skb, config);
			} else {
				LOGD("MAP packet on %s; MAP not set",
				     dev->name);
				rmnet_kfree_skb
				(skb,
				 RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD);
				rc = RX_HANDLER_CONSUMED;
			}
			break;

		case ETH_P_ARP:
		case ETH_P_IP:
		case ETH_P_IPV6:
			rc = rmnet_ingress_deliver_packet(skb, config);
			break;

		default:
			LOGD("Unknown skb->proto 0x%04X\n",
			     ntohs(skb->protocol) & 0xFFFF);
			rc = RX_HANDLER_PASS;
		}
	}

	return rc;
}

/* rmnet_rx_handler() - Rx handler callback registered with kernel
 * @pskb: Packet to be processed by rx handler
 *
 * Standard kernel-expected footprint for rx handlers. Calls
 * rmnet_ingress_handler() with correctly formatted arguments.
 *
 * Return:
 * - Whatever rmnet_ingress_handler() returns
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	return rmnet_ingress_handler(*pskb);
}
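
/* A sketch of how this hook is wired up (the exact call site lives in
 * the config layer, not in this file): when a physical device is
 * associated with rmnet_data, something equivalent to
 *
 *	rc = netdev_rx_handler_register(dev, rmnet_rx_handler, NULL);
 *
 * is expected to run under RTNL, after which the stack invokes
 * rmnet_rx_handler() for every frame received on that device.
 */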

/* rmnet_egress_handler() - Egress handler entry point
 * @skb: packet to transmit
 * @ep: logical endpoint configuration of the packet originator
 *	(e.g. RmNet virtual network device)
 *
 * Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb,
			  struct rmnet_logical_ep_conf_s *ep)
{
	struct rmnet_phys_ep_config *config;
	struct net_device *orig_dev;
	int rc;

	orig_dev = skb->dev;
	skb->dev = ep->egress_dev;

	config = _rmnet_get_phys_ep_config(skb->dev);

	if (!config) {
		LOGD("%s is not associated with rmnet_data", skb->dev->name);
		kfree_skb(skb);
		return;
	}

	LOGD("Packet going out on %s with egress format 0x%08X",
	     skb->dev->name, config->egress_data_format);

	if (ep->rmnet_mode == RMNET_EPMODE_VND)
		rmnet_vnd_tx_fixup(skb, orig_dev);

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
		switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
		case RMNET_MAP_CONSUMED:
			LOGD("%s", "MAP process consumed packet");
			return;

		case RMNET_MAP_SUCCESS:
			break;

		default:
			LOGD("MAP egress failed on packet on %s",
			     skb->dev->name);
			rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_EGR_MAPFAIL);
			return;
		}
	}

	rmnet_print_packet(skb, skb->dev->name, 't');
	trace_rmnet_egress_handler(skb);
	rc = dev_queue_xmit(skb);
	if (rc != 0) {
		LOGD("Failed to queue packet for transmission on [%s]",
		     skb->dev->name);
	}
	rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_EGRESS);
}