/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/rmnet_data.h>
#include <linux/net_map.h>
#include <linux/netdev_features.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/rmnet_config.h>
#include "rmnet_data_private.h"
#include "rmnet_data_config.h"
#include "rmnet_data_vnd.h"
#include "rmnet_map.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_trace.h"
#include "rmnet_data_handlers.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_HANDLER);

#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
unsigned int dump_pkt_rx;
module_param(dump_pkt_rx, uint, 0644);
MODULE_PARM_DESC(dump_pkt_rx, "Dump packets entering ingress handler");

unsigned int dump_pkt_tx;
module_param(dump_pkt_tx, uint, 0644);
MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler");
#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */

static bool gro_flush_logic_on __read_mostly = true;
module_param(gro_flush_logic_on, bool, 0644);
MODULE_PARM_DESC(gro_flush_logic_on, "If off, let GRO determine flushing");

static bool dynamic_gro_on __read_mostly = true;
module_param(dynamic_gro_on, bool, 0644);
MODULE_PARM_DESC(dynamic_gro_on, "Toggle to turn on dynamic GRO logic");

/* Time in nanoseconds. This number must be less than one second. */
static long lower_flush_time __read_mostly = 10000L;
module_param(lower_flush_time, long, 0644);
MODULE_PARM_DESC(lower_flush_time, "Min time value for flushing GRO");

static unsigned int lower_byte_limit __read_mostly = 7500;
module_param(lower_byte_limit, uint, 0644);
MODULE_PARM_DESC(lower_byte_limit, "Min byte count for flushing GRO");

unsigned int upper_flush_time __read_mostly = 15000;
module_param(upper_flush_time, uint, 0644);
MODULE_PARM_DESC(upper_flush_time, "Max time value for flushing GRO");

unsigned int upper_byte_limit __read_mostly = 10500;
module_param(upper_byte_limit, uint, 0644);
MODULE_PARM_DESC(upper_byte_limit, "Max byte count for flushing GRO");
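
/* Note (not from the original source): all of the above are plain module
 * parameters with 0644 permissions, so on a typical build they can be tuned
 * at runtime, e.g. under /sys/module/rmnet_data/parameters/ (path assumes
 * this file is built into a module named rmnet_data).
 */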

#define RMNET_DATA_IP_VERSION_4 0x40
#define RMNET_DATA_IP_VERSION_6 0x60

#define RMNET_DATA_GRO_RCV_FAIL 0
#define RMNET_DATA_GRO_RCV_PASS 1

/* Helper Functions */

/* __rmnet_data_set_skb_proto() - Set skb->protocol field
 * @skb: packet being modified
 *
 * Peek at the first byte of the packet and set the protocol. There is no
 * good way to determine if a packet has a MAP header. As of this writing,
 * the reserved bit in the MAP frame will prevent it from overlapping with
 * IPv4/IPv6 frames. This could change in the future!
 */
static inline void __rmnet_data_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_DATA_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_DATA_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}
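
/* Illustrative values (not taken from the driver itself): a typical IPv4
 * header begins 0x45 (version 4, IHL 5), so 0x45 & 0xF0 == 0x40 -> ETH_P_IP;
 * any IPv6 header begins with nibble 6 -> ETH_P_IPV6; everything else,
 * such as a MAP frame, falls through to ETH_P_MAP.
 */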

#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
/* rmnet_print_packet() - Print packet / diagnostics
 * @skb: Packet to print
 * @dev: Name of interface
 * @dir: Character representing direction (e.g., 'r' for receive)
 *
 * Prints out raw bytes in an SKB; the number of bytes printed is taken from
 * the dump_pkt_rx/dump_pkt_tx module parameters. Use of this will have major
 * performance impacts and may even trigger watchdog resets if too much is
 * being printed. Hence, this should always be compiled out unless absolutely
 * needed.
 */
void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
{
	char buffer[200];
	unsigned int len, printlen;
	int i, buffloc = 0;

	switch (dir) {
	case 'r':
		printlen = dump_pkt_rx;
		break;

	case 't':
		printlen = dump_pkt_tx;
		break;

	default:
		printlen = 0;
		break;
	}

	if (!printlen)
		return;

	pr_err("[%s][%c] - PKT skb->len=%d skb->head=%pK skb->data=%pK\n",
	       dev, dir, skb->len, (void *)skb->head, (void *)skb->data);
	pr_err("[%s][%c] - PKT skb->tail=%pK skb->end=%pK\n",
	       dev, dir, skb_tail_pointer(skb), skb_end_pointer(skb));

	if (skb->len > 0)
		len = skb->len;
	else
		len = ((unsigned int)(uintptr_t)skb->end) -
		      ((unsigned int)(uintptr_t)skb->data);

	pr_err("[%s][%c] - PKT len: %d, printing first %d bytes\n",
	       dev, dir, len, printlen);

	memset(buffer, 0, sizeof(buffer));
	for (i = 0; (i < printlen) && (i < len); i++) {
		if ((i % 16) == 0) {
			pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
			memset(buffer, 0, sizeof(buffer));
			buffloc = 0;
			buffloc += snprintf(&buffer[buffloc],
					    sizeof(buffer) - buffloc, "%04X:",
					    i);
		}

		buffloc += snprintf(&buffer[buffloc], sizeof(buffer) - buffloc,
				    " %02x", skb->data[i]);
	}
	pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
}
#else
void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
{
}
#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */

/* Generic handler */

/* rmnet_bridge_handler() - Bridge related functionality
 * @skb: Packet being bridged
 * @ep: Logical endpoint configuration the packet arrived on
 *
 * Return:
 *      - RX_HANDLER_CONSUMED in all cases
 */
static rx_handler_result_t rmnet_bridge_handler
	(struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
{
	if (!ep->egress_dev) {
		LOGD("Missing egress device for packet arriving on %s",
		     skb->dev->name);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_BRDG_NO_EGRESS);
	} else {
		rmnet_egress_handler(skb, ep);
	}

	return RX_HANDLER_CONSUMED;
}

/* RX/TX Fixup */

/* rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
 * @skb: Socket buffer ("packet") to modify
 * @dev: Virtual network device
 *
 * Additional VND specific packet processing for ingress packets
 *
 * Return: void
 */
static void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}

/* rmnet_vnd_tx_fixup() - Virtual Network Device transmit fixup hook
 * @skb: Socket buffer ("packet") to modify
 * @dev: Virtual network device
 *
 * Additional VND specific packet processing for egress packets
 *
 * Return: void
 */
static void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
}

/* rmnet_check_skb_can_gro() - Check if skb can be passed through GRO handler
 * @skb: Packet being checked
 *
 * Determines whether to pass the skb to the GRO handler napi_gro_receive() or
 * to handle it normally by passing it to netif_receive_skb().
 *
 * Warning:
 * This assumes that only TCP packets can be coalesced by the GRO handler,
 * which is not true in general. We lose the ability to use GRO for cases
 * like UDP encapsulation protocols.
 *
 * Return:
 *      - RMNET_DATA_GRO_RCV_FAIL if packet is sent to netif_receive_skb()
 *      - RMNET_DATA_GRO_RCV_PASS if packet is sent to napi_gro_receive()
 */
static int rmnet_check_skb_can_gro(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_DATA_IP_VERSION_4:
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			return RMNET_DATA_GRO_RCV_PASS;
		break;
	case RMNET_DATA_IP_VERSION_6:
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			return RMNET_DATA_GRO_RCV_PASS;
		break;
	}

	return RMNET_DATA_GRO_RCV_FAIL;
}
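
/* Concrete consequence of the warning above (illustrative, not from the
 * original source): UDP-based flows such as QUIC or VXLAN encapsulation
 * always fail this check and take the netif_receive_skb() path, even on
 * kernels whose GRO engine could coalesce them.
 */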

/* rmnet_optional_gro_flush() - Check if GRO handler needs to flush now
 * @napi: NAPI instance the packets are arriving on
 * @ep: Logical endpoint the packets belong to
 * @skb_size: Length of the packet most recently handed to GRO
 *
 * Determines whether the GRO handler needs to flush the packets it has
 * coalesced so far.
 *
 * Tuning these parameters trades TCP slow start performance against GRO
 * coalesce ratio.
 */
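/* A sketch of the dynamic_gro_on decision table implemented below:
 *
 *   time window open,   bytes >= threshold -> promote to upper limits
 *   time window closed, bytes <  threshold -> flush, demote to lower limits
 *   time window closed, bytes >= threshold -> flush, keep/set upper limits
 *   time window open,   bytes <  threshold -> change nothing
 */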
static void rmnet_optional_gro_flush(struct napi_struct *napi,
				     struct rmnet_logical_ep_conf_s *ep,
				     unsigned int skb_size)
{
	struct timespec curr_time, diff;

	if (!gro_flush_logic_on)
		return;

	if (unlikely(ep->last_flush_time.tv_sec == 0)) {
		getnstimeofday(&ep->last_flush_time);
		ep->flush_byte_count = 0;
		ep->curr_time_limit = lower_flush_time;
		ep->curr_byte_threshold = lower_byte_limit;
	} else {
		getnstimeofday(&curr_time);
		diff = timespec_sub(curr_time, ep->last_flush_time);
		ep->flush_byte_count += skb_size;

		if (dynamic_gro_on) {
			if ((!(diff.tv_sec > 0) || diff.tv_nsec <=
			     ep->curr_time_limit) &&
			    ep->flush_byte_count >=
			    ep->curr_byte_threshold) {
				/* Processed many bytes in a small time window.
				 * No longer need to flush so often and we can
				 * increase our byte limit.
				 */
				ep->curr_time_limit = upper_flush_time;
				ep->curr_byte_threshold = upper_byte_limit;
			} else if ((diff.tv_sec > 0 ||
				    diff.tv_nsec > ep->curr_time_limit) &&
				   ep->flush_byte_count <
				   ep->curr_byte_threshold) {
				/* We have hit our time limit and are not
				 * receiving many bytes. Demote ourselves to
				 * the lowest limits and flush.
				 */
				napi_gro_flush(napi, false);
				ep->last_flush_time = curr_time;
				ep->flush_byte_count = 0;
				ep->curr_time_limit = lower_flush_time;
				ep->curr_byte_threshold = lower_byte_limit;
			} else if ((diff.tv_sec > 0 ||
				    diff.tv_nsec > ep->curr_time_limit) &&
				   ep->flush_byte_count >=
				   ep->curr_byte_threshold) {
				/* Above byte and time limit, therefore we can
				 * move/maintain our limits to be the max
				 * and flush.
				 */
				napi_gro_flush(napi, false);
				ep->last_flush_time = curr_time;
				ep->flush_byte_count = 0;
				ep->curr_time_limit = upper_flush_time;
				ep->curr_byte_threshold = upper_byte_limit;
			}
			/* else, below time limit and below byte threshold,
			 * so change nothing.
			 */
		} else if (diff.tv_sec > 0 ||
			   diff.tv_nsec >= lower_flush_time) {
			napi_gro_flush(napi, false);
			ep->last_flush_time = curr_time;
			ep->flush_byte_count = 0;
		}
	}
}

/* __rmnet_deliver_skb() - Deliver skb
 *
 * Determines where to deliver skb. Options are: consume by network stack,
 * pass to bridge handler, or pass to virtual network device
 *
 * Return:
 *      - RX_HANDLER_CONSUMED if packet forwarded or dropped
 *      - RX_HANDLER_PASS if packet is to be consumed by network stack as-is
 */
static rx_handler_result_t __rmnet_deliver_skb
	(struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
{
	struct napi_struct *napi = NULL;
	gro_result_t gro_res;
	unsigned int skb_size;

	trace___rmnet_deliver_skb(skb);
	switch (ep->rmnet_mode) {
	case RMNET_EPMODE_VND:
		skb_reset_transport_header(skb);
		skb_reset_network_header(skb);
		rmnet_vnd_rx_fixup(skb, skb->dev);

		skb->pkt_type = PACKET_HOST;
		skb_set_mac_header(skb, 0);

		if (rmnet_check_skb_can_gro(skb) &&
		    (skb->dev->features & NETIF_F_GRO)) {
			napi = get_current_napi_context();

			skb_size = skb->len;
			gro_res = napi_gro_receive(napi, skb);
			trace_rmnet_gro_downlink(gro_res);
			rmnet_optional_gro_flush(napi, ep, skb_size);
		} else {
			netif_receive_skb(skb);
		}
		return RX_HANDLER_CONSUMED;

	case RMNET_EPMODE_NONE:
		return RX_HANDLER_PASS;

	case RMNET_EPMODE_BRIDGE:
		return rmnet_bridge_handler(skb, ep);

	default:
		LOGD("Unknown ep mode %d", ep->rmnet_mode);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_DELIVER_NO_EP);
		return RX_HANDLER_CONSUMED;
	}
}

/* rmnet_ingress_deliver_packet() - Ingress handler for raw IP and bridged
 *                                  MAP packets
 * @skb: Packet needing a destination
 * @config: Physical end point configuration that the packet arrived on
 *
 * Return:
 *      - RX_HANDLER_CONSUMED if packet forwarded/dropped
 *      - RX_HANDLER_PASS if packet should be passed up the stack by caller
 */
static rx_handler_result_t rmnet_ingress_deliver_packet
	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
	if (!config) {
		LOGD("%s", "NULL physical EP provided");
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	if (!(config->local_ep.refcount)) {
		LOGD("Packet on %s has no local endpoint configuration",
		     skb->dev->name);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_IPINGRESS_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	skb->dev = config->local_ep.egress_dev;

	return __rmnet_deliver_skb(skb, &config->local_ep);
}

/* MAP handler */
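
/* Rough shape of a MAP (QMAP) data frame as parsed below -- a sketch only;
 * see struct rmnet_map_header_s and the RMNET_MAP_GET_* accessors for the
 * authoritative bit layout:
 *
 *   byte 0    : command/data bit, reserved bit, pad length
 *   byte 1    : mux id (selects the logical endpoint)
 *   bytes 2-3 : packet length, including any trailing pad
 */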

/* _rmnet_map_ingress_handler() - Actual MAP ingress handler
 * @skb: Packet being received
 * @config: Physical endpoint configuration for the ingress device
 *
 * Most MAP ingress functions are processed here. Packets are processed
 * individually; aggregated packets should use rmnet_map_ingress_handler()
 *
 * Return:
 *      - RX_HANDLER_CONSUMED if packet is dropped
 *      - result of __rmnet_deliver_skb() for all other cases
 */
static rx_handler_result_t _rmnet_map_ingress_handler
	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
	struct rmnet_logical_ep_conf_s *ep;
	u8 mux_id;
	u16 len;
	int ckresult;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (config->ingress_data_format
		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
			return rmnet_map_command(skb, config);

		LOGM("MAP command packet on %s; %s", skb->dev->name,
		     "Not configured for MAP commands");
		rmnet_kfree_skb(skb,
				RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC);
		return RX_HANDLER_CONSUMED;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	len = RMNET_MAP_GET_LENGTH(skb)
	      - RMNET_MAP_GET_PAD(skb)
	      - config->tail_spacing;

	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
		LOGD("Got packet on %s with bad mux id %d",
		     skb->dev->name, mux_id);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX);
		return RX_HANDLER_CONSUMED;
	}

	ep = &config->muxed_ep[mux_id];

	skb->dev = ep->egress_dev;

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
		ckresult = rmnet_map_checksum_downlink_packet(skb);
		trace_rmnet_map_checksum_downlink_packet(skb, ckresult);
		rmnet_stats_dl_checksum(ckresult);
		if (likely((ckresult == RMNET_MAP_CHECKSUM_OK) ||
			   (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
			skb->ip_summed |= CHECKSUM_UNNECESSARY;
		else if (ckresult !=
			 RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION &&
			 ckresult != RMNET_MAP_CHECKSUM_VALIDATION_FAILED &&
			 ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT &&
			 ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET &&
			 ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
			rmnet_kfree_skb
			(skb, RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
			return RX_HANDLER_CONSUMED;
		}
	}

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header_s));
	skb_trim(skb, len);
	__rmnet_data_set_skb_proto(skb);
	return __rmnet_deliver_skb(skb, ep);
}

/* rmnet_map_ingress_handler() - MAP ingress handler
 * @skb: Packet being received
 * @config: Physical endpoint configuration for the ingress device
 *
 * Called if and only if MAP is configured in the ingress device's ingress data
 * format. Deaggregation is done here, actual MAP processing is done in
 * _rmnet_map_ingress_handler().
 *
 * Return:
 *      - RX_HANDLER_CONSUMED for aggregated packets
 *      - RX_HANDLER_CONSUMED for dropped packets
 *      - result of _rmnet_map_ingress_handler() for all other cases
 */
static rx_handler_result_t rmnet_map_ingress_handler
	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
	struct sk_buff *skbn;
	int rc;

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
		trace_rmnet_start_deaggregation(skb);
		while ((skbn = rmnet_map_deaggregate(skb, config)) != NULL)
			_rmnet_map_ingress_handler(skbn, config);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);
		rc = RX_HANDLER_CONSUMED;
	} else {
		rc = _rmnet_map_ingress_handler(skb, config);
	}

	return rc;
}

/* rmnet_map_egress_handler() - MAP egress handler
 * @skb: Packet being sent
 * @config: Physical endpoint configuration for the egress device
 * @ep: logical endpoint configuration of the packet originator
 *      (e.g., RmNet virtual network device)
 * @orig_dev: The originator vnd device
 *
 * Called if and only if MAP is configured in the egress device's egress data
 * format. If there is insufficient headroom for the MAP header(s), the packet
 * is dropped rather than expanded.
 *
 * Return:
 *      - RMNET_MAP_SUCCESS if the packet should be transmitted as-is
 *      - RMNET_MAP_CONSUMED if the packet was absorbed by UL aggregation
 *      - 1 on failure (packet freed)
 */
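/* Sketch of the resulting uplink frame on the non-aggregated path below
 * (actual sizes come from the struct definitions, not guaranteed here):
 *
 *   [MAP header | UL checksum header (only if CKSUMV3/V4) | IP packet]
 */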
static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_phys_ep_config *config,
				    struct rmnet_logical_ep_conf_s *ep,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_length, ckresult;
	struct rmnet_map_header_s *map_header;
	int non_linear_skb;
	int csum_required = (config->egress_data_format &
			     RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
			    (config->egress_data_format &
			     RMNET_EGRESS_FORMAT_MAP_CKSUMV4);

	additional_header_length = 0;

	required_headroom = sizeof(struct rmnet_map_header_s);
	if (csum_required) {
		required_headroom +=
			sizeof(struct rmnet_map_ul_checksum_header_s);
		additional_header_length +=
			sizeof(struct rmnet_map_ul_checksum_header_s);
	}

	LOGD("headroom of %d bytes", required_headroom);

	if (skb_headroom(skb) < required_headroom) {
		LOGE("Not enough headroom for %d bytes", required_headroom);
		kfree_skb(skb);
		return 1;
	}

	if (csum_required) {
		ckresult = rmnet_map_checksum_uplink_packet
				(skb, orig_dev, config->egress_data_format);
		trace_rmnet_map_checksum_uplink_packet(orig_dev, ckresult);
		rmnet_stats_ul_checksum(ckresult);
	}

	non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
			 skb_is_nonlinear(skb);

	if ((!(config->egress_data_format &
	       RMNET_EGRESS_FORMAT_AGGREGATION)) || csum_required ||
	    non_linear_skb)
		map_header = rmnet_map_add_map_header
			(skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
	else
		map_header = rmnet_map_add_map_header
			(skb, additional_header_length, RMNET_MAP_ADD_PAD_BYTES);

	if (!map_header) {
		LOGD("%s", "Failed to add MAP header to egress packet");
		kfree_skb(skb);
		return 1;
	}

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
		if (ep->mux_id == 0xff)
			map_header->mux_id = 0;
		else
			map_header->mux_id = ep->mux_id;
	}

	skb->protocol = htons(ETH_P_MAP);

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
		if (rmnet_ul_aggregation_skip(skb, required_headroom))
			return RMNET_MAP_SUCCESS;

		if (non_linear_skb)
			if (unlikely(__skb_linearize(skb)))
				return RMNET_MAP_SUCCESS;

		rmnet_map_aggregate(skb, config);
		return RMNET_MAP_CONSUMED;
	}

	return RMNET_MAP_SUCCESS;
}

/* Ingress / Egress Entry Points */

/* rmnet_ingress_handler() - Ingress handler entry point
 * @skb: Packet being received
 *
 * Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 *
 * Return:
 *      - RX_HANDLER_PASS if packet is not processed by handler (caller must
 *        deal with the packet)
 *      - RX_HANDLER_CONSUMED if packet is forwarded or processed by MAP
 */
rx_handler_result_t rmnet_ingress_handler(struct sk_buff *skb)
{
	struct rmnet_phys_ep_config *config;
	struct net_device *dev;
	int rc;

	if (!skb)
		return RX_HANDLER_CONSUMED;

	dev = skb->dev;
	trace_rmnet_ingress_handler(skb);
	rmnet_print_packet(skb, dev->name, 'r');

	config = _rmnet_get_phys_ep_config(skb->dev);

	if (!config) {
		LOGD("%s is not associated with rmnet_data", skb->dev->name);
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	/* Sometimes devices operate in ethernet mode even though there is no
	 * ethernet header. This causes the skb->protocol to contain a bogus
	 * value and the skb->data pointer to be off by 14 bytes. Fix it if
	 * configured to do so.
	 */
	if (config->ingress_data_format & RMNET_INGRESS_FIX_ETHERNET) {
		skb_push(skb, RMNET_ETHERNET_HEADER_LENGTH);
		__rmnet_data_set_skb_proto(skb);
	}

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
		rc = rmnet_map_ingress_handler(skb, config);
	} else {
		switch (ntohs(skb->protocol)) {
		case ETH_P_MAP:
			if (config->local_ep.rmnet_mode ==
			    RMNET_EPMODE_BRIDGE) {
				rc = rmnet_ingress_deliver_packet(skb, config);
			} else {
				LOGD("MAP packet on %s; MAP not set",
				     dev->name);
				rmnet_kfree_skb
				(skb,
				 RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD);
				rc = RX_HANDLER_CONSUMED;
			}
			break;

		case ETH_P_ARP:
		case ETH_P_IP:
		case ETH_P_IPV6:
			rc = rmnet_ingress_deliver_packet(skb, config);
			break;

		default:
			LOGD("Unknown skb->proto 0x%04X\n",
			     ntohs(skb->protocol) & 0xFFFF);
			rc = RX_HANDLER_PASS;
		}
	}

	return rc;
}

/* rmnet_rx_handler() - Rx handler callback registered with kernel
 * @pskb: Packet to be processed by rx handler
 *
 * Standard kernel-expected footprint for rx handlers. Calls
 * rmnet_ingress_handler() with correctly formatted arguments.
 *
 * Return:
 *      - Whatever rmnet_ingress_handler() returns
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	return rmnet_ingress_handler(*pskb);
}

/* rmnet_egress_handler() - Egress handler entry point
 * @skb: packet to transmit
 * @ep: logical endpoint configuration of the packet originator
 *      (e.g., RmNet virtual network device)
 *
 * Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb,
			  struct rmnet_logical_ep_conf_s *ep)
{
	struct rmnet_phys_ep_config *config;
	struct net_device *orig_dev;
	int rc;

	orig_dev = skb->dev;
	skb->dev = ep->egress_dev;

	config = _rmnet_get_phys_ep_config(skb->dev);

	if (!config) {
		LOGD("%s is not associated with rmnet_data", skb->dev->name);
		kfree_skb(skb);
		return;
	}

	LOGD("Packet going out on %s with egress format 0x%08X",
	     skb->dev->name, config->egress_data_format);

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
		switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
		case RMNET_MAP_CONSUMED:
			LOGD("%s", "MAP process consumed packet");
			return;

		case RMNET_MAP_SUCCESS:
			break;

		default:
			LOGD("MAP egress failed on packet on %s",
			     skb->dev->name);
			rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_EGR_MAPFAIL);
			return;
		}
	}

	if (ep->rmnet_mode == RMNET_EPMODE_VND)
		rmnet_vnd_tx_fixup(skb, orig_dev);

	rmnet_print_packet(skb, skb->dev->name, 't');
	trace_rmnet_egress_handler(skb);
	rc = dev_queue_xmit(skb);
	if (rc != 0) {
		LOGD("Failed to queue packet for transmission on [%s]",
		     skb->dev->name);
	}
	rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_EGRESS);
}