/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rmnet_data.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/net_map.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/rmnet_config.h>
#include "rmnet_data_config.h"
#include "rmnet_map.h"
#include "rmnet_data_private.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_trace.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPD);

/* Local Definitions */

long agg_time_limit __read_mostly = 1000000L;
module_param(agg_time_limit, long, 0644);
MODULE_PARM_DESC(agg_time_limit, "Maximum time (ns) packets sit in the agg buf");

long agg_bypass_time __read_mostly = 10000000L;
module_param(agg_bypass_time, long, 0644);
MODULE_PARM_DESC(agg_bypass_time, "Skip agg when packets are spaced more than this (ns) apart");
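
/* Tuning sketch (illustrative): both knobs above are ordinary module
 * parameters compared against timespec nanosecond deltas, so -- assuming
 * the module is built as rmnet_data -- they can be set at load time or
 * adjusted at runtime via sysfs, e.g.:
 *
 *	insmod rmnet_data.ko agg_time_limit=500000
 *	echo 20000000 > /sys/module/rmnet_data/parameters/agg_bypass_time
 */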

#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

/* rmnet_map_add_map_header() - Adds MAP header to front of skb->data
 * @skb: Socket buffer ("packet") to modify
 * @hdrlen: Number of bytes of header data which should not be included in
 *	MAP length field
 * @pad: Specify if padding the MAP packet to make it 4 byte aligned is
 *	necessary
 *
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 *
 * Return:
 * - Pointer to MAP structure
 * - 0 (null) if insufficient headroom
 * - 0 (null) if insufficient tailroom for padding bytes
 */
struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
						    int hdrlen, int pad)
{
	u32 padding, map_datalen;
	u8 *padbytes;
	struct rmnet_map_header_s *map_header;

	if (skb_headroom(skb) < sizeof(struct rmnet_map_header_s))
		return 0;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header_s *)
			skb_push(skb, sizeof(struct rmnet_map_header_s));
	memset(map_header, 0, sizeof(struct rmnet_map_header_s));

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return 0;

	padbytes = (u8 *)skb_put(skb, padding);
	LOGD("pad: %d", padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	map_header->pad_len = padding & 0x3F;

	return map_header;
}
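
/* Usage sketch (illustrative, not a call site in this file): an egress
 * handler would push the MAP header and then stamp the mux ID itself,
 * roughly:
 *
 *	map_header = rmnet_map_add_map_header(skb, 0, RMNET_MAP_NO_PAD_BYTES);
 *	if (!map_header)
 *		goto drop;		// insufficient head/tailroom
 *	map_header->mux_id = mux_id;	// mux_id: hypothetical local
 *
 * The hdrlen argument excludes any leading bytes already pushed in front
 * of the IP packet from the MAP length field.
 */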

/* rmnet_map_deaggregate() - Deaggregates a single packet
 * @skb: Source socket buffer containing multiple MAP frames
 * @config: Physical endpoint configuration of the ingress device
 *
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until 0 is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 *
 * Return:
 * - Pointer to new skb
 * - 0 (null) if no more aggregated packets
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_phys_ep_config *config)
{
	struct sk_buff *skbn;
	struct rmnet_map_header_s *maph;
	u32 packet_len;

	if (skb->len == 0)
		return 0;

	maph = (struct rmnet_map_header_s *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header_s);

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4))
		packet_len += sizeof(struct rmnet_map_dl_checksum_trailer_s);

	if ((((int)skb->len) - ((int)packet_len)) < 0) {
		LOGM("%s", "Got malformed packet. Dropping");
		return 0;
	}

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return 0;

	skbn->dev = skb->dev;
	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	/* Some hardware can send us empty frames. Catch them */
	if (ntohs(maph->pkt_len) == 0) {
		LOGD("Dropping empty MAP frame");
		rmnet_kfree_skb(skbn, RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0);
		return 0;
	}

	return skbn;
}
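
/* Deaggregation loop sketch (illustrative): an ingress handler keeps
 * peeling frames off the aggregate until NULL is returned, then frees the
 * source skb itself:
 *
 *	while ((skbn = rmnet_map_deaggregate(skb, config)) != NULL)
 *		deliver_skb(skbn);	// hypothetical consumer
 *	consume_skb(skb);
 *
 * Note that an empty MAP frame also returns NULL, so any bytes remaining
 * in the source skb after it are dropped along with the source.
 */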

/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
 *
 * This function is scheduled to run in a specified number of ns after
 * the last frame transmitted by the network stack. When run, the buffer
 * containing aggregated packets is finally transmitted on the underlying link.
 */
enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
{
	struct rmnet_phys_ep_config *config;
	unsigned long flags;
	struct sk_buff *skb;
	int rc, agg_count = 0;

	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);
	skb = 0;
	LOGD("%s", "Entering flush thread");
	spin_lock_irqsave(&config->agg_lock, flags);
	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
		/* Buffer may have already been shipped out */
		if (likely(config->agg_skb)) {
			rmnet_stats_agg_pkts(config->agg_count);
			if (config->agg_count > 1)
				LOGL("Agg count: %d", config->agg_count);
			skb = config->agg_skb;
			agg_count = config->agg_count;
			config->agg_skb = 0;
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
		}
		config->agg_state = RMNET_MAP_AGG_IDLE;
	} else {
		/* How did we get here? */
		LOGE("Ran queued command when state %s",
		     "is idle. State machine likely broken");
	}

	spin_unlock_irqrestore(&config->agg_lock, flags);
	if (skb) {
		trace_rmnet_map_flush_packet_queue(skb, agg_count);
		rc = dev_queue_xmit(skb);
		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
	}

	return HRTIMER_NORESTART;
}
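
/* Setup sketch (illustrative): for this callback to fire, the endpoint's
 * hrtimer must be initialized once when the config is created; the init
 * site lives outside this file, but would look roughly like:
 *
 *	hrtimer_init(&config->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	config->hrtimer.function = rmnet_map_flush_packet_queue;
 *
 * The timer is armed by rmnet_map_aggregate(), and this handler returns
 * HRTIMER_NORESTART, making each scheduled flush one-shot.
 */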

/* rmnet_map_aggregate() - Software aggregates multiple packets.
 * @skb: current packet being transmitted
 * @config: Physical endpoint configuration of the egress device
 *
 * Aggregates multiple SKBs into a single large SKB for transmission. MAP
 * protocol is used to separate the packets in the buffer. This function
 * consumes the argument SKB; it must not be processed further by any other
 * function.
 */
void rmnet_map_aggregate(struct sk_buff *skb,
			 struct rmnet_phys_ep_config *config)
{
	u8 *dest_buff;
	unsigned long flags;
	struct sk_buff *agg_skb;
	struct timespec diff, last;
	int size, rc, agg_count = 0;

	if (!skb || !config)
		return;

new_packet:
	spin_lock_irqsave(&config->agg_lock, flags);
	memcpy(&last, &config->agg_last, sizeof(struct timespec));
	getnstimeofday(&config->agg_last);

	if (!config->agg_skb) {
		/* Check to see if we should agg first. If the traffic is very
		 * sparse, don't aggregate. We will need to tune this later
		 */
		diff = timespec_sub(config->agg_last, last);

		if ((diff.tv_sec > 0) || (diff.tv_nsec > agg_bypass_time)) {
			spin_unlock_irqrestore(&config->agg_lock, flags);
			LOGL("delta t: %ld.%09lu\tcount: bypass", diff.tv_sec,
			     diff.tv_nsec);
			rmnet_stats_agg_pkts(1);
			trace_rmnet_map_aggregate(skb, 0);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit(rc,
					       RMNET_STATS_QUEUE_XMIT_AGG_SKIP);
			return;
		}

		size = config->egress_agg_size - skb->len;
		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
		if (!config->agg_skb) {
			config->agg_skb = 0;
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
			spin_unlock_irqrestore(&config->agg_lock, flags);
			rmnet_stats_agg_pkts(1);
			trace_rmnet_map_aggregate(skb, 0);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit(rc,
				RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL);
			return;
		}
		config->agg_count = 1;
		getnstimeofday(&config->agg_time);
		trace_rmnet_start_aggregation(skb);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_CPY_EXPAND);
		goto schedule;
	}
	diff = timespec_sub(config->agg_last, config->agg_time);

	if (skb->len > (config->egress_agg_size - config->agg_skb->len) ||
	    (config->agg_count >= config->egress_agg_count) ||
	    (diff.tv_sec > 0) || (diff.tv_nsec > agg_time_limit)) {
		rmnet_stats_agg_pkts(config->agg_count);
		agg_skb = config->agg_skb;
		agg_count = config->agg_count;
		config->agg_skb = 0;
		config->agg_count = 0;
		memset(&config->agg_time, 0, sizeof(struct timespec));
		config->agg_state = RMNET_MAP_AGG_IDLE;
		spin_unlock_irqrestore(&config->agg_lock, flags);
		hrtimer_cancel(&config->hrtimer);
		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
		     diff.tv_nsec, agg_count);
		trace_rmnet_map_aggregate(skb, agg_count);
		rc = dev_queue_xmit(agg_skb);
		rmnet_stats_queue_xmit(rc,
				       RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER);
		goto new_packet;
	}

	dest_buff = skb_put(config->agg_skb, skb->len);
	memcpy(dest_buff, skb->data, skb->len);
	config->agg_count++;
	rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_INTO_BUFF);

schedule:
	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
			      HRTIMER_MODE_REL);
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
}
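
/* Decision flow of rmnet_map_aggregate(), as implemented above: a packet
 * arriving more than agg_bypass_time ns after the previous one bypasses
 * aggregation and is transmitted directly. Otherwise it starts a new
 * aggregate, is appended to the current one, or forces a flush first when
 * the aggregate would exceed egress_agg_size/egress_agg_count or has been
 * held longer than agg_time_limit ns. Every open aggregate is backed by
 * the 3 ms one-shot hrtimer armed under the "schedule:" label.
 */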

/* Checksum Offload */

static inline u16 *rmnet_map_get_checksum_field(unsigned char protocol,
						const void *txporthdr)
{
	u16 *check = 0;

	switch (protocol) {
	case IPPROTO_TCP:
		check = &(((struct tcphdr *)txporthdr)->check);
		break;

	case IPPROTO_UDP:
		check = &(((struct udphdr *)txporthdr)->check);
		break;

	default:
		check = 0;
		break;
	}

	return check;
}

static inline u16 rmnet_map_add_checksums(u16 val1, u16 val2)
{
	int sum = val1 + val2;

	sum = (((sum & 0xFFFF0000) >> 16) + sum) & 0x0000FFFF;
	return (u16)(sum & 0x0000FFFF);
}

static inline u16 rmnet_map_subtract_checksums(u16 val1, u16 val2)
{
	return rmnet_map_add_checksums(val1, ~val2);
}
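
/* Worked example for the one's-complement helpers above:
 *
 *	rmnet_map_add_checksums(0xFFFF, 0x0001)
 *		sum = 0x10000; folding the carry yields 0x0001
 *	rmnet_map_subtract_checksums(0x0001, 0x0001)
 *		= add(0x0001, ~0x0001) = add(0x0001, 0xFFFE) = 0xFFFF
 *
 * Addition wraps the carry back into the low 16 bits, and subtraction is
 * addition of the bitwise complement -- the same arithmetic RFC 1071
 * describes for incremental checksum updates.
 */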

/* rmnet_map_validate_ipv4_packet_checksum() - Validates TCP/UDP checksum
 * value for IPv4 packet
 * @map_payload: Pointer to the beginning of the map payload
 * @cksum_trailer: Pointer to the checksum trailer
 *
 * Validates the TCP/UDP checksum for the packet using the checksum value
 * from the checksum trailer added to the packet.
 * The validation formula is the following:
 * 1. Performs 1's complement over the checksum value from the trailer
 * 2. Computes 1's complement checksum over IPv4 header and subtracts it from
 *    the value from step 1
 * 3. Computes 1's complement checksum over IPv4 pseudo header and adds it to
 *    the value from step 2
 * 4. Subtracts the checksum value from the TCP/UDP header from the value from
 *    step 3
 * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
 *    header
 *
 * Fragmentation and tunneling are not supported.
 *
 * Return: 0 if validation succeeded.
 */
static int rmnet_map_validate_ipv4_packet_checksum
	(unsigned char *map_payload,
	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
{
	struct iphdr *ip4h;
	u16 *checksum_field;
	void *txporthdr;
	u16 pseudo_checksum;
	u16 ip_hdr_checksum;
	u16 checksum_value;
	u16 ip_payload_checksum;
	u16 ip_pseudo_payload_checksum;
	u16 checksum_value_final;

	ip4h = (struct iphdr *)map_payload;
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
		return RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET;

	txporthdr = map_payload + ip4h->ihl * 4;

	checksum_field = rmnet_map_get_checksum_field(ip4h->protocol,
						      txporthdr);

	if (unlikely(!checksum_field))
		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if ((*checksum_field == 0) && (ip4h->protocol == IPPROTO_UDP))
		return RMNET_MAP_CHECKSUM_SKIPPED;

	checksum_value = ~ntohs(cksum_trailer->checksum_value);
	ip_hdr_checksum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_checksum = rmnet_map_subtract_checksums(checksum_value,
							   ip_hdr_checksum);

	pseudo_checksum = ~ntohs(csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
			(u16)(ntohs(ip4h->tot_len) - ip4h->ihl * 4),
			(u16)ip4h->protocol, 0));
	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
		ip_payload_checksum, pseudo_checksum);

	checksum_value_final = ~rmnet_map_subtract_checksums(
		ip_pseudo_payload_checksum, ntohs(*checksum_field));

	if (unlikely(checksum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 */
			LOGD("DL4 1's complement rule for UDP checksum 0");
			checksum_value_final = ~checksum_value_final;
			break;

		case IPPROTO_TCP:
			if (*checksum_field == 0xFFFF) {
				LOGD(
				"DL4 Non-RFC compliant TCP checksum found");
				checksum_value_final = ~checksum_value_final;
			}
			break;
		}
	}

	LOGD(
	"DL4 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
	~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
	pseudo_checksum, checksum_value_final);

	if (checksum_value_final == ntohs(*checksum_field))
		return RMNET_MAP_CHECKSUM_OK;
	else
		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
}
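
/* Condensed, the computation above (all values host-order one's-complement
 * sums) is:
 *
 *	final = ~((~trailer - sum(IPv4 hdr)) + sum(pseudo hdr) - th->check)
 *
 * and validation passes when final == th->check: the trailer was computed
 * over the entire IP packet, while the expected transport checksum covers
 * the pseudo header plus the transport payload.
 */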

/* rmnet_map_validate_ipv6_packet_checksum() - Validates TCP/UDP checksum
 * value for IPv6 packet
 * @map_payload: Pointer to the beginning of the map payload
 * @cksum_trailer: Pointer to the checksum trailer
 *
 * Validates the TCP/UDP checksum for the packet using the checksum value
 * from the checksum trailer added to the packet.
 * The validation formula is the following:
 * 1. Performs 1's complement over the checksum value from the trailer
 * 2. Computes 1's complement checksum over IPv6 header and subtracts it from
 *    the value from step 1
 * 3. Computes 1's complement checksum over IPv6 pseudo header and adds it to
 *    the value from step 2
 * 4. Subtracts the checksum value from the TCP/UDP header from the value from
 *    step 3
 * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
 *    header
 *
 * Fragmentation, extension headers and tunneling are not supported.
 *
 * Return: 0 if validation succeeded.
 */
static int rmnet_map_validate_ipv6_packet_checksum
	(unsigned char *map_payload,
	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
{
	struct ipv6hdr *ip6h;
	u16 *checksum_field;
	void *txporthdr;
	u16 pseudo_checksum;
	u16 ip_hdr_checksum;
	u16 checksum_value;
	u16 ip_payload_checksum;
	u16 ip_pseudo_payload_checksum;
	u16 checksum_value_final;
	u32 length;

	ip6h = (struct ipv6hdr *)map_payload;

	txporthdr = map_payload + sizeof(struct ipv6hdr);
	checksum_field = rmnet_map_get_checksum_field(ip6h->nexthdr,
						      txporthdr);

	if (unlikely(!checksum_field))
		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;

	checksum_value = ~ntohs(cksum_trailer->checksum_value);
	ip_hdr_checksum = ~ntohs(ip_compute_csum(ip6h,
			(int)(txporthdr - (void *)map_payload)));
	ip_payload_checksum = rmnet_map_subtract_checksums
				(checksum_value, ip_hdr_checksum);

	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_checksum = ~ntohs(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
			length, ip6h->nexthdr, 0));
	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
		ip_payload_checksum, pseudo_checksum);

	checksum_value_final = ~rmnet_map_subtract_checksums(
		ip_pseudo_payload_checksum, ntohs(*checksum_field));

	if (unlikely(checksum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1 */
			LOGD("DL6 One's complement rule for UDP checksum 0");
			checksum_value_final = ~checksum_value_final;
			break;

		case IPPROTO_TCP:
			if (*checksum_field == 0xFFFF) {
				LOGD(
				"DL6 Non-RFC compliant TCP checksum found");
				checksum_value_final = ~checksum_value_final;
			}
			break;
		}
	}

	LOGD(
	"DL6 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
	~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
	pseudo_checksum, checksum_value_final);

	if (checksum_value_final == ntohs(*checksum_field))
		return RMNET_MAP_CHECKSUM_OK;
	else
		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
}

/* rmnet_map_checksum_downlink_packet() - Validates checksum on
 * a downlink packet
 * @skb: Pointer to the packet's skb.
 *
 * Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the entire MAP
 * frame: MAP header + IP payload + padding + checksum trailer.
 * Currently, only IPv4 and IPv6 are supported along with
 * TCP & UDP. Fragmented or tunneled packets are not supported.
 *
 * Return:
 * - RMNET_MAP_CHECKSUM_OK: Validation of checksum succeeded.
 * - RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER: Skb buffer given is corrupted.
 * - RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET: Valid flag is not set in the
 *   checksum trailer.
 * - RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET: The packet is a fragment.
 * - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT: The transport header is
 *   not TCP/UDP.
 * - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 * - RMNET_MAP_CHECKSUM_VALIDATION_FAILED: In case the validation failed.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb)
{
	struct rmnet_map_dl_checksum_trailer_s *cksum_trailer;
	unsigned int data_len;
	unsigned char *map_payload;
	unsigned char ip_version;

	data_len = RMNET_MAP_GET_LENGTH(skb);

	if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s) + data_len +
		     sizeof(struct rmnet_map_dl_checksum_trailer_s))))
		return RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER;

	cksum_trailer = (struct rmnet_map_dl_checksum_trailer_s *)
			(skb->data + data_len
			 + sizeof(struct rmnet_map_header_s));

	if (unlikely(!ntohs(cksum_trailer->valid)))
		return RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET;

	map_payload = (unsigned char *)(skb->data
					+ sizeof(struct rmnet_map_header_s));

	ip_version = (*map_payload & 0xF0) >> 4;
	if (ip_version == 0x04)
		return rmnet_map_validate_ipv4_packet_checksum(map_payload,
							       cksum_trailer);
	else if (ip_version == 0x06)
		return rmnet_map_validate_ipv6_packet_checksum(map_payload,
							       cksum_trailer);

	return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
}
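
/* Downlink MAP frame layout, as parsed above (not to scale):
 *
 *	+------------+-----------------+---------+------------------+
 *	| MAP header | IP packet       | padding | checksum trailer |
 *	+------------+-----------------+---------+------------------+
 *	             |<------- data_len ------->|
 *
 * data_len is the MAP pkt_len field, which includes padding, so the
 * trailer sits at skb->data + sizeof(MAP header) + data_len; that is why
 * the length check requires header + data_len + trailer <= skb->len.
 */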

static void rmnet_map_fill_ipv4_packet_ul_checksum_header
	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
	 struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	ul_header->checksum_insert_offset = skb->csum_offset;
	ul_header->cks_en = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ip4_ind = 1;
	else
		ul_header->udp_ip4_ind = 0;
	/* Changing checksum_insert_offset to network order */
	hdr++;
	*hdr = htons(*hdr);
	skb->ip_summed = CHECKSUM_NONE;
}

static void rmnet_map_fill_ipv6_packet_ul_checksum_header
	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
	 struct sk_buff *skb)
{
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	ul_header->checksum_insert_offset = skb->csum_offset;
	ul_header->cks_en = 1;
	ul_header->udp_ip4_ind = 0;
	/* Changing checksum_insert_offset to network order */
	hdr++;
	*hdr = htons(*hdr);
	skb->ip_summed = CHECKSUM_NONE;
}

static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	void *txporthdr;
	u16 *csum;

	txporthdr = iphdr + ip4h->ihl * 4;

	if ((ip4h->protocol == IPPROTO_TCP) ||
	    (ip4h->protocol == IPPROTO_UDP)) {
		csum = (u16 *)rmnet_map_get_checksum_field(ip4h->protocol,
							   txporthdr);
		*csum = ~(*csum);
	}
}

static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	void *txporthdr;
	u16 *csum;

	txporthdr = ip6hdr + sizeof(struct ipv6hdr);

	if ((ip6h->nexthdr == IPPROTO_TCP) || (ip6h->nexthdr == IPPROTO_UDP)) {
		csum = (u16 *)rmnet_map_get_checksum_field(ip6h->nexthdr,
							   txporthdr);
		*csum = ~(*csum);
	}
}

/* rmnet_map_checksum_uplink_packet() - Generates UL checksum
 * meta info header
 * @skb: Pointer to the packet's skb.
 *
 * Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 *
 * Return:
 * - RMNET_MAP_CHECKSUM_OK: Checksum offload header was generated.
 * - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 * - RMNET_MAP_CHECKSUM_SW: Unsupported packet for UL checksum offload.
 */
int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				     struct net_device *orig_dev,
				     u32 egress_data_format)
{
	unsigned char ip_version;
	struct rmnet_map_ul_checksum_header_s *ul_header;
	void *iphdr;
	int ret;

	ul_header = (struct rmnet_map_ul_checksum_header_s *)
		skb_push(skb, sizeof(struct rmnet_map_ul_checksum_header_s));

	if (unlikely(!(orig_dev->features &
		       (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_checksum_header_s);
		ip_version = (*(char *)iphdr & 0xF0) >> 4;
		if (ip_version == 0x04) {
			rmnet_map_fill_ipv4_packet_ul_checksum_header
				(iphdr, ul_header, skb);
			if (egress_data_format &
			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
				rmnet_map_complement_ipv4_txporthdr_csum_field(
					iphdr);
			ret = RMNET_MAP_CHECKSUM_OK;
			goto done;
		} else if (ip_version == 0x06) {
			rmnet_map_fill_ipv6_packet_ul_checksum_header
				(iphdr, ul_header, skb);
			if (egress_data_format &
			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
				rmnet_map_complement_ipv6_txporthdr_csum_field(
					iphdr);
			ret = RMNET_MAP_CHECKSUM_OK;
			goto done;
		} else {
			ret = RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
			goto sw_checksum;
		}
	} else {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

sw_checksum:
	ul_header->checksum_start_offset = 0;
	ul_header->checksum_insert_offset = 0;
	ul_header->cks_en = 0;
	ul_header->udp_ip4_ind = 0;
done:
	return ret;
}
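
/* Worked example (illustrative): for a CHECKSUM_PARTIAL TCP/IPv4 packet
 * with a 20-byte IP header, the meta info header generated above carries
 *
 *	checksum_start_offset  = htons(20)  // transport hdr offset from IP hdr
 *	checksum_insert_offset = htons(16)  // offsetof(struct tcphdr, check)
 *	cks_en = 1, udp_ip4_ind = 0
 *
 * telling the hardware where to start summing and where to write the
 * result. When RMNET_EGRESS_FORMAT_MAP_CKSUMV4 is set, the stack's partial
 * checksum in th->check is also complemented by the helper above before
 * the packet is handed to the device.
 */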

int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset)
{
	unsigned char *packet_start = skb->data + offset;
	int is_icmp = 0;

	if ((skb->data[offset]) >> 4 == 0x04) {
		struct iphdr *ip4h = (struct iphdr *)(packet_start);

		if (ip4h->protocol == IPPROTO_ICMP)
			is_icmp = 1;
	} else if ((skb->data[offset]) >> 4 == 0x06) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);

		if (ip6h->nexthdr == IPPROTO_ICMPV6) {
			is_icmp = 1;
		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag;

			frag = (struct frag_hdr *)(packet_start
						   + sizeof(struct ipv6hdr));
			if (frag->nexthdr == IPPROTO_ICMPV6)
				is_icmp = 1;
		}
	}

	return is_icmp;
}
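
/* Caller sketch (illustrative): the egress path can use this to keep
 * latency-sensitive ICMP/ICMPv6 traffic out of the aggregation buffer:
 *
 *	if (rmnet_ul_aggregation_skip(skb, ip_offset))
 *		rc = dev_queue_xmit(skb);	// send immediately
 *	else
 *		rmnet_map_aggregate(skb, config);
 *
 * where ip_offset (a hypothetical name; the real call site is elsewhere)
 * is the byte offset of the IP header within skb->data.
 */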