/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rmnet_data.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/net_map.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/rmnet_config.h>
#include "rmnet_data_config.h"
#include "rmnet_map.h"
#include "rmnet_data_private.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_trace.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPD);

/* Local Definitions */

long agg_time_limit __read_mostly = 1000000L;
module_param(agg_time_limit, long, 0644);
MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf");

long agg_bypass_time __read_mostly = 10000000L;
module_param(agg_bypass_time, long, 0644);
MODULE_PARM_DESC(agg_bypass_time,
		 "Skip aggregation when packets arrive further apart than this");

struct agg_work {
	struct delayed_work work;
	struct rmnet_phys_ep_config *config;
};

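/* Extra space allocated around each deaggregated packet: half is reserved
 * as headroom, leaving the remainder as tailroom for later processing.
 */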
#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

/* rmnet_map_add_map_header() - Adds MAP header to front of skb->data
 * @skb: Socket buffer ("packet") to modify
 * @hdrlen: Number of bytes of header data which should not be included in
 *          the MAP length field
 * @pad: Specify whether padding the MAP packet out to a 4-byte boundary is
 *       necessary
 *
 * Padding is calculated and set appropriately in the MAP header. Mux ID is
 * initialized to 0.
 *
 * Return:
 *      - Pointer to MAP structure
 *      - 0 (null) if insufficient headroom
 *      - 0 (null) if insufficient tailroom for padding bytes
 */
struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
						    int hdrlen, int pad)
{
	u32 padding, map_datalen;
	u8 *padbytes;
	struct rmnet_map_header_s *map_header;

	if (skb_headroom(skb) < sizeof(struct rmnet_map_header_s))
		return 0;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header_s *)
			skb_push(skb, sizeof(struct rmnet_map_header_s));
	memset(map_header, 0, sizeof(struct rmnet_map_header_s));

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return 0;

	padbytes = (u8 *)skb_put(skb, padding);
	LOGD("pad: %d", padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	map_header->pad_len = padding & 0x3F;

	return map_header;
}
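
/* Resulting frame layout (illustrative):
 *
 *	+------------+------------------------+-----------+
 *	| MAP header |        payload         | pad bytes |
 *	+------------+------------------------+-----------+
 *	             |<-- pkt_len = payload + padding  -->|
 *
 * pkt_len covers the payload and padding but not the MAP header itself;
 * pad_len records how many zero bytes were appended.
 */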

/* rmnet_map_deaggregate() - Deaggregates a single packet
 * @skb: Source socket buffer containing multiple MAP frames
 * @config: Physical endpoint configuration of the ingress device
 *
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until 0 is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 *
 * Return:
 *      - Pointer to new skb
 *      - 0 (null) if no more aggregated packets
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_phys_ep_config *config)
{
	struct sk_buff *skbn;
	struct rmnet_map_header_s *maph;
	u32 packet_len;

	if (skb->len == 0)
		return 0;

	maph = (struct rmnet_map_header_s *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header_s);

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4))
		packet_len += sizeof(struct rmnet_map_dl_checksum_trailer_s);

	if ((((int)skb->len) - ((int)packet_len)) < 0) {
		LOGM("%s", "Got malformed packet. Dropping");
		return 0;
	}

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return 0;

	skbn->dev = skb->dev;
	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	/* Some hardware can send us empty frames. Catch them */
	if (ntohs(maph->pkt_len) == 0) {
		LOGD("Dropping empty MAP frame");
		rmnet_kfree_skb(skbn, RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0);
		return 0;
	}

	return skbn;
}
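
/* Typical caller pattern (sketch; deliver_to_stack() stands in for the
 * driver's actual delivery routine, which is not defined in this file):
 *
 *	while ((skbn = rmnet_map_deaggregate(skb, config)))
 *		deliver_to_stack(skbn);
 *
 * The caller then frees the fully consumed source skb itself.
 */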

/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
 * @work: struct agg_work containing delayed work and skb to flush
 *
 * This function is scheduled to run in a specified number of jiffies after
 * the last frame is transmitted by the network stack. When run, the buffer
 * containing aggregated packets is finally transmitted on the underlying link.
 */
static void rmnet_map_flush_packet_queue(struct work_struct *work)
{
	struct agg_work *real_work;
	struct rmnet_phys_ep_config *config;
	unsigned long flags;
	struct sk_buff *skb;
	int rc, agg_count = 0;

	skb = 0;
	real_work = (struct agg_work *)work;
	config = real_work->config;
	LOGD("%s", "Entering flush thread");
	spin_lock_irqsave(&config->agg_lock, flags);
	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
		/* Buffer may have already been shipped out */
		if (likely(config->agg_skb)) {
			rmnet_stats_agg_pkts(config->agg_count);
			if (config->agg_count > 1)
				LOGL("Agg count: %d", config->agg_count);
			skb = config->agg_skb;
			agg_count = config->agg_count;
			config->agg_skb = 0;
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
		}
		config->agg_state = RMNET_MAP_AGG_IDLE;
	} else {
		/* How did we get here? */
		LOGE("Ran queued command when state %s",
		     "is idle. State machine likely broken");
	}

	spin_unlock_irqrestore(&config->agg_lock, flags);
	if (skb) {
		trace_rmnet_map_flush_packet_queue(skb, agg_count);
		rc = dev_queue_xmit(skb);
		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
	}
	kfree(work);
}
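
/* Locking note: the aggregation buffer is detached from the config under
 * agg_lock, but dev_queue_xmit() is called only after the lock is released,
 * keeping the spinlock's critical section short.
 */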

/* rmnet_map_aggregate() - Software aggregates multiple packets.
 * @skb: current packet being transmitted
 * @config: Physical endpoint configuration of the egress device
 *
 * Aggregates multiple SKBs into a single large SKB for transmission. The MAP
 * protocol is used to separate the packets in the buffer. This function
 * consumes the argument SKB, which must not be further processed by any
 * other function.
 */
void rmnet_map_aggregate(struct sk_buff *skb,
			 struct rmnet_phys_ep_config *config)
{
	u8 *dest_buff;
	struct agg_work *work;
	unsigned long flags;
	struct sk_buff *agg_skb;
	struct timespec diff, last;
	int size, rc, agg_count = 0;

	if (!skb || !config)
		return;

new_packet:
	spin_lock_irqsave(&config->agg_lock, flags);
	memcpy(&last, &config->agg_last, sizeof(struct timespec));
	getnstimeofday(&config->agg_last);

	if (!config->agg_skb) {
		/* Check to see if we should agg first. If the traffic is very
		 * sparse, don't aggregate. We will need to tune this later
		 */
		diff = timespec_sub(config->agg_last, last);

		if ((diff.tv_sec > 0) || (diff.tv_nsec > agg_bypass_time)) {
			spin_unlock_irqrestore(&config->agg_lock, flags);
			LOGL("delta t: %ld.%09lu\tcount: bypass", diff.tv_sec,
			     diff.tv_nsec);
			rmnet_stats_agg_pkts(1);
			trace_rmnet_map_aggregate(skb, 0);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit(rc,
					       RMNET_STATS_QUEUE_XMIT_AGG_SKIP);
			return;
		}

		size = config->egress_agg_size - skb->len;
		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
		if (!config->agg_skb) {
			config->agg_skb = 0;
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
			spin_unlock_irqrestore(&config->agg_lock, flags);
			rmnet_stats_agg_pkts(1);
			trace_rmnet_map_aggregate(skb, 0);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit(rc,
				RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL);
			return;
		}
		config->agg_count = 1;
		getnstimeofday(&config->agg_time);
		trace_rmnet_start_aggregation(skb);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_CPY_EXPAND);
		goto schedule;
	}
	diff = timespec_sub(config->agg_last, config->agg_time);

	if (skb->len > (config->egress_agg_size - config->agg_skb->len) ||
	    (config->agg_count >= config->egress_agg_count) ||
	    (diff.tv_sec > 0) || (diff.tv_nsec > agg_time_limit)) {
		rmnet_stats_agg_pkts(config->agg_count);
		agg_skb = config->agg_skb;
		agg_count = config->agg_count;
		config->agg_skb = 0;
		config->agg_count = 0;
		memset(&config->agg_time, 0, sizeof(struct timespec));
		spin_unlock_irqrestore(&config->agg_lock, flags);
		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
		     diff.tv_nsec, agg_count);
		trace_rmnet_map_aggregate(skb, agg_count);
		rc = dev_queue_xmit(agg_skb);
		rmnet_stats_queue_xmit(rc,
				       RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER);
		goto new_packet;
	}

	dest_buff = skb_put(config->agg_skb, skb->len);
	memcpy(dest_buff, skb->data, skb->len);
	config->agg_count++;
	rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_INTO_BUFF);

schedule:
	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			LOGE("Failed to allocate work item for packet %s",
			     "transfer. DATA PATH LIKELY BROKEN!");
			config->agg_state = RMNET_MAP_AGG_IDLE;
			spin_unlock_irqrestore(&config->agg_lock, flags);
			return;
		}
		INIT_DELAYED_WORK((struct delayed_work *)work,
				  rmnet_map_flush_packet_queue);
		work->config = config;
		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
		schedule_delayed_work((struct delayed_work *)work, 1);
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
}
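
/* Aggregation state machine summary: the buffer starts RMNET_MAP_AGG_IDLE;
 * the first queued packet moves it to RMNET_MAP_TXFER_SCHEDULED and arms a
 * one-jiffy delayed work item. The buffer is flushed either by that timer
 * (rmnet_map_flush_packet_queue) or inline above, when the next packet would
 * overflow egress_agg_size, agg_count reaches egress_agg_count, or the
 * buffer has been held longer than agg_time_limit.
 */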

/* Checksum Offload */

static inline u16 *rmnet_map_get_checksum_field(unsigned char protocol,
						const void *txporthdr)
{
	u16 *check = 0;

	switch (protocol) {
	case IPPROTO_TCP:
		check = &(((struct tcphdr *)txporthdr)->check);
		break;

	case IPPROTO_UDP:
		check = &(((struct udphdr *)txporthdr)->check);
		break;

	default:
		check = 0;
		break;
	}

	return check;
}

static inline u16 rmnet_map_add_checksums(u16 val1, u16 val2)
{
	int sum = val1 + val2;

	sum = (((sum & 0xFFFF0000) >> 16) + sum) & 0x0000FFFF;
	return (u16)(sum & 0x0000FFFF);
}

static inline u16 rmnet_map_subtract_checksums(u16 val1, u16 val2)
{
	return rmnet_map_add_checksums(val1, ~val2);
}
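
/* One's complement arithmetic worked example: 0xFFFF + 0x0001 = 0x10000;
 * folding the carry back in yields (0x0001 + 0x0000) = 0x0001. Subtraction
 * is addition of the one's complement (a - b == a + ~b in this arithmetic),
 * which is why rmnet_map_subtract_checksums() is a one-liner.
 */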

/* rmnet_map_validate_ipv4_packet_checksum() - Validates TCP/UDP checksum
 * value for IPv4 packet
 * @map_payload: Pointer to the beginning of the map payload
 * @cksum_trailer: Pointer to the checksum trailer
 *
 * Validates the TCP/UDP checksum for the packet using the checksum value
 * from the checksum trailer added to the packet.
 * The validation formula is the following:
 * 1. Performs 1's complement over the checksum value from the trailer
 * 2. Computes 1's complement checksum over IPv4 header and subtracts it from
 *    the value from step 1
 * 3. Computes 1's complement checksum over IPv4 pseudo header and adds it to
 *    the value from step 2
 * 4. Subtracts the checksum value from the TCP/UDP header from the value from
 *    step 3
 * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
 *    header
 *
 * Fragmentation and tunneling are not supported.
 *
 * Return: 0 if validation succeeded.
 */
static int rmnet_map_validate_ipv4_packet_checksum
	(unsigned char *map_payload,
	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
{
	struct iphdr *ip4h;
	u16 *checksum_field;
	void *txporthdr;
	u16 pseudo_checksum;
	u16 ip_hdr_checksum;
	u16 checksum_value;
	u16 ip_payload_checksum;
	u16 ip_pseudo_payload_checksum;
	u16 checksum_value_final;

	ip4h = (struct iphdr *)map_payload;
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
		return RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET;

	txporthdr = map_payload + ip4h->ihl * 4;

	checksum_field = rmnet_map_get_checksum_field(ip4h->protocol,
						      txporthdr);

	if (unlikely(!checksum_field))
		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if ((*checksum_field == 0) && (ip4h->protocol == IPPROTO_UDP))
		return RMNET_MAP_CHECKSUM_SKIPPED;

	checksum_value = ~ntohs(cksum_trailer->checksum_value);
	ip_hdr_checksum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_checksum = rmnet_map_subtract_checksums(checksum_value,
							   ip_hdr_checksum);

	pseudo_checksum = ~ntohs(csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
			(u16)(ntohs(ip4h->tot_len) - ip4h->ihl * 4),
			(u16)ip4h->protocol, 0));
	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
		ip_payload_checksum, pseudo_checksum);

	checksum_value_final = ~rmnet_map_subtract_checksums(
		ip_pseudo_payload_checksum, ntohs(*checksum_field));

	if (unlikely(checksum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 */
			LOGD("DL4 1's complement rule for UDP checksum 0");
			checksum_value_final = ~checksum_value_final;
			break;

		case IPPROTO_TCP:
			if (*checksum_field == 0xFFFF) {
				LOGD(
				"DL4 Non-RFC compliant TCP checksum found");
				checksum_value_final = ~checksum_value_final;
			}
			break;
		}
	}

	LOGD(
	"DL4 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
	~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
	pseudo_checksum, checksum_value_final);

	if (checksum_value_final == ntohs(*checksum_field))
		return RMNET_MAP_CHECKSUM_OK;
	else
		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
}

/* rmnet_map_validate_ipv6_packet_checksum() - Validates TCP/UDP checksum
 * value for IPv6 packet
 * @map_payload: Pointer to the beginning of the map payload
 * @cksum_trailer: Pointer to the checksum trailer
 *
 * Validates the TCP/UDP checksum for the packet using the checksum value
 * from the checksum trailer added to the packet.
 * The validation formula is the following:
 * 1. Performs 1's complement over the checksum value from the trailer
 * 2. Computes 1's complement checksum over IPv6 header and subtracts it from
 *    the value from step 1
 * 3. Computes 1's complement checksum over IPv6 pseudo header and adds it to
 *    the value from step 2
 * 4. Subtracts the checksum value from the TCP/UDP header from the value from
 *    step 3
 * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
 *    header
 *
 * Fragmentation, extension headers and tunneling are not supported.
 *
 * Return: 0 if validation succeeded.
 */
static int rmnet_map_validate_ipv6_packet_checksum
	(unsigned char *map_payload,
	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
{
	struct ipv6hdr *ip6h;
	u16 *checksum_field;
	void *txporthdr;
	u16 pseudo_checksum;
	u16 ip_hdr_checksum;
	u16 checksum_value;
	u16 ip_payload_checksum;
	u16 ip_pseudo_payload_checksum;
	u16 checksum_value_final;
	u32 length;

	ip6h = (struct ipv6hdr *)map_payload;

	txporthdr = map_payload + sizeof(struct ipv6hdr);
	checksum_field = rmnet_map_get_checksum_field(ip6h->nexthdr,
						      txporthdr);

	if (unlikely(!checksum_field))
		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;

	checksum_value = ~ntohs(cksum_trailer->checksum_value);
	ip_hdr_checksum = ~ntohs(ip_compute_csum(ip6h,
				 (int)(txporthdr - (void *)map_payload)));
	ip_payload_checksum = rmnet_map_subtract_checksums
				(checksum_value, ip_hdr_checksum);

	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_checksum = ~ntohs(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
			  length, ip6h->nexthdr, 0));
	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
		ip_payload_checksum, pseudo_checksum);

	checksum_value_final = ~rmnet_map_subtract_checksums(
		ip_pseudo_payload_checksum, ntohs(*checksum_field));

	if (unlikely(checksum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1 */
			LOGD("DL6 One's complement rule for UDP checksum 0");
			checksum_value_final = ~checksum_value_final;
			break;

		case IPPROTO_TCP:
			if (*checksum_field == 0xFFFF) {
				LOGD(
				"DL6 Non-RFC compliant TCP checksum found");
				checksum_value_final = ~checksum_value_final;
			}
			break;
		}
	}

	LOGD(
	"DL6 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
	~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
	pseudo_checksum, checksum_value_final);

	if (checksum_value_final == ntohs(*checksum_field))
		return RMNET_MAP_CHECKSUM_OK;
	else
		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
}

/* rmnet_map_checksum_downlink_packet() - Validates checksum on
 * a downlink packet
 * @skb: Pointer to the packet's skb.
 *
 * Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the entire MAP
 * frame: MAP header + IP payload + padding + checksum trailer.
 * Currently, only IPv4 and IPv6 are supported along with
 * TCP & UDP. Fragmented or tunneled packets are not supported.
 *
 * Return:
 *   - RMNET_MAP_CHECKSUM_OK: Validation of checksum succeeded.
 *   - RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER: Skb buffer given is corrupted.
 *   - RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET: Valid flag is not set in the
 *     checksum trailer.
 *   - RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET: The packet is a fragment.
 *   - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT: The transport header is
 *     not TCP/UDP.
 *   - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 *   - RMNET_MAP_CHECKSUM_VALIDATION_FAILED: In case the validation failed.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb)
{
	struct rmnet_map_dl_checksum_trailer_s *cksum_trailer;
	unsigned int data_len;
	unsigned char *map_payload;
	unsigned char ip_version;

	data_len = RMNET_MAP_GET_LENGTH(skb);

	if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s) + data_len +
	    sizeof(struct rmnet_map_dl_checksum_trailer_s))))
		return RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER;

	cksum_trailer = (struct rmnet_map_dl_checksum_trailer_s *)
			(skb->data + data_len
			 + sizeof(struct rmnet_map_header_s));

	if (unlikely(!ntohs(cksum_trailer->valid)))
		return RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET;

	map_payload = (unsigned char *)(skb->data
					+ sizeof(struct rmnet_map_header_s));

	ip_version = (*map_payload & 0xF0) >> 4;
	if (ip_version == 0x04)
		return rmnet_map_validate_ipv4_packet_checksum(map_payload,
							       cksum_trailer);
	else if (ip_version == 0x06)
		return rmnet_map_validate_ipv6_packet_checksum(map_payload,
							       cksum_trailer);

	return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
}

static void rmnet_map_fill_ipv4_packet_ul_checksum_header
	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
	 struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	ul_header->checksum_insert_offset = skb->csum_offset;
	ul_header->cks_en = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ip4_ind = 1;
	else
		ul_header->udp_ip4_ind = 0;
	/* Convert the second 16-bit word of the header, which holds
	 * checksum_insert_offset and the flag bits, to network order.
	 */
	hdr++;
	*hdr = htons(*hdr);
	skb->ip_summed = CHECKSUM_NONE;
}

static void rmnet_map_fill_ipv6_packet_ul_checksum_header
	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
	 struct sk_buff *skb)
{
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	ul_header->checksum_insert_offset = skb->csum_offset;
	ul_header->cks_en = 1;
	ul_header->udp_ip4_ind = 0;
	/* Convert the second 16-bit word of the header, which holds
	 * checksum_insert_offset and the flag bits, to network order.
	 */
	hdr++;
	*hdr = htons(*hdr);
	skb->ip_summed = CHECKSUM_NONE;
}

static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	void *txporthdr;
	u16 *csum;

	txporthdr = iphdr + ip4h->ihl * 4;

	if ((ip4h->protocol == IPPROTO_TCP) ||
	    (ip4h->protocol == IPPROTO_UDP)) {
		csum = (u16 *)rmnet_map_get_checksum_field(ip4h->protocol,
							   txporthdr);
		*csum = ~(*csum);
	}
}

static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	void *txporthdr;
	u16 *csum;

	txporthdr = ip6hdr + sizeof(struct ipv6hdr);

	if ((ip6h->nexthdr == IPPROTO_TCP) || (ip6h->nexthdr == IPPROTO_UDP)) {
		csum = (u16 *)rmnet_map_get_checksum_field(ip6h->nexthdr,
							   txporthdr);
		*csum = ~(*csum);
	}
}
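
/* For CHECKSUM_PARTIAL skbs the stack has already stored the pseudo-header
 * checksum in the transport checksum field. The two helpers above replace
 * it with its one's complement before the packet reaches the hardware;
 * presumably the checksum engine selected by RMNET_EGRESS_FORMAT_MAP_CKSUMV4
 * expects the complemented seed value.
 */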

/* rmnet_map_checksum_uplink_packet() - Generates UL checksum
 * meta info header
 * @skb: Pointer to the packet's skb.
 * @orig_dev: Device whose checksum-offload features are consulted
 * @egress_data_format: Egress data format flags (RMNET_EGRESS_FORMAT_*)
 *
 * Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 *
 * Return:
 *   - RMNET_MAP_CHECKSUM_OK: Checksum offload header filled in successfully.
 *   - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 *   - RMNET_MAP_CHECKSUM_SW: Unsupported packet for UL checksum offload.
 */
int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				     struct net_device *orig_dev,
				     u32 egress_data_format)
{
	unsigned char ip_version;
	struct rmnet_map_ul_checksum_header_s *ul_header;
	void *iphdr;
	int ret;

	ul_header = (struct rmnet_map_ul_checksum_header_s *)
		skb_push(skb, sizeof(struct rmnet_map_ul_checksum_header_s));

	if (unlikely(!(orig_dev->features &
		(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_checksum_header_s);
		ip_version = (*(char *)iphdr & 0xF0) >> 4;
		if (ip_version == 0x04) {
			rmnet_map_fill_ipv4_packet_ul_checksum_header
				(iphdr, ul_header, skb);
			if (egress_data_format &
			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
				rmnet_map_complement_ipv4_txporthdr_csum_field(
					iphdr);
			ret = RMNET_MAP_CHECKSUM_OK;
			goto done;
		} else if (ip_version == 0x06) {
			rmnet_map_fill_ipv6_packet_ul_checksum_header
				(iphdr, ul_header, skb);
			if (egress_data_format &
			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
				rmnet_map_complement_ipv6_txporthdr_csum_field(
					iphdr);
			ret = RMNET_MAP_CHECKSUM_OK;
			goto done;
		} else {
			ret = RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
			goto sw_checksum;
		}
	} else {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

sw_checksum:
	ul_header->checksum_start_offset = 0;
	ul_header->checksum_insert_offset = 0;
	ul_header->cks_en = 0;
	ul_header->udp_ip4_ind = 0;
done:
	return ret;
}
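
/* When the sw_checksum label is taken, the all-zero offload header
 * (cks_en = 0) tells the hardware not to insert a checksum. The
 * RMNET_MAP_CHECKSUM_SW return value indicates that any required checksum
 * must instead be provided in software.
 */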

int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset)
{
	unsigned char *packet_start = skb->data + offset;
	int is_icmp = 0;

	if ((skb->data[offset]) >> 4 == 0x04) {
		struct iphdr *ip4h = (struct iphdr *)(packet_start);

		if (ip4h->protocol == IPPROTO_ICMP)
			is_icmp = 1;
	} else if ((skb->data[offset]) >> 4 == 0x06) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);

		if (ip6h->nexthdr == IPPROTO_ICMPV6) {
			is_icmp = 1;
		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag;

			frag = (struct frag_hdr *)(packet_start
						   + sizeof(struct ipv6hdr));
			if (frag->nexthdr == IPPROTO_ICMPV6)
				is_icmp = 1;
		}
	}

	return is_icmp;
}