/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rmnet_data.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/net_map.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/rmnet_config.h>
#include "rmnet_data_config.h"
#include "rmnet_map.h"
#include "rmnet_data_private.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_trace.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPD);

/* Local Definitions */

long agg_time_limit __read_mostly = 1000000L;
module_param(agg_time_limit, long, 0644);
MODULE_PARM_DESC(agg_time_limit, "Maximum time (ns) packets sit in the agg buf");

long agg_bypass_time __read_mostly = 10000000L;
module_param(agg_bypass_time, long, 0644);
MODULE_PARM_DESC(agg_bypass_time, "Skip agg when pkts spaced over this (ns)");

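/* Work item used to flush the aggregation buffer on a timer. The
 * delayed_work member must stay first: rmnet_map_flush_packet_queue()
 * casts the work_struct pointer it receives back to struct agg_work.
 */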
struct agg_work {
	struct delayed_work work;
	struct rmnet_phys_ep_config *config;
};

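/* Each deaggregated packet is copied into a fresh skb allocated with this
 * much extra space; half of it is reserved as headroom.
 */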
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

/* rmnet_map_add_map_header() - Adds MAP header to front of skb->data
 * @skb:        Socket buffer ("packet") to modify
 * @hdrlen:     Number of bytes of header data which should not be included in
 *              MAP length field
 * @pad:        Specify if padding the MAP packet to make it 4 byte aligned is
 *              necessary
 *
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 *
 * Return:
 *      - Pointer to MAP structure
 *      - 0 (null) if insufficient headroom
 *      - 0 (null) if insufficient tailroom for padding bytes
 */
struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
						    int hdrlen, int pad)
{
	u32 padding, map_datalen;
	u8 *padbytes;
	struct rmnet_map_header_s *map_header;

	if (skb_headroom(skb) < sizeof(struct rmnet_map_header_s))
		return 0;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header_s *)
			skb_push(skb, sizeof(struct rmnet_map_header_s));
	memset(map_header, 0, sizeof(struct rmnet_map_header_s));

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return 0;

	padbytes = (u8 *)skb_put(skb, padding);
	LOGD("pad: %d", padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	map_header->pad_len = padding & 0x3F;

	return map_header;
}

/* rmnet_map_deaggregate() - Deaggregates a single packet
 * @skb:        Source socket buffer containing multiple MAP frames
 * @config:     Physical endpoint configuration of the ingress device
 *
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until 0 is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 *
 * Return:
 *      - Pointer to new skb
 *      - 0 (null) if no more aggregated packets
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_phys_ep_config *config)
{
	struct sk_buff *skbn;
	struct rmnet_map_header_s *maph;
	u32 packet_len;

	if (skb->len == 0)
		return 0;

	maph = (struct rmnet_map_header_s *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header_s);

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4))
		packet_len += sizeof(struct rmnet_map_dl_checksum_trailer_s);

	if ((((int)skb->len) - ((int)packet_len)) < 0) {
		LOGM("%s", "Got malformed packet. Dropping");
		return 0;
	}

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return 0;

	skbn->dev = skb->dev;
	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	/* Some hardware can send us empty frames. Catch them */
	if (ntohs(maph->pkt_len) == 0) {
		LOGD("Dropping empty MAP frame");
		rmnet_kfree_skb(skbn, RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0);
		return 0;
	}

	return skbn;
}

/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
 * @work:       struct agg_work containing delayed work and skb to flush
 *
 * This function is scheduled to run a specified number of jiffies after the
 * last frame is transmitted by the network stack. When run, the buffer
 * containing aggregated packets is finally transmitted on the underlying link.
 */
static void rmnet_map_flush_packet_queue(struct work_struct *work)
{
	struct agg_work *real_work;
	struct rmnet_phys_ep_config *config;
	unsigned long flags;
	struct sk_buff *skb;
	int rc, agg_count = 0;

	skb = 0;
	real_work = (struct agg_work *)work;
	config = real_work->config;
	LOGD("%s", "Entering flush thread");
	spin_lock_irqsave(&config->agg_lock, flags);
	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
		/* Buffer may have already been shipped out */
		if (likely(config->agg_skb)) {
			rmnet_stats_agg_pkts(config->agg_count);
			if (config->agg_count > 1)
				LOGL("Agg count: %d", config->agg_count);
			skb = config->agg_skb;
			agg_count = config->agg_count;
			config->agg_skb = 0;
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
		}
		config->agg_state = RMNET_MAP_AGG_IDLE;
	} else {
		/* How did we get here? */
		LOGE("Ran queued command when state %s",
		     "is idle. State machine likely broken");
	}

	spin_unlock_irqrestore(&config->agg_lock, flags);
	if (skb) {
		trace_rmnet_map_flush_packet_queue(skb, agg_count);
		rc = dev_queue_xmit(skb);
		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
	}
	kfree(work);
}

/* rmnet_map_aggregate() - Software aggregation of multiple packets
 * @skb:        current packet being transmitted
 * @config:     Physical endpoint configuration of the egress device
 *
 * Aggregates multiple SKBs into a single large SKB for transmission. MAP
 * protocol is used to separate the packets in the buffer. This function
 * consumes the argument SKB, so the caller must not process it further.
 */
void rmnet_map_aggregate(struct sk_buff *skb,
			 struct rmnet_phys_ep_config *config)
{
	u8 *dest_buff;
	struct agg_work *work;
	unsigned long flags;
	struct sk_buff *agg_skb;
	struct timespec diff, last;
	int size, rc, agg_count = 0;

	if (!skb || !config)
		return;
	size = config->egress_agg_size - skb->len;

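	/* Bail out if the packet leaves under 2000 bytes of aggregation
	 * room; nothing is transmitted in that case.
	 */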
	if (size < 2000) {
		LOGL("Invalid length %d", size);
		return;
	}

new_packet:
	spin_lock_irqsave(&config->agg_lock, flags);

	memcpy(&last, &config->agg_last, sizeof(struct timespec));
	getnstimeofday(&config->agg_last);

	if (!config->agg_skb) {
		/* Check to see if we should agg first. If the traffic is very
		 * sparse, don't aggregate. We will need to tune this later
		 */
		diff = timespec_sub(config->agg_last, last);

		if ((diff.tv_sec > 0) || (diff.tv_nsec > agg_bypass_time)) {
			spin_unlock_irqrestore(&config->agg_lock, flags);
			LOGL("delta t: %ld.%09lu\tcount: bypass", diff.tv_sec,
			     diff.tv_nsec);
			rmnet_stats_agg_pkts(1);
			trace_rmnet_map_aggregate(skb, 0);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit(rc,
					       RMNET_STATS_QUEUE_XMIT_AGG_SKIP);
			return;
		}

		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
		if (!config->agg_skb) {
			config->agg_skb = 0;
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
			spin_unlock_irqrestore(&config->agg_lock, flags);
			rmnet_stats_agg_pkts(1);
			trace_rmnet_map_aggregate(skb, 0);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit(rc,
				RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL);
			return;
		}
		config->agg_count = 1;
		getnstimeofday(&config->agg_time);
		trace_rmnet_start_aggregation(skb);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_CPY_EXPAND);
		goto schedule;
	}
	diff = timespec_sub(config->agg_last, config->agg_time);

	if (skb->len > (config->egress_agg_size - config->agg_skb->len) ||
	    (config->agg_count >= config->egress_agg_count) ||
	    (diff.tv_sec > 0) || (diff.tv_nsec > agg_time_limit)) {
		rmnet_stats_agg_pkts(config->agg_count);
		agg_skb = config->agg_skb;
		agg_count = config->agg_count;
		config->agg_skb = 0;
		config->agg_count = 0;
		memset(&config->agg_time, 0, sizeof(struct timespec));
		spin_unlock_irqrestore(&config->agg_lock, flags);
		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
		     diff.tv_nsec, agg_count);
		trace_rmnet_map_aggregate(skb, agg_count);
		rc = dev_queue_xmit(agg_skb);
		rmnet_stats_queue_xmit(rc,
				       RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER);
		goto new_packet;
	}

	dest_buff = skb_put(config->agg_skb, skb->len);
	memcpy(dest_buff, skb->data, skb->len);
	config->agg_count++;
	rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_INTO_BUFF);

schedule:
	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			LOGE("Failed to allocate work item for packet %s",
			     "transfer. DATA PATH LIKELY BROKEN!");
			config->agg_state = RMNET_MAP_AGG_IDLE;
			spin_unlock_irqrestore(&config->agg_lock, flags);
			return;
		}
		INIT_DELAYED_WORK((struct delayed_work *)work,
				  rmnet_map_flush_packet_queue);
		work->config = config;
		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
		schedule_delayed_work((struct delayed_work *)work, 1);
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
}

/* Checksum Offload */

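/* rmnet_map_get_checksum_field() - Locates the transport checksum field
 * @protocol:   IP protocol number of the transport header
 * @txporthdr:  Pointer to the start of the transport header
 *
 * Return: pointer to the 16-bit checksum field inside the TCP or UDP
 * header, or 0 (null) for any other transport protocol.
 */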
static inline u16 *rmnet_map_get_checksum_field(unsigned char protocol,
						const void *txporthdr)
{
	u16 *check = 0;

	switch (protocol) {
	case IPPROTO_TCP:
		check = &(((struct tcphdr *)txporthdr)->check);
		break;

	case IPPROTO_UDP:
		check = &(((struct udphdr *)txporthdr)->check);
		break;

	default:
		check = 0;
		break;
	}

	return check;
}

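/* One's complement addition with end-around carry, e.g.
 * rmnet_map_add_checksums(0xFFFF, 0x0001) folds the carry back in and
 * returns 0x0001.
 */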
static inline u16 rmnet_map_add_checksums(u16 val1, u16 val2)
{
	int sum = val1 + val2;

	sum = (((sum & 0xFFFF0000) >> 16) + sum) & 0x0000FFFF;
	return (u16)(sum & 0x0000FFFF);
}

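/* In one's complement arithmetic, subtraction is addition of the
 * complement: a - b == a + ~b.
 */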
static inline u16 rmnet_map_subtract_checksums(u16 val1, u16 val2)
{
	return rmnet_map_add_checksums(val1, ~val2);
}

/* rmnet_map_validate_ipv4_packet_checksum() - Validates TCP/UDP checksum
 * value for IPv4 packet
 * @map_payload:        Pointer to the beginning of the map payload
 * @cksum_trailer:      Pointer to the checksum trailer
 *
 * Validates the TCP/UDP checksum for the packet using the checksum value
 * from the checksum trailer added to the packet.
 * The validation formula is the following:
 * 1. Performs 1's complement over the checksum value from the trailer
 * 2. Computes 1's complement checksum over IPv4 header and subtracts it from
 *    the value from step 1
 * 3. Computes 1's complement checksum over IPv4 pseudo header and adds it to
 *    the value from step 2
 * 4. Subtracts the checksum value from the TCP/UDP header from the value from
 *    step 3
 * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
 *    header
 *
 * Fragmentation and tunneling are not supported.
 *
 * Return: 0 if validation succeeded.
 */
static int rmnet_map_validate_ipv4_packet_checksum
	(unsigned char *map_payload,
	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
{
	struct iphdr *ip4h;
	u16 *checksum_field;
	void *txporthdr;
	u16 pseudo_checksum;
	u16 ip_hdr_checksum;
	u16 checksum_value;
	u16 ip_payload_checksum;
	u16 ip_pseudo_payload_checksum;
	u16 checksum_value_final;

	ip4h = (struct iphdr *)map_payload;
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
		return RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET;

	txporthdr = map_payload + ip4h->ihl * 4;

	checksum_field = rmnet_map_get_checksum_field(ip4h->protocol,
						      txporthdr);

	if (unlikely(!checksum_field))
		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if ((*checksum_field == 0) && (ip4h->protocol == IPPROTO_UDP))
		return RMNET_MAP_CHECKSUM_SKIPPED;

	checksum_value = ~ntohs(cksum_trailer->checksum_value);
	ip_hdr_checksum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_checksum = rmnet_map_subtract_checksums(checksum_value,
							   ip_hdr_checksum);

	pseudo_checksum = ~ntohs(csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
			(u16)(ntohs(ip4h->tot_len) - ip4h->ihl * 4),
			(u16)ip4h->protocol, 0));
	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
		ip_payload_checksum, pseudo_checksum);

	checksum_value_final = ~rmnet_map_subtract_checksums(
		ip_pseudo_payload_checksum, ntohs(*checksum_field));

	if (unlikely(checksum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 */
			LOGD("DL4 1's complement rule for UDP checksum 0");
			checksum_value_final = ~checksum_value_final;
			break;

		case IPPROTO_TCP:
			if (*checksum_field == 0xFFFF) {
				LOGD("DL4 Non-RFC compliant TCP checksum found");
				checksum_value_final = ~checksum_value_final;
			}
			break;
		}
	}

	LOGD("DL4 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
	     ~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
	     pseudo_checksum, checksum_value_final);

	if (checksum_value_final == ntohs(*checksum_field))
		return RMNET_MAP_CHECKSUM_OK;
	else
		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
}

/* rmnet_map_validate_ipv6_packet_checksum() - Validates TCP/UDP checksum
 * value for IPv6 packet
 * @map_payload:        Pointer to the beginning of the map payload
 * @cksum_trailer:      Pointer to the checksum trailer
 *
 * Validates the TCP/UDP checksum for the packet using the checksum value
 * from the checksum trailer added to the packet.
 * The validation formula is the following:
 * 1. Performs 1's complement over the checksum value from the trailer
 * 2. Computes 1's complement checksum over IPv6 header and subtracts it from
 *    the value from step 1
 * 3. Computes 1's complement checksum over IPv6 pseudo header and adds it to
 *    the value from step 2
 * 4. Subtracts the checksum value from the TCP/UDP header from the value from
 *    step 3
 * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
 *    header
 *
 * Fragmentation, extension headers and tunneling are not supported.
 *
 * Return: 0 if validation succeeded.
 */
static int rmnet_map_validate_ipv6_packet_checksum
	(unsigned char *map_payload,
	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
{
	struct ipv6hdr *ip6h;
	u16 *checksum_field;
	void *txporthdr;
	u16 pseudo_checksum;
	u16 ip_hdr_checksum;
	u16 checksum_value;
	u16 ip_payload_checksum;
	u16 ip_pseudo_payload_checksum;
	u16 checksum_value_final;
	u32 length;

	ip6h = (struct ipv6hdr *)map_payload;

	txporthdr = map_payload + sizeof(struct ipv6hdr);
	checksum_field = rmnet_map_get_checksum_field(ip6h->nexthdr,
						      txporthdr);

	if (unlikely(!checksum_field))
		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;

	checksum_value = ~ntohs(cksum_trailer->checksum_value);
	ip_hdr_checksum = ~ntohs(ip_compute_csum(ip6h,
				 (int)(txporthdr - (void *)map_payload)));
	ip_payload_checksum = rmnet_map_subtract_checksums(checksum_value,
							   ip_hdr_checksum);

	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_checksum = ~ntohs(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				 length, ip6h->nexthdr, 0));
	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
		ip_payload_checksum, pseudo_checksum);

	checksum_value_final = ~rmnet_map_subtract_checksums(
		ip_pseudo_payload_checksum, ntohs(*checksum_field));

	if (unlikely(checksum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1 */
			LOGD("DL6 One's complement rule for UDP checksum 0");
			checksum_value_final = ~checksum_value_final;
			break;

		case IPPROTO_TCP:
			if (*checksum_field == 0xFFFF) {
				LOGD("DL6 Non-RFC compliant TCP checksum found");
				checksum_value_final = ~checksum_value_final;
			}
			break;
		}
	}

	LOGD("DL6 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
	     ~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
	     pseudo_checksum, checksum_value_final);

	if (checksum_value_final == ntohs(*checksum_field))
		return RMNET_MAP_CHECKSUM_OK;
	else
		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
}

/* rmnet_map_checksum_downlink_packet() - Validates checksum on
 * a downlink packet
 * @skb:        Pointer to the packet's skb.
 *
 * Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the entire MAP
 * frame: MAP header + IP payload + padding + checksum trailer.
 * Currently, only IPv4 and IPv6 are supported along with
 * TCP & UDP. Fragmented or tunneled packets are not supported.
 *
 * Return:
 *      - RMNET_MAP_CHECKSUM_OK: Validation of checksum succeeded.
 *      - RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER: Skb buffer given is corrupted.
 *      - RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET: Valid flag is not set in the
 *        checksum trailer.
 *      - RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET: The packet is a fragment.
 *      - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT: The transport header is
 *        not TCP/UDP.
 *      - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 *      - RMNET_MAP_CHECKSUM_VALIDATION_FAILED: In case the validation failed.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb)
{
	struct rmnet_map_dl_checksum_trailer_s *cksum_trailer;
	unsigned int data_len;
	unsigned char *map_payload;
	unsigned char ip_version;

	data_len = RMNET_MAP_GET_LENGTH(skb);

	if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s) + data_len +
	    sizeof(struct rmnet_map_dl_checksum_trailer_s))))
		return RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER;

	cksum_trailer = (struct rmnet_map_dl_checksum_trailer_s *)
			(skb->data + data_len +
			 sizeof(struct rmnet_map_header_s));

	if (unlikely(!ntohs(cksum_trailer->valid)))
		return RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET;

	map_payload = (unsigned char *)(skb->data +
					sizeof(struct rmnet_map_header_s));

	ip_version = (*map_payload & 0xF0) >> 4;
	if (ip_version == 0x04)
		return rmnet_map_validate_ipv4_packet_checksum(map_payload,
							       cksum_trailer);
	else if (ip_version == 0x06)
		return rmnet_map_validate_ipv6_packet_checksum(map_payload,
							       cksum_trailer);

	return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
}

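/* rmnet_map_fill_ipv4_packet_ul_checksum_header() - Fills UL checksum header
 * @iphdr:      Pointer to the IPv4 header
 * @ul_header:  Pointer to the MAP uplink checksum header to fill
 * @skb:        Packet being offloaded
 *
 * Records where hardware checksumming starts (the transport header) and
 * where the result is inserted (skb->csum_offset), enables checksum
 * generation and flags UDP. The second 16-bit word of the header, which
 * carries the insert offset and the flag bits, is then converted to
 * network byte order in place.
 */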
static void rmnet_map_fill_ipv4_packet_ul_checksum_header
	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
	 struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	ul_header->checksum_insert_offset = skb->csum_offset;
	ul_header->cks_en = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ip4_ind = 1;
	else
		ul_header->udp_ip4_ind = 0;
	/* Changing checksum_insert_offset to network order */
	hdr++;
	*hdr = htons(*hdr);
	skb->ip_summed = CHECKSUM_NONE;
}

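/* Same as the IPv4 variant above, except udp_ip4_ind is always 0: that
 * flag marks UDP over IPv4 only.
 */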
static void rmnet_map_fill_ipv6_packet_ul_checksum_header
	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
	 struct sk_buff *skb)
{
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	ul_header->checksum_insert_offset = skb->csum_offset;
	ul_header->cks_en = 1;
	ul_header->udp_ip4_ind = 0;
	/* Changing checksum_insert_offset to network order */
	hdr++;
	*hdr = htons(*hdr);
	skb->ip_summed = CHECKSUM_NONE;
}

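/* rmnet_map_complement_ipv4_txporthdr_csum_field() - Inverts checksum field
 * @iphdr:      Pointer to the IPv4 header
 *
 * One's-complements the TCP/UDP checksum field in place. At
 * CHECKSUM_PARTIAL the stack seeds this field with the pseudo-header
 * checksum; the complement is applied only when the
 * RMNET_EGRESS_FORMAT_MAP_CKSUMV4 egress format is in use (see
 * rmnet_map_checksum_uplink_packet()).
 */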
static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	void *txporthdr;
	u16 *csum;

	txporthdr = iphdr + ip4h->ihl * 4;

	if ((ip4h->protocol == IPPROTO_TCP) ||
	    (ip4h->protocol == IPPROTO_UDP)) {
		csum = (u16 *)rmnet_map_get_checksum_field(ip4h->protocol,
							   txporthdr);
		*csum = ~(*csum);
	}
}

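/* IPv6 counterpart of rmnet_map_complement_ipv4_txporthdr_csum_field(). */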
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	void *txporthdr;
	u16 *csum;

	txporthdr = ip6hdr + sizeof(struct ipv6hdr);

	if ((ip6h->nexthdr == IPPROTO_TCP) || (ip6h->nexthdr == IPPROTO_UDP)) {
		csum = (u16 *)rmnet_map_get_checksum_field(ip6h->nexthdr,
							   txporthdr);
		*csum = ~(*csum);
	}
}

/* rmnet_map_checksum_uplink_packet() - Generates UL checksum
 * meta info header
 * @skb:        Pointer to the packet's skb.
 * @orig_dev:   Underlying physical device; its feature flags determine
 *              whether hardware checksum offload is available.
 * @egress_data_format: Egress data format flags of the physical endpoint.
 *
 * Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 *
 * Return:
 *      - RMNET_MAP_CHECKSUM_OK: Checksum meta info header was generated.
 *      - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 *      - RMNET_MAP_CHECKSUM_SW: Unsupported packet for UL checksum offload;
 *        the header is filled with checksum generation disabled.
 */
int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				     struct net_device *orig_dev,
				     u32 egress_data_format)
{
	unsigned char ip_version;
	struct rmnet_map_ul_checksum_header_s *ul_header;
	void *iphdr;
	int ret;

	ul_header = (struct rmnet_map_ul_checksum_header_s *)
		skb_push(skb, sizeof(struct rmnet_map_ul_checksum_header_s));

	if (unlikely(!(orig_dev->features &
		       (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_checksum_header_s);
		ip_version = (*(char *)iphdr & 0xF0) >> 4;
		if (ip_version == 0x04) {
			rmnet_map_fill_ipv4_packet_ul_checksum_header
				(iphdr, ul_header, skb);
			if (egress_data_format &
			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
				rmnet_map_complement_ipv4_txporthdr_csum_field(
					iphdr);
			ret = RMNET_MAP_CHECKSUM_OK;
			goto done;
		} else if (ip_version == 0x06) {
			rmnet_map_fill_ipv6_packet_ul_checksum_header
				(iphdr, ul_header, skb);
			if (egress_data_format &
			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
				rmnet_map_complement_ipv6_txporthdr_csum_field(
					iphdr);
			ret = RMNET_MAP_CHECKSUM_OK;
			goto done;
		} else {
			ret = RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
			goto sw_checksum;
		}
	} else {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

sw_checksum:
	ul_header->checksum_start_offset = 0;
	ul_header->checksum_insert_offset = 0;
	ul_header->cks_en = 0;
	ul_header->udp_ip4_ind = 0;
done:
	return ret;
}