/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rmnet_data.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/net_map.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/rmnet_config.h>
#include "rmnet_data_config.h"
#include "rmnet_map.h"
#include "rmnet_data_private.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_trace.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPD);

/* Local Definitions */

long agg_time_limit __read_mostly = 1000000L;
module_param(agg_time_limit, long, 0644);
MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf");

long agg_bypass_time __read_mostly = 10000000L;
module_param(agg_bypass_time, long, 0644);
MODULE_PARM_DESC(agg_bypass_time, "Skip agg when packets are spaced apart more than this");

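/* Note (illustrative, not part of the driver): module_param() with mode
 * 0644 exposes both knobs under /sys/module/<module>/parameters/ at
 * runtime, e.g.
 *
 *   echo 500000 > /sys/module/rmnet_data/parameters/agg_time_limit
 *
 * "rmnet_data" is assumed to be the module name here.
 */
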
struct agg_work {
	struct work_struct work;
	struct rmnet_phys_ep_config *config;
};

#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

/* rmnet_map_add_map_header() - Adds MAP header to front of skb->data
 * @skb: Socket buffer ("packet") to modify
 * @hdrlen: Number of bytes of header data which should not be included in
 *          MAP length field
 * @pad: Specify if padding the MAP packet to make it 4 byte aligned is
 *       necessary
 *
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 *
 * Return:
 *      - Pointer to MAP structure
 *      - NULL if insufficient headroom
 *      - NULL if insufficient tailroom for padding bytes
 */
struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
						    int hdrlen, int pad)
{
	u32 padding, map_datalen;
	u8 *padbytes;
	struct rmnet_map_header_s *map_header;

	if (skb_headroom(skb) < sizeof(struct rmnet_map_header_s))
		return NULL;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header_s *)
			skb_push(skb, sizeof(struct rmnet_map_header_s));
	memset(map_header, 0, sizeof(struct rmnet_map_header_s));

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return NULL;

	padbytes = (u8 *)skb_put(skb, padding);
	LOGD("pad: %d", padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	map_header->pad_len = padding & 0x3F;

	return map_header;
}

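/* Illustrative sketch (not part of this driver): a typical egress call of
 * rmnet_map_add_map_header(). RMNET_MAP_PAD_BYTES is assumed to be the
 * non-zero pad option paired with RMNET_MAP_NO_PAD_BYTES, and the mux_id
 * value is an arbitrary logical channel.
 */
#if 0
static int example_prepend_map_header(struct sk_buff *skb)
{
	struct rmnet_map_header_s *maph;

	/* hdrlen 0: the whole current skb is counted in the MAP length;
	 * padding keeps the payload 4-byte aligned.
	 */
	maph = rmnet_map_add_map_header(skb, 0, RMNET_MAP_PAD_BYTES);
	if (!maph)
		return -ENOMEM;	/* insufficient head/tailroom */

	maph->mux_id = 3;	/* hypothetical channel */
	return 0;
}
#endif
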
/* rmnet_map_deaggregate() - Deaggregates a single packet
 * @skb: Source socket buffer containing multiple MAP frames
 * @config: Physical endpoint configuration of the ingress device
 *
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 *
 * Return:
 *      - Pointer to new skb
 *      - NULL if no more aggregated packets
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_phys_ep_config *config)
{
	struct sk_buff *skbn;
	struct rmnet_map_header_s *maph;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header_s *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header_s);

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4))
		packet_len += sizeof(struct rmnet_map_dl_checksum_trailer_s);

	if (((int)skb->len - (int)packet_len) < 0) {
		LOGM("%s", "Got malformed packet. Dropping");
		return NULL;
	}

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skbn->dev = skb->dev;
	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	/* Some hardware can send us empty frames. Catch them */
	if (ntohs(maph->pkt_len) == 0) {
		LOGD("Dropping empty MAP frame");
		rmnet_kfree_skb(skbn, RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0);
		return NULL;
	}

	return skbn;
}

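/* Illustrative sketch (not part of this driver): the calling pattern the
 * contract above implies. The caller keeps invoking deaggregate() until it
 * returns NULL and then frees the source skb itself; rmnet_deliver() is a
 * hypothetical stand-in for the per-packet ingress handler.
 */
#if 0
static void example_deaggregate_all(struct sk_buff *skb,
				    struct rmnet_phys_ep_config *config)
{
	struct sk_buff *skbn;

	while ((skbn = rmnet_map_deaggregate(skb, config)) != NULL)
		rmnet_deliver(skbn);	/* hypothetical */

	dev_kfree_skb(skb);	/* the source skb is never consumed */
}
#endif
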
static void rmnet_map_flush_packet_work(struct work_struct *work)
{
	struct rmnet_phys_ep_config *config;
	struct agg_work *real_work;
	int rc, agg_count = 0;
	unsigned long flags;
	struct sk_buff *skb;

	real_work = (struct agg_work *)work;
	config = real_work->config;
	skb = NULL;

	LOGD("%s", "Entering flush thread");
	spin_lock_irqsave(&config->agg_lock, flags);
	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
		/* Buffer may have already been shipped out */
		if (likely(config->agg_skb)) {
			rmnet_stats_agg_pkts(config->agg_count);
			if (config->agg_count > 1)
				LOGL("Agg count: %d", config->agg_count);
			skb = config->agg_skb;
			agg_count = config->agg_count;
			config->agg_skb = NULL;
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
		}
		config->agg_state = RMNET_MAP_AGG_IDLE;
	} else {
		/* How did we get here? */
		LOGE("Ran queued command when state %s",
		     "is idle. State machine likely broken");
	}

	spin_unlock_irqrestore(&config->agg_lock, flags);
	if (skb) {
		trace_rmnet_map_flush_packet_queue(skb, agg_count);
		rc = dev_queue_xmit(skb);
		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
	}

	kfree(work);
}

/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
 *
 * This function is scheduled to run in a specified number of ns after
 * the last frame transmitted by the network stack. When run, the buffer
 * containing aggregated packets is finally transmitted on the underlying link.
 */
enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
{
	struct rmnet_phys_ep_config *config;
	struct agg_work *work;

	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		config->agg_state = RMNET_MAP_AGG_IDLE;

		return HRTIMER_NORESTART;
	}

	INIT_WORK(&work->work, rmnet_map_flush_packet_work);
	work->config = config;
	schedule_work((struct work_struct *)work);
	return HRTIMER_NORESTART;
}

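/* Illustrative sketch (not part of this file): how endpoint setup is
 * expected to wire the hrtimer to the flush callback above. The real
 * initialization lives in the driver's config path; the clock and mode
 * choices here are assumptions.
 */
#if 0
static void example_init_agg_timer(struct rmnet_phys_ep_config *config)
{
	hrtimer_init(&config->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	config->hrtimer.function = rmnet_map_flush_packet_queue;
	config->agg_state = RMNET_MAP_AGG_IDLE;
	spin_lock_init(&config->agg_lock);
}
#endif
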
/* rmnet_map_aggregate() - Software aggregation of multiple packets
 * @skb: current packet being transmitted
 * @config: Physical endpoint configuration of the egress device
 *
 * Aggregates multiple SKBs into a single large SKB for transmission. MAP
 * protocol is used to separate the packets in the buffer. This function
 * consumes the argument SKB, so it must not be further processed by any
 * other function.
 */
void rmnet_map_aggregate(struct sk_buff *skb,
			 struct rmnet_phys_ep_config *config)
{
	u8 *dest_buff;
	unsigned long flags;
	struct sk_buff *agg_skb;
	struct timespec diff, last;
	int size, rc, agg_count = 0;

	if (!skb || !config)
		return;

new_packet:
	spin_lock_irqsave(&config->agg_lock, flags);
	memcpy(&last, &config->agg_last, sizeof(struct timespec));
	getnstimeofday(&config->agg_last);

	if (!config->agg_skb) {
		/* Check to see if we should agg first. If the traffic is very
		 * sparse, don't aggregate. We will need to tune this later
		 */
		diff = timespec_sub(config->agg_last, last);

		if ((diff.tv_sec > 0) || (diff.tv_nsec > agg_bypass_time)) {
			spin_unlock_irqrestore(&config->agg_lock, flags);
			LOGL("delta t: %ld.%09lu\tcount: bypass", diff.tv_sec,
			     diff.tv_nsec);
			rmnet_stats_agg_pkts(1);
			trace_rmnet_map_aggregate(skb, 0);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit(rc,
					       RMNET_STATS_QUEUE_XMIT_AGG_SKIP);
			return;
		}

		size = config->egress_agg_size - skb->len;
		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
		if (!config->agg_skb) {
			config->agg_skb = NULL;
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
			spin_unlock_irqrestore(&config->agg_lock, flags);
			rmnet_stats_agg_pkts(1);
			trace_rmnet_map_aggregate(skb, 0);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit
				(rc,
				 RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL);
			return;
		}
		config->agg_count = 1;
		getnstimeofday(&config->agg_time);
		trace_rmnet_start_aggregation(skb);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_CPY_EXPAND);
		goto schedule;
	}
	diff = timespec_sub(config->agg_last, config->agg_time);

	if (skb->len > (config->egress_agg_size - config->agg_skb->len) ||
	    (config->agg_count >= config->egress_agg_count) ||
	    (diff.tv_sec > 0) || (diff.tv_nsec > agg_time_limit)) {
		rmnet_stats_agg_pkts(config->agg_count);
		agg_skb = config->agg_skb;
		agg_count = config->agg_count;
		config->agg_skb = NULL;
		config->agg_count = 0;
		memset(&config->agg_time, 0, sizeof(struct timespec));
		config->agg_state = RMNET_MAP_AGG_IDLE;
		spin_unlock_irqrestore(&config->agg_lock, flags);
		hrtimer_cancel(&config->hrtimer);
		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
		     diff.tv_nsec, agg_count);
		trace_rmnet_map_aggregate(skb, agg_count);
		rc = dev_queue_xmit(agg_skb);
		rmnet_stats_queue_xmit(rc,
				       RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER);
		goto new_packet;
	}

	dest_buff = skb_put(config->agg_skb, skb->len);
	memcpy(dest_buff, skb->data, skb->len);
	config->agg_count++;
	rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_INTO_BUFF);

schedule:
	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
			      HRTIMER_MODE_REL);
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
}

/* Checksum Offload */

static inline u16 *rmnet_map_get_checksum_field(unsigned char protocol,
						const void *txporthdr)
{
	u16 *check = NULL;

	switch (protocol) {
	case IPPROTO_TCP:
		check = &(((struct tcphdr *)txporthdr)->check);
		break;

	case IPPROTO_UDP:
		check = &(((struct udphdr *)txporthdr)->check);
		break;

	default:
		check = NULL;
		break;
	}

	return check;
}

static inline u16 rmnet_map_add_checksums(u16 val1, u16 val2)
{
	int sum = val1 + val2;

	sum = (((sum & 0xFFFF0000) >> 16) + sum) & 0x0000FFFF;
	return (u16)(sum & 0x0000FFFF);
}

static inline u16 rmnet_map_subtract_checksums(u16 val1, u16 val2)
{
	return rmnet_map_add_checksums(val1, ~val2);
}

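/* Illustrative self-check (not compiled into the driver): the one's
 * complement identities the helpers above rely on. 0xFFFF + 0x0001
 * overflows and the end-around carry folds back in, giving 0x0001;
 * subtracting a value from itself yields negative zero (0xFFFF).
 */
#if 0
static void example_csum_helper_selftest(void)
{
	WARN_ON(rmnet_map_add_checksums(0xFFFF, 0x0001) != 0x0001);
	WARN_ON(rmnet_map_subtract_checksums(0x0001, 0x0001) != 0xFFFF);
}
#endif
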
/* rmnet_map_validate_ipv4_packet_checksum() - Validates TCP/UDP checksum
 * value for IPv4 packet
 * @map_payload: Pointer to the beginning of the map payload
 * @cksum_trailer: Pointer to the checksum trailer
 *
 * Validates the TCP/UDP checksum for the packet using the checksum value
 * from the checksum trailer added to the packet.
 * The validation formula is the following:
 * 1. Performs 1's complement over the checksum value from the trailer
 * 2. Computes 1's complement checksum over IPv4 header and subtracts it from
 *    the value from step 1
 * 3. Computes 1's complement checksum over IPv4 pseudo header and adds it to
 *    the value from step 2
 * 4. Subtracts the checksum value from the TCP/UDP header from the value from
 *    step 3
 * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
 *    header
 *
 * Fragmentation and tunneling are not supported.
 *
 * Return: 0 if validation succeeded.
 */
static int rmnet_map_validate_ipv4_packet_checksum
	(unsigned char *map_payload,
	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
{
	struct iphdr *ip4h;
	u16 *checksum_field;
	void *txporthdr;
	u16 pseudo_checksum;
	u16 ip_hdr_checksum;
	u16 checksum_value;
	u16 ip_payload_checksum;
	u16 ip_pseudo_payload_checksum;
	u16 checksum_value_final;

	ip4h = (struct iphdr *)map_payload;
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
		return RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET;

	txporthdr = map_payload + ip4h->ihl * 4;

	checksum_field = rmnet_map_get_checksum_field(ip4h->protocol,
						      txporthdr);

	if (unlikely(!checksum_field))
		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if ((*checksum_field == 0) && (ip4h->protocol == IPPROTO_UDP))
		return RMNET_MAP_CHECKSUM_SKIPPED;

	checksum_value = ~ntohs(cksum_trailer->checksum_value);
	ip_hdr_checksum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_checksum = rmnet_map_subtract_checksums(checksum_value,
							   ip_hdr_checksum);

	pseudo_checksum = ~ntohs(csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
			(u16)(ntohs(ip4h->tot_len) - ip4h->ihl * 4),
			(u16)ip4h->protocol, 0));
	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
		ip_payload_checksum, pseudo_checksum);

	checksum_value_final = ~rmnet_map_subtract_checksums(
		ip_pseudo_payload_checksum, ntohs(*checksum_field));

	if (unlikely(checksum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 */
			LOGD("DL4 1's complement rule for UDP checksum 0");
			checksum_value_final = ~checksum_value_final;
			break;

		case IPPROTO_TCP:
			if (*checksum_field == 0xFFFF) {
				LOGD(
				"DL4 Non-RFC compliant TCP checksum found");
				checksum_value_final = ~checksum_value_final;
			}
			break;
		}
	}

	LOGD(
	"DL4 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
	    ~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
	    pseudo_checksum, checksum_value_final);

	if (checksum_value_final == ntohs(*checksum_field))
		return RMNET_MAP_CHECKSUM_OK;
	else
		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
}

/* rmnet_map_validate_ipv6_packet_checksum() - Validates TCP/UDP checksum
 * value for IPv6 packet
 * @map_payload: Pointer to the beginning of the map payload
 * @cksum_trailer: Pointer to the checksum trailer
 *
 * Validates the TCP/UDP checksum for the packet using the checksum value
 * from the checksum trailer added to the packet.
 * The validation formula is the following:
 * 1. Performs 1's complement over the checksum value from the trailer
 * 2. Computes 1's complement checksum over IPv6 header and subtracts it from
 *    the value from step 1
 * 3. Computes 1's complement checksum over IPv6 pseudo header and adds it to
 *    the value from step 2
 * 4. Subtracts the checksum value from the TCP/UDP header from the value from
 *    step 3
 * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
 *    header
 *
 * Fragmentation, extension headers and tunneling are not supported.
 *
 * Return: 0 if validation succeeded.
 */
static int rmnet_map_validate_ipv6_packet_checksum
	(unsigned char *map_payload,
	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
{
	struct ipv6hdr *ip6h;
	u16 *checksum_field;
	void *txporthdr;
	u16 pseudo_checksum;
	u16 ip_hdr_checksum;
	u16 checksum_value;
	u16 ip_payload_checksum;
	u16 ip_pseudo_payload_checksum;
	u16 checksum_value_final;
	u32 length;

	ip6h = (struct ipv6hdr *)map_payload;

	txporthdr = map_payload + sizeof(struct ipv6hdr);
	checksum_field = rmnet_map_get_checksum_field(ip6h->nexthdr,
						      txporthdr);

	if (unlikely(!checksum_field))
		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;

	checksum_value = ~ntohs(cksum_trailer->checksum_value);
	ip_hdr_checksum = ~ntohs(ip_compute_csum(ip6h,
			(int)(txporthdr - (void *)map_payload)));
	ip_payload_checksum = rmnet_map_subtract_checksums
				(checksum_value, ip_hdr_checksum);

	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_checksum = ~ntohs(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				 length, ip6h->nexthdr, 0));
	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
		ip_payload_checksum, pseudo_checksum);

	checksum_value_final = ~rmnet_map_subtract_checksums(
		ip_pseudo_payload_checksum, ntohs(*checksum_field));

	if (unlikely(checksum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1 */
			LOGD("DL6 One's complement rule for UDP checksum 0");
			checksum_value_final = ~checksum_value_final;
			break;

		case IPPROTO_TCP:
			if (*checksum_field == 0xFFFF) {
				LOGD(
				"DL6 Non-RFC compliant TCP checksum found");
				checksum_value_final = ~checksum_value_final;
			}
			break;
		}
	}

	LOGD(
	"DL6 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
	    ~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
	    pseudo_checksum, checksum_value_final);

	if (checksum_value_final == ntohs(*checksum_field))
		return RMNET_MAP_CHECKSUM_OK;
	else
		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
}

/* rmnet_map_checksum_downlink_packet() - Validates checksum on
 * a downlink packet
 * @skb: Pointer to the packet's skb.
 *
 * Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the entire MAP
 * frame: MAP header + IP payload + padding + checksum trailer.
 * Currently, only IPv4 and IPv6 are supported along with
 * TCP & UDP. Fragmented or tunneled packets are not supported.
 *
 * Return:
 *   - RMNET_MAP_CHECKSUM_OK: Validation of checksum succeeded.
 *   - RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER: Skb buffer given is corrupted.
 *   - RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET: Valid flag is not set in the
 *     checksum trailer.
 *   - RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET: The packet is a fragment.
 *   - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT: The transport header is
 *     not TCP/UDP.
 *   - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 *   - RMNET_MAP_CHECKSUM_VALIDATION_FAILED: In case the validation failed.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb)
{
	struct rmnet_map_dl_checksum_trailer_s *cksum_trailer;
	unsigned int data_len;
	unsigned char *map_payload;
	unsigned char ip_version;

	data_len = RMNET_MAP_GET_LENGTH(skb);

	if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s) + data_len +
	    sizeof(struct rmnet_map_dl_checksum_trailer_s))))
		return RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER;

	cksum_trailer = (struct rmnet_map_dl_checksum_trailer_s *)
			(skb->data + data_len
			 + sizeof(struct rmnet_map_header_s));

	if (unlikely(!ntohs(cksum_trailer->valid)))
		return RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET;

	map_payload = (unsigned char *)(skb->data
			+ sizeof(struct rmnet_map_header_s));

	ip_version = (*map_payload & 0xF0) >> 4;
	if (ip_version == 0x04)
		return rmnet_map_validate_ipv4_packet_checksum(map_payload,
							       cksum_trailer);
	else if (ip_version == 0x06)
		return rmnet_map_validate_ipv6_packet_checksum(map_payload,
							       cksum_trailer);

	return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
}

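/* Illustrative sketch (not part of this driver): consuming the downlink
 * verdict. Only RMNET_MAP_CHECKSUM_OK lets the stack skip its own
 * validation; any other return leaves ip_summed untouched so the stack
 * re-verifies the checksum in software.
 */
#if 0
static void example_consume_dl_csum_verdict(struct sk_buff *skb)
{
	if (rmnet_map_checksum_downlink_packet(skb) == RMNET_MAP_CHECKSUM_OK)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
#endif
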
static void rmnet_map_fill_ipv4_packet_ul_checksum_header
	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
	 struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	ul_header->checksum_insert_offset = skb->csum_offset;
	ul_header->cks_en = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ip4_ind = 1;
	else
		ul_header->udp_ip4_ind = 0;
	/* Changing checksum_insert_offset to network order */
	hdr++;
	*hdr = htons(*hdr);
	skb->ip_summed = CHECKSUM_NONE;
}

static void rmnet_map_fill_ipv6_packet_ul_checksum_header
	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
	 struct sk_buff *skb)
{
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	ul_header->checksum_insert_offset = skb->csum_offset;
	ul_header->cks_en = 1;
	ul_header->udp_ip4_ind = 0;
	/* Changing checksum_insert_offset to network order */
	hdr++;
	*hdr = htons(*hdr);
	skb->ip_summed = CHECKSUM_NONE;
}

static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	void *txporthdr;
	u16 *csum;

	txporthdr = iphdr + ip4h->ihl * 4;

	if ((ip4h->protocol == IPPROTO_TCP) ||
	    (ip4h->protocol == IPPROTO_UDP)) {
		csum = (u16 *)rmnet_map_get_checksum_field(ip4h->protocol,
							   txporthdr);
		*csum = ~(*csum);
	}
}

static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	void *txporthdr;
	u16 *csum;

	txporthdr = ip6hdr + sizeof(struct ipv6hdr);

	if ((ip6h->nexthdr == IPPROTO_TCP) || (ip6h->nexthdr == IPPROTO_UDP)) {
		csum = (u16 *)rmnet_map_get_checksum_field(ip6h->nexthdr,
							   txporthdr);
		*csum = ~(*csum);
	}
}

/* rmnet_map_checksum_uplink_packet() - Generates UL checksum
 * meta info header
 * @skb: Pointer to the packet's skb.
 *
 * Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 *
 * Return:
 *   - RMNET_MAP_CHECKSUM_OK: Checksum offload header was generated.
 *   - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 *   - RMNET_MAP_CHECKSUM_SW: Unsupported packet for UL checksum offload.
 */
int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				     struct net_device *orig_dev,
				     u32 egress_data_format)
{
	unsigned char ip_version;
	struct rmnet_map_ul_checksum_header_s *ul_header;
	void *iphdr;
	int ret;

	ul_header = (struct rmnet_map_ul_checksum_header_s *)
		skb_push(skb, sizeof(struct rmnet_map_ul_checksum_header_s));

	if (unlikely(!(orig_dev->features &
		       (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_checksum_header_s);
		ip_version = (*(char *)iphdr & 0xF0) >> 4;
		if (ip_version == 0x04) {
			rmnet_map_fill_ipv4_packet_ul_checksum_header
				(iphdr, ul_header, skb);
			if (egress_data_format &
			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
				rmnet_map_complement_ipv4_txporthdr_csum_field(
					iphdr);
			ret = RMNET_MAP_CHECKSUM_OK;
			goto done;
		} else if (ip_version == 0x06) {
			rmnet_map_fill_ipv6_packet_ul_checksum_header
				(iphdr, ul_header, skb);
			if (egress_data_format &
			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
				rmnet_map_complement_ipv6_txporthdr_csum_field(
					iphdr);
			ret = RMNET_MAP_CHECKSUM_OK;
			goto done;
		} else {
			ret = RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
			goto sw_checksum;
		}
	} else {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

sw_checksum:
	ul_header->checksum_start_offset = 0;
	ul_header->checksum_insert_offset = 0;
	ul_header->cks_en = 0;
	ul_header->udp_ip4_ind = 0;
done:
	return ret;
}

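/* Illustrative sketch (not part of this driver): the egress ordering the
 * function above implies. The UL checksum header is pushed directly in
 * front of the IP header first, then the MAP header goes on in front of
 * that, giving MAP header | UL checksum header | IP packet on the wire.
 * Passing hdrlen 0 (i.e. the checksum header is counted in the MAP length
 * field) is an assumption here.
 */
#if 0
static int example_egress_csum_then_map(struct sk_buff *skb,
					struct net_device *orig_dev)
{
	rmnet_map_checksum_uplink_packet(skb, orig_dev,
					 RMNET_EGRESS_FORMAT_MAP_CKSUMV4);

	if (!rmnet_map_add_map_header(skb, 0, RMNET_MAP_NO_PAD_BYTES))
		return -ENOMEM;

	return 0;
}
#endif
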
int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset)
{
	unsigned char *packet_start = skb->data + offset;
	int is_icmp = 0;

	if ((skb->data[offset]) >> 4 == 0x04) {
		struct iphdr *ip4h = (struct iphdr *)(packet_start);

		if (ip4h->protocol == IPPROTO_ICMP)
			is_icmp = 1;
	} else if ((skb->data[offset]) >> 4 == 0x06) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);

		if (ip6h->nexthdr == IPPROTO_ICMPV6) {
			is_icmp = 1;
		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag;

			frag = (struct frag_hdr *)(packet_start
						   + sizeof(struct ipv6hdr));
			if (frag->nexthdr == IPPROTO_ICMPV6)
				is_icmp = 1;
		}
	}

	return is_icmp;
}