/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rmnet_data.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/net_map.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/rmnet_config.h>
#include "rmnet_data_config.h"
#include "rmnet_map.h"
#include "rmnet_data_private.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_trace.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPD);

/* Local Definitions */

long agg_time_limit __read_mostly = 1000000L;
module_param(agg_time_limit, long, 0644);
MODULE_PARM_DESC(agg_time_limit, "Maximum time (ns) packets sit in the agg buf");

long agg_bypass_time __read_mostly = 10000000L;
module_param(agg_bypass_time, long, 0644);
MODULE_PARM_DESC(agg_bypass_time, "Skip agg when packet spacing exceeds this (ns)");
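
/* Both parameters are compared against timespec nanosecond deltas in
 * rmnet_map_aggregate(), so the defaults above correspond to roughly
 * 1 ms and 10 ms respectively.
 */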

struct agg_work {
	struct work_struct work;
	struct rmnet_phys_ep_config *config;
};

#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

/* rmnet_map_add_map_header() - Adds MAP header to front of skb->data
 * @skb: Socket buffer ("packet") to modify
 * @hdrlen: Number of bytes of header data which should not be included in
 *          MAP length field
 * @pad: Specify if padding the MAP packet to make it 4 byte aligned is
 *       necessary
 *
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 *
 * Return:
 *      - Pointer to MAP structure
 *      - NULL if insufficient headroom
 *      - NULL if insufficient tailroom for padding bytes
 */
struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
						    int hdrlen, int pad)
{
	u32 padding, map_datalen;
	u8 *padbytes;
	struct rmnet_map_header_s *map_header;

	if (skb_headroom(skb) < sizeof(struct rmnet_map_header_s))
		return NULL;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header_s *)
			skb_push(skb, sizeof(struct rmnet_map_header_s));
	memset(map_header, 0, sizeof(struct rmnet_map_header_s));

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return NULL;

	padbytes = (u8 *)skb_put(skb, padding);
	LOGD("pad: %d", padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	map_header->pad_len = padding & 0x3F;

	return map_header;
}
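
/* Worked example (illustrative): for skb->len == 1503 and hdrlen == 0,
 * map_datalen is 1503 and ALIGN(1503, 4) is 1504, so one zero byte of
 * padding is appended and the header carries pkt_len = 1504, pad_len = 1.
 */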

/* rmnet_map_deaggregate() - Deaggregates a single packet
 * @skb: Source socket buffer containing multiple MAP frames
 * @config: Physical endpoint configuration of the ingress device
 *
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 *
 * Return:
 *      - Pointer to new skb
 *      - NULL if no more aggregated packets
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_phys_ep_config *config)
{
	struct sk_buff *skbn;
	struct rmnet_map_header_s *maph;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header_s *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header_s);

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4))
		packet_len += sizeof(struct rmnet_map_dl_checksum_trailer_s);

	if (((int)skb->len - (int)packet_len) < 0) {
		LOGM("%s", "Got malformed packet. Dropping");
		return NULL;
	}

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skbn->dev = skb->dev;
	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	/* Some hardware can send us empty frames. Catch them */
	if (ntohs(maph->pkt_len) == 0) {
		LOGD("Dropping empty MAP frame");
		rmnet_kfree_skb(skbn, RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0);
		return NULL;
	}

	return skbn;
}
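
/* Typical caller loop (sketch only; the real ingress path lives in
 * rmnet_data_handlers.c, and deliver_skb() below is a hypothetical
 * stand-in for whatever the caller does with each frame):
 *
 *	while ((skbn = rmnet_map_deaggregate(skb, config)) != NULL)
 *		deliver_skb(skbn);
 *	consume_skb(skb);	// caller owns and frees the source skb
 */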

static void rmnet_map_flush_packet_work(struct work_struct *work)
{
	struct rmnet_phys_ep_config *config;
	struct agg_work *real_work;
	int rc, agg_count = 0;
	unsigned long flags;
	struct sk_buff *skb;

	real_work = (struct agg_work *)work;
	config = real_work->config;
	skb = NULL;

	LOGD("%s", "Entering flush thread");
	spin_lock_irqsave(&config->agg_lock, flags);
	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
		/* Buffer may have already been shipped out */
		if (likely(config->agg_skb)) {
			rmnet_stats_agg_pkts(config->agg_count);
			if (config->agg_count > 1)
				LOGL("Agg count: %d", config->agg_count);
			skb = config->agg_skb;
			agg_count = config->agg_count;
			config->agg_skb = NULL;
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
		}
		config->agg_state = RMNET_MAP_AGG_IDLE;
	}

	spin_unlock_irqrestore(&config->agg_lock, flags);
	if (skb) {
		trace_rmnet_map_flush_packet_queue(skb, agg_count);
		rc = dev_queue_xmit(skb);
		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
	}

	kfree(work);
}

/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
 * @t: Expired hrtimer embedded in the physical endpoint configuration
 *
 * This function is scheduled to run in a specified number of ns after
 * the last frame transmitted by the network stack. When run, the buffer
 * containing aggregated packets is finally transmitted on the underlying link.
 */
enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
{
	struct rmnet_phys_ep_config *config;
	struct agg_work *work;

	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		config->agg_state = RMNET_MAP_AGG_IDLE;

		return HRTIMER_NORESTART;
	}

	INIT_WORK(&work->work, rmnet_map_flush_packet_work);
	work->config = config;
	schedule_work((struct work_struct *)work);
	return HRTIMER_NORESTART;
}
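
/* The timer callback above runs in hard-IRQ context, so it only queues the
 * flush; the actual dev_queue_xmit() happens later in process context via
 * rmnet_map_flush_packet_work().
 */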

/* rmnet_map_aggregate() - Software aggregates multiple packets.
 * @skb: current packet being transmitted
 * @config: Physical endpoint configuration of the egress device
 *
 * Aggregates multiple SKBs into a single large SKB for transmission. MAP
 * protocol is used to separate the packets in the buffer. This function
 * consumes the argument SKB, which must not be processed further by any
 * other function.
 */
void rmnet_map_aggregate(struct sk_buff *skb,
			 struct rmnet_phys_ep_config *config)
{
	u8 *dest_buff;
	unsigned long flags;
	struct sk_buff *agg_skb;
	struct timespec diff, last;
	int size, rc, agg_count = 0;

	if (!skb || !config)
		return;

new_packet:
	spin_lock_irqsave(&config->agg_lock, flags);
	memcpy(&last, &config->agg_last, sizeof(struct timespec));
	getnstimeofday(&config->agg_last);

	if (!config->agg_skb) {
		/* Check to see if we should agg first. If the traffic is very
		 * sparse, don't aggregate. We will need to tune this later
		 */
		diff = timespec_sub(config->agg_last, last);
		size = config->egress_agg_size - skb->len;

		if ((diff.tv_sec > 0) || (diff.tv_nsec > agg_bypass_time) ||
		    (size <= 0)) {
			spin_unlock_irqrestore(&config->agg_lock, flags);
			LOGL("delta t: %ld.%09lu\tcount: bypass", diff.tv_sec,
			     diff.tv_nsec);
			rmnet_stats_agg_pkts(1);
			trace_rmnet_map_aggregate(skb, 0);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit(rc,
					       RMNET_STATS_QUEUE_XMIT_AGG_SKIP);
			return;
		}

		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
		if (!config->agg_skb) {
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
			spin_unlock_irqrestore(&config->agg_lock, flags);
			rmnet_stats_agg_pkts(1);
			trace_rmnet_map_aggregate(skb, 0);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit
				(rc, RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL);
			return;
		}
		config->agg_count = 1;
		getnstimeofday(&config->agg_time);
		trace_rmnet_start_aggregation(skb);
		dev_kfree_skb_any(skb);
		goto schedule;
	}
	diff = timespec_sub(config->agg_last, config->agg_time);

	if (skb->len > (config->egress_agg_size - config->agg_skb->len) ||
	    (config->agg_count >= config->egress_agg_count) ||
	    (diff.tv_sec > 0) || (diff.tv_nsec > agg_time_limit)) {
		rmnet_stats_agg_pkts(config->agg_count);
		agg_skb = config->agg_skb;
		agg_count = config->agg_count;
		config->agg_skb = NULL;
		config->agg_count = 0;
		memset(&config->agg_time, 0, sizeof(struct timespec));
		config->agg_state = RMNET_MAP_AGG_IDLE;
		spin_unlock_irqrestore(&config->agg_lock, flags);
		hrtimer_cancel(&config->hrtimer);
		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
		     diff.tv_nsec, agg_count);
		trace_rmnet_map_aggregate(skb, agg_count);
		rc = dev_queue_xmit(agg_skb);
		rmnet_stats_queue_xmit(rc,
				       RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER);
		goto new_packet;
	}

	dest_buff = skb_put(config->agg_skb, skb->len);
	memcpy(dest_buff, skb->data, skb->len);
	config->agg_count++;
	dev_kfree_skb_any(skb);

schedule:
	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
			      HRTIMER_MODE_REL);
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
}
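
/* On the wire, the aggregate built above is just back-to-back MAP frames,
 * |maph|payload|pad|maph|payload|pad|..., which the receive side walks
 * apart again with rmnet_map_deaggregate() using each header's pkt_len.
 */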

/* Checksum Offload */

static inline u16 *rmnet_map_get_checksum_field(unsigned char protocol,
						const void *txporthdr)
{
	u16 *check = NULL;

	switch (protocol) {
	case IPPROTO_TCP:
		check = &((struct tcphdr *)txporthdr)->check;
		break;

	case IPPROTO_UDP:
		check = &((struct udphdr *)txporthdr)->check;
		break;

	default:
		check = NULL;
		break;
	}

	return check;
}

static inline u16 rmnet_map_add_checksums(u16 val1, u16 val2)
{
	int sum = val1 + val2;

	sum = (((sum & 0xFFFF0000) >> 16) + sum) & 0x0000FFFF;
	return (u16)(sum & 0x0000FFFF);
}

static inline u16 rmnet_map_subtract_checksums(u16 val1, u16 val2)
{
	return rmnet_map_add_checksums(val1, ~val2);
}
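
/* Worked example (illustrative): rmnet_map_add_checksums(0xFFFF, 0x0001)
 * produces sum = 0x10000, and folding the end-around carry back in gives
 * 0x0001. The carry fold is what makes this 1's complement arithmetic,
 * and subtraction is simply addition of the complement.
 */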

/* rmnet_map_validate_ipv4_packet_checksum() - Validates TCP/UDP checksum
 * value for IPv4 packet
 * @map_payload: Pointer to the beginning of the map payload
 * @cksum_trailer: Pointer to the checksum trailer
 *
 * Validates the TCP/UDP checksum for the packet using the checksum value
 * from the checksum trailer added to the packet.
 * The validation formula is the following:
 * 1. Performs 1's complement over the checksum value from the trailer
 * 2. Computes 1's complement checksum over IPv4 header and subtracts it from
 *    the value from step 1
 * 3. Computes 1's complement checksum over IPv4 pseudo header and adds it to
 *    the value from step 2
 * 4. Subtracts the checksum value from the TCP/UDP header from the value from
 *    step 3
 * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
 *    header
 *
 * Fragmentation and tunneling are not supported.
 *
 * Return: 0 if validation succeeded.
 */
static int rmnet_map_validate_ipv4_packet_checksum
	(unsigned char *map_payload,
	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
{
	struct iphdr *ip4h;
	u16 *checksum_field;
	void *txporthdr;
	u16 pseudo_checksum;
	u16 ip_hdr_checksum;
	u16 checksum_value;
	u16 ip_payload_checksum;
	u16 ip_pseudo_payload_checksum;
	u16 checksum_value_final;

	ip4h = (struct iphdr *)map_payload;
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
		return RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET;

	txporthdr = map_payload + ip4h->ihl * 4;

	checksum_field = rmnet_map_get_checksum_field(ip4h->protocol,
						      txporthdr);

	if (unlikely(!checksum_field))
		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if ((*checksum_field == 0) && (ip4h->protocol == IPPROTO_UDP))
		return RMNET_MAP_CHECKSUM_SKIPPED;

	checksum_value = ~ntohs(cksum_trailer->checksum_value);
	ip_hdr_checksum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_checksum = rmnet_map_subtract_checksums(checksum_value,
							   ip_hdr_checksum);

	pseudo_checksum = ~ntohs(csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
			(u16)(ntohs(ip4h->tot_len) - ip4h->ihl * 4),
			(u16)ip4h->protocol, 0));
	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
		ip_payload_checksum, pseudo_checksum);

	checksum_value_final = ~rmnet_map_subtract_checksums(
		ip_pseudo_payload_checksum, ntohs(*checksum_field));

	if (unlikely(checksum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 */
			LOGD("DL4 1's complement rule for UDP checksum 0");
			checksum_value_final = ~checksum_value_final;
			break;

		case IPPROTO_TCP:
			if (*checksum_field == 0xFFFF) {
				LOGD(
				"DL4 Non-RFC compliant TCP checksum found");
				checksum_value_final = ~checksum_value_final;
			}
			break;
		}
	}

	LOGD(
	"DL4 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
	~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
	pseudo_checksum, checksum_value_final);

	if (checksum_value_final == ntohs(*checksum_field))
		return RMNET_MAP_CHECKSUM_OK;
	else
		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
}

/* rmnet_map_validate_ipv6_packet_checksum() - Validates TCP/UDP checksum
 * value for IPv6 packet
 * @map_payload: Pointer to the beginning of the map payload
 * @cksum_trailer: Pointer to the checksum trailer
 *
 * Validates the TCP/UDP checksum for the packet using the checksum value
 * from the checksum trailer added to the packet.
 * The validation formula is the following:
 * 1. Performs 1's complement over the checksum value from the trailer
 * 2. Computes 1's complement checksum over IPv6 header and subtracts it from
 *    the value from step 1
 * 3. Computes 1's complement checksum over IPv6 pseudo header and adds it to
 *    the value from step 2
 * 4. Subtracts the checksum value from the TCP/UDP header from the value from
 *    step 3
 * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
 *    header
 *
 * Fragmentation, extension headers and tunneling are not supported.
 *
 * Return: 0 if validation succeeded.
 */
static int rmnet_map_validate_ipv6_packet_checksum
	(unsigned char *map_payload,
	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
{
	struct ipv6hdr *ip6h;
	u16 *checksum_field;
	void *txporthdr;
	u16 pseudo_checksum;
	u16 ip_hdr_checksum;
	u16 checksum_value;
	u16 ip_payload_checksum;
	u16 ip_pseudo_payload_checksum;
	u16 checksum_value_final;
	u32 length;

	ip6h = (struct ipv6hdr *)map_payload;

	txporthdr = map_payload + sizeof(struct ipv6hdr);
	checksum_field = rmnet_map_get_checksum_field(ip6h->nexthdr,
						      txporthdr);

	if (unlikely(!checksum_field))
		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;

	checksum_value = ~ntohs(cksum_trailer->checksum_value);
	ip_hdr_checksum = ~ntohs(ip_compute_csum(ip6h,
				 (int)(txporthdr - (void *)map_payload)));
	ip_payload_checksum = rmnet_map_subtract_checksums
				(checksum_value, ip_hdr_checksum);

	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_checksum = ~ntohs(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				 length, ip6h->nexthdr, 0));
	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
		ip_payload_checksum, pseudo_checksum);

	checksum_value_final = ~rmnet_map_subtract_checksums(
		ip_pseudo_payload_checksum, ntohs(*checksum_field));

	if (unlikely(checksum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1 */
			LOGD("DL6 One's complement rule for UDP checksum 0");
			checksum_value_final = ~checksum_value_final;
			break;

		case IPPROTO_TCP:
			if (*checksum_field == 0xFFFF) {
				LOGD(
				"DL6 Non-RFC compliant TCP checksum found");
				checksum_value_final = ~checksum_value_final;
			}
			break;
		}
	}

	LOGD(
	"DL6 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
	~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
	pseudo_checksum, checksum_value_final);

	if (checksum_value_final == ntohs(*checksum_field))
		return RMNET_MAP_CHECKSUM_OK;
	else
		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
}

/* rmnet_map_checksum_downlink_packet() - Validates checksum on
 * a downlink packet
 * @skb: Pointer to the packet's skb.
 *
 * Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the entire MAP
 * frame: MAP header + IP payload + padding + checksum trailer.
 * Currently, only IPv4 and IPv6 are supported along with
 * TCP & UDP. Fragmented or tunneled packets are not supported.
 *
 * Return:
 *      - RMNET_MAP_CHECKSUM_OK: Validation of checksum succeeded.
 *      - RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER: Skb buffer given is corrupted.
 *      - RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET: Valid flag is not set in the
 *        checksum trailer.
 *      - RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET: The packet is a fragment.
 *      - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT: The transport header is
 *        not TCP/UDP.
 *      - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 *      - RMNET_MAP_CHECKSUM_VALIDATION_FAILED: In case the validation failed.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb)
{
	struct rmnet_map_dl_checksum_trailer_s *cksum_trailer;
	unsigned int data_len;
	unsigned char *map_payload;
	unsigned char ip_version;

	data_len = RMNET_MAP_GET_LENGTH(skb);

	if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s) + data_len +
	    sizeof(struct rmnet_map_dl_checksum_trailer_s))))
		return RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER;

	cksum_trailer = (struct rmnet_map_dl_checksum_trailer_s *)
			(skb->data + data_len +
			 sizeof(struct rmnet_map_header_s));

	if (unlikely(!ntohs(cksum_trailer->valid)))
		return RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET;

	map_payload = (unsigned char *)(skb->data +
					sizeof(struct rmnet_map_header_s));

	ip_version = (*map_payload & 0xF0) >> 4;
	if (ip_version == 0x04)
		return rmnet_map_validate_ipv4_packet_checksum(map_payload,
							       cksum_trailer);
	else if (ip_version == 0x06)
		return rmnet_map_validate_ipv6_packet_checksum(map_payload,
							       cksum_trailer);

	return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
}
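
/* Buffer layout assumed by the length check above:
 * |rmnet_map_header_s|IP packet + padding (data_len bytes)|cksum trailer|
 */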

static void rmnet_map_fill_ipv4_packet_ul_checksum_header
	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
	 struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	ul_header->checksum_insert_offset = skb->csum_offset;
	ul_header->cks_en = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ip4_ind = 1;
	else
		ul_header->udp_ip4_ind = 0;
	/* Changing checksum_insert_offset to network order */
	hdr++;
	*hdr = htons(*hdr);
	skb->ip_summed = CHECKSUM_NONE;
}

static void rmnet_map_fill_ipv6_packet_ul_checksum_header
	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
	 struct sk_buff *skb)
{
	unsigned short *hdr = (unsigned short *)ul_header;

	ul_header->checksum_start_offset = htons((unsigned short)
		(skb_transport_header(skb) - (unsigned char *)iphdr));
	ul_header->checksum_insert_offset = skb->csum_offset;
	ul_header->cks_en = 1;
	ul_header->udp_ip4_ind = 0;
	/* Changing checksum_insert_offset to network order */
	hdr++;
	*hdr = htons(*hdr);
	skb->ip_summed = CHECKSUM_NONE;
}
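
/* Note on the htons() above: checksum_insert_offset shares its 16-bit word
 * with the cks_en and udp_ip4_ind bits, so the second word of the UL header
 * is byte-swapped in place only after all of those fields are set.
 */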

static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	void *txporthdr;
	u16 *csum;

	txporthdr = iphdr + ip4h->ihl * 4;

	if ((ip4h->protocol == IPPROTO_TCP) ||
	    (ip4h->protocol == IPPROTO_UDP)) {
		csum = (u16 *)rmnet_map_get_checksum_field(ip4h->protocol,
							   txporthdr);
		*csum = ~(*csum);
	}
}

static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	void *txporthdr;
	u16 *csum;

	txporthdr = ip6hdr + sizeof(struct ipv6hdr);

	if ((ip6h->nexthdr == IPPROTO_TCP) || (ip6h->nexthdr == IPPROTO_UDP)) {
		csum = (u16 *)rmnet_map_get_checksum_field(ip6h->nexthdr,
							   txporthdr);
		*csum = ~(*csum);
	}
}
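
/* With CHECKSUM_PARTIAL the stack has already seeded the transport checksum
 * field with the pseudo-header sum; complementing it here is assumed to be
 * what the MAP checksum engine expects before folding the field into its
 * own computation (hardware contract inferred from this driver's usage).
 */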

/* rmnet_map_checksum_uplink_packet() - Generates UL checksum
 * meta info header
 * @skb: Pointer to the packet's skb.
 * @orig_dev: Originating device whose checksum offload features are checked.
 * @egress_data_format: Egress format flags (checked for
 * RMNET_EGRESS_FORMAT_MAP_CKSUMV4).
 *
 * Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 *
 * Return:
 *      - RMNET_MAP_CHECKSUM_OK: Offload header was generated successfully.
 *      - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
 *      - RMNET_MAP_CHECKSUM_SW: Unsupported packet for UL checksum offload.
 */
int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				     struct net_device *orig_dev,
				     u32 egress_data_format)
{
	unsigned char ip_version;
	struct rmnet_map_ul_checksum_header_s *ul_header;
	void *iphdr;
	int ret;

	ul_header = (struct rmnet_map_ul_checksum_header_s *)
		skb_push(skb, sizeof(struct rmnet_map_ul_checksum_header_s));

	if (unlikely(!(orig_dev->features &
		(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_checksum_header_s);
		ip_version = (*(char *)iphdr & 0xF0) >> 4;
		if (ip_version == 0x04) {
			rmnet_map_fill_ipv4_packet_ul_checksum_header
				(iphdr, ul_header, skb);
			if (egress_data_format &
			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
				rmnet_map_complement_ipv4_txporthdr_csum_field(
					iphdr);
			ret = RMNET_MAP_CHECKSUM_OK;
			goto done;
		} else if (ip_version == 0x06) {
			rmnet_map_fill_ipv6_packet_ul_checksum_header
				(iphdr, ul_header, skb);
			if (egress_data_format &
			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
				rmnet_map_complement_ipv6_txporthdr_csum_field(
					iphdr);
			ret = RMNET_MAP_CHECKSUM_OK;
			goto done;
		} else {
			ret = RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
			goto sw_checksum;
		}
	} else {
		ret = RMNET_MAP_CHECKSUM_SW;
		goto sw_checksum;
	}

sw_checksum:
	ul_header->checksum_start_offset = 0;
	ul_header->checksum_insert_offset = 0;
	ul_header->cks_en = 0;
	ul_header->udp_ip4_ind = 0;
done:
	return ret;
}
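
/* rmnet_ul_aggregation_skip() - Decide whether to bypass UL aggregation
 * @skb: Packet being transmitted
 * @offset: Offset of the IP header within skb->data
 *
 * Returns 1 for ICMP and ICMPv6 packets (including ICMPv6 carried behind a
 * fragment header), so the TX path can send them out immediately instead of
 * queueing them into the aggregation buffer; returns 0 otherwise.
 */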
int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset)
{
	unsigned char *packet_start = skb->data + offset;
	int is_icmp = 0;

	if ((skb->data[offset]) >> 4 == 0x04) {
		struct iphdr *ip4h = (struct iphdr *)packet_start;

		if (ip4h->protocol == IPPROTO_ICMP)
			is_icmp = 1;
	} else if ((skb->data[offset]) >> 4 == 0x06) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)packet_start;

		if (ip6h->nexthdr == IPPROTO_ICMPV6) {
			is_icmp = 1;
		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag;

			frag = (struct frag_hdr *)(packet_start +
						   sizeof(struct ipv6hdr));
			if (frag->nexthdr == IPPROTO_ICMPV6)
				is_icmp = 1;
		}
	}

	return is_icmp;
}