blob: 72f3c3bd6a0326255e3995298fb10bcace589958 [file] [log] [blame]
Subash Abhinov Kasiviswanathan5d8d66c2016-12-05 12:26:41 -07001/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
Subash Abhinov Kasiviswanathan2139ce8a2016-10-14 11:01:48 -06002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 *
13 * RMNET Data virtual network driver
14 */
15
16#include <linux/types.h>
17#include <linux/rmnet_data.h>
18#include <linux/msm_rmnet.h>
19#include <linux/etherdevice.h>
20#include <linux/if_arp.h>
21#include <linux/spinlock.h>
22#include <net/pkt_sched.h>
23#include <linux/atomic.h>
24#include <linux/net_map.h>
25#include "rmnet_data_config.h"
26#include "rmnet_data_handlers.h"
27#include "rmnet_data_private.h"
28#include "rmnet_map.h"
29#include "rmnet_data_vnd.h"
30#include "rmnet_data_stats.h"
31#include "rmnet_data_trace.h"
32
/* Tag all log output from this file with the VND log mask */
RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_VND);

/* Number of TC handle slots tracked per MAP flow mapping */
#define RMNET_MAP_FLOW_NUM_TC_HANDLE 3
/* Actions understood by _rmnet_vnd_update_flow_map() */
#define RMNET_VND_UF_ACTION_ADD 0
#define RMNET_VND_UF_ACTION_DEL 1
/* Result codes returned by _rmnet_vnd_update_flow_map() */
enum {
	RMNET_VND_UPDATE_FLOW_OK,
	RMNET_VND_UPDATE_FLOW_NO_ACTION,
	RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM,
	RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT
};
44
/* Table of all VNDs created by this driver, indexed by VND id */
struct net_device *rmnet_devices[RMNET_DATA_MAX_VND];
46
/* rmnet_map_flow_mapping_s - associates one MAP flow handle with up to
 * RMNET_MAP_FLOW_NUM_TC_HANDLE TC flow handles
 */
struct rmnet_map_flow_mapping_s {
	struct list_head list;  /* node in rmnet_vnd_private_s.flow_head */
	u32 map_flow_id;        /* MAP flow handle this entry maps */
	u32 tc_flow_valid[RMNET_MAP_FLOW_NUM_TC_HANDLE]; /* 1 if slot in use */
	u32 tc_flow_id[RMNET_MAP_FLOW_NUM_TC_HANDLE];    /* TC handles */
	atomic_t v4_seq;        /* last IPv4 flow-control sequence number */
	atomic_t v6_seq;        /* IPv6 sequence (only initialized, never read
				 * in this file)
				 */
};
55
/* rmnet_vnd_private_s - per-VND private state, stored in netdev_priv() */
struct rmnet_vnd_private_s {
	u32 qos_version;        /* RMNET_IOCTL_QOS_MODE_* bit; 0 = QoS off */
	struct rmnet_logical_ep_conf_s local_ep; /* logical endpoint config */

	rwlock_t flow_map_lock; /* protects flow_head and its entries */
	struct list_head flow_head; /* list of rmnet_map_flow_mapping_s */
	struct rmnet_map_flow_mapping_s root_flow; /* used for the special
						    * flow id 0xFFFFFFFF
						    */
};
64
/* Return codes for _rmnet_vnd_do_flow_control() */
#define RMNET_VND_FC_QUEUED 0       /* work item queued successfully */
#define RMNET_VND_FC_NOT_ENABLED 1  /* built without CONFIG_RMNET_DATA_FC */
#define RMNET_VND_FC_KMALLOC_ERR 2  /* work item allocation failed */
68
69/* Helper Functions */
70
71/* rmnet_vnd_add_qos_header() - Adds QoS header to front of skb->data
72 * @skb: Socket buffer ("packet") to modify
73 * @dev: Egress interface
74 *
75 * Does not check for sufficient headroom! Caller must make sure there is enough
76 * headroom.
77 */
78static void rmnet_vnd_add_qos_header(struct sk_buff *skb,
79 struct net_device *dev,
80 uint32_t qos_version)
81{
82 struct QMI_QOS_HDR_S *qmih;
83 struct qmi_qos_hdr8_s *qmi8h;
84
85 if (qos_version & RMNET_IOCTL_QOS_MODE_6) {
86 qmih = (struct QMI_QOS_HDR_S *)
87 skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
88 qmih->version = 1;
89 qmih->flags = 0;
90 qmih->flow_id = skb->mark;
91 } else if (qos_version & RMNET_IOCTL_QOS_MODE_8) {
92 qmi8h = (struct qmi_qos_hdr8_s *)
93 skb_push(skb, sizeof(struct qmi_qos_hdr8_s));
94 /* Flags are 0 always */
95 qmi8h->hdr.version = 0;
96 qmi8h->hdr.flags = 0;
97 memset(qmi8h->reserved, 0, sizeof(qmi8h->reserved));
98 qmi8h->hdr.flow_id = skb->mark;
99 } else {
100 LOGD("%s(): Bad QoS version configured\n", __func__);
101 }
102}
103
104/* RX/TX Fixup */
105
106/* rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
107 * @skb: Socket buffer ("packet") to modify
108 * @dev: Virtual network device
109 *
110 * Additional VND specific packet processing for ingress packets
111 *
112 * Return:
113 * - RX_HANDLER_PASS if packet should continue to process in stack
114 * - RX_HANDLER_CONSUMED if packet should not be processed in stack
115 *
116 */
117int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
118{
119 if (unlikely(!dev || !skb))
120 return RX_HANDLER_CONSUMED;
121
122 dev->stats.rx_packets++;
123 dev->stats.rx_bytes += skb->len;
124
125 return RX_HANDLER_PASS;
126}
127
128/* rmnet_vnd_tx_fixup() - Virtual Network Device transmic fixup hook
129 * @skb: Socket buffer ("packet") to modify
130 * @dev: Virtual network device
131 *
132 * Additional VND specific packet processing for egress packets
133 *
134 * Return:
135 * - RX_HANDLER_PASS if packet should continue to be transmitted
136 * - RX_HANDLER_CONSUMED if packet should not be transmitted by stack
137 */
138int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
139{
140 struct rmnet_vnd_private_s *dev_conf;
141
142 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
143
144 if (unlikely(!dev || !skb))
145 return RX_HANDLER_CONSUMED;
146
147 dev->stats.tx_packets++;
148 dev->stats.tx_bytes += skb->len;
149
150 return RX_HANDLER_PASS;
151}
152
153/* Network Device Operations */
154
155/* rmnet_vnd_start_xmit() - Transmit NDO callback
156 * @skb: Socket buffer ("packet") being sent from network stack
157 * @dev: Virtual Network Device
158 *
159 * Standard network driver operations hook to transmit packets on virtual
160 * network device. Called by network stack. Packet is not transmitted directly
161 * from here; instead it is given to the rmnet egress handler.
162 *
163 * Return:
164 * - NETDEV_TX_OK under all cirumstances (cannot block/fail)
165 */
166static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
167 struct net_device *dev)
168{
169 struct rmnet_vnd_private_s *dev_conf;
170
171 trace_rmnet_vnd_start_xmit(skb);
172 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
173 if (dev_conf->local_ep.egress_dev) {
174 /* QoS header should come after MAP header */
175 if (dev_conf->qos_version)
176 rmnet_vnd_add_qos_header(skb,
177 dev,
178 dev_conf->qos_version);
Subash Abhinov Kasiviswanathan6a2d1dd2016-11-14 12:40:34 -0700179 skb_orphan(skb);
Subash Abhinov Kasiviswanathan2139ce8a2016-10-14 11:01:48 -0600180 rmnet_egress_handler(skb, &dev_conf->local_ep);
181 } else {
182 dev->stats.tx_dropped++;
183 rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_VND_NO_EGRESS);
184 }
185 return NETDEV_TX_OK;
186}
187
188/* rmnet_vnd_change_mtu() - Change MTU NDO callback
189 * @dev: Virtual network device
190 * @new_mtu: New MTU value to set (in bytes)
191 *
192 * Standard network driver operations hook to set the MTU. Called by kernel to
193 * set the device MTU. Checks if desired MTU is less than zero or greater than
194 * RMNET_DATA_MAX_PACKET_SIZE;
195 *
196 * Return:
197 * - 0 if successful
198 * - -EINVAL if new_mtu is out of range
199 */
200static int rmnet_vnd_change_mtu(struct net_device *dev, int new_mtu)
201{
202 if (new_mtu < 0 || new_mtu > RMNET_DATA_MAX_PACKET_SIZE)
203 return -EINVAL;
204
205 dev->mtu = new_mtu;
206 return 0;
207}
208
209#ifdef CONFIG_RMNET_DATA_FC
/* _rmnet_vnd_do_qos_ioctl() - Handle QoS-related IOCTLs
 * @dev: Virtual network device
 * @ifr: User buffer holding a struct rmnet_ioctl_data_s
 * @cmd: IOCTL command value
 *
 * Enables/disables QoS headers, reports QoS state, and services manual TC
 * flow enable/disable requests from userspace.
 *
 * Return:
 *      - 0 if successful
 *      - -EFAULT if a user copy fails
 *      - -EINVAL if cmd is not a QoS IOCTL (caller falls back to generic
 *        handling)
 */
static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
				   struct ifreq *ifr,
				   int cmd)
{
	struct rmnet_vnd_private_s *dev_conf;
	int rc, qdisc_len = 0;
	struct rmnet_ioctl_data_s ioctl_data;

	rc = 0;
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	switch (cmd) {
	case RMNET_IOCTL_SET_QOS_ENABLE:
		LOGM("RMNET_IOCTL_SET_QOS_ENABLE on %s", dev->name);
		/* Default to the mode-6 header unless a mode is already set */
		if (!dev_conf->qos_version)
			dev_conf->qos_version = RMNET_IOCTL_QOS_MODE_6;
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:
		LOGM("RMNET_IOCTL_SET_QOS_DISABLE on %s", dev->name);
		dev_conf->qos_version = 0;
		break;

	case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
		LOGM("RMNET_IOCTL_GET_QOS on %s", dev->name);
		/* NOTE(review): reports "enabled" only for mode 6 — mode 8
		 * reads back as disabled here. Confirm this legacy semantic
		 * is intended.
		 */
		ioctl_data.u.operation_mode = (dev_conf->qos_version ==
						RMNET_IOCTL_QOS_MODE_6);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
				 sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;

	case RMNET_IOCTL_FLOW_ENABLE:
		LOGL("RMNET_IOCTL_FLOW_ENABLE on %s", dev->name);
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
				   sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		/* Wake the qdisc for the requested TC handle */
		qdisc_len = tc_qdisc_flow_control(dev,
						  ioctl_data.u.tcm_handle, 1);
		trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 1);
		break;

	case RMNET_IOCTL_FLOW_DISABLE:
		LOGL("RMNET_IOCTL_FLOW_DISABLE on %s", dev->name);
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
				   sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		/* Stop the qdisc for the requested TC handle */
		qdisc_len = tc_qdisc_flow_control(dev,
						  ioctl_data.u.tcm_handle, 0);
		trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 0);
		break;

	default:
		rc = -EINVAL;
	}

	return rc;
}
272
/* rmnet_vnd_fc_work - deferred flow-control request, run from a workqueue so
 * the rtnl lock can be taken
 * @work: Embedded work item
 * @dev: Device the TC flow belongs to
 * @tc_handle: TC qdisc handle to enable/disable
 * @enable: Non-zero to enable flow, zero to disable
 */
struct rmnet_vnd_fc_work {
	struct work_struct work;
	struct net_device *dev;
	u32 tc_handle;
	int enable;
};
279
280static void _rmnet_vnd_wq_flow_control(struct work_struct *work)
281{
282 struct rmnet_vnd_fc_work *fcwork;
283 int qdisc_len = 0;
284
285 fcwork = (struct rmnet_vnd_fc_work *)work;
286
287 rtnl_lock();
288 qdisc_len = tc_qdisc_flow_control(fcwork->dev, fcwork->tc_handle,
289 fcwork->enable);
290 trace_rmnet_fc_map(fcwork->tc_handle, qdisc_len, fcwork->enable);
291 rtnl_unlock();
292
293 LOGL("[%s] handle:%08X enable:%d",
294 fcwork->dev->name, fcwork->tc_handle, fcwork->enable);
295
296 kfree(work);
297}
298
299static int _rmnet_vnd_do_flow_control(struct net_device *dev,
300 u32 tc_handle,
301 int enable)
302{
303 struct rmnet_vnd_fc_work *fcwork;
304
305 fcwork = kmalloc(sizeof(*fcwork), GFP_ATOMIC);
306 if (!fcwork)
307 return RMNET_VND_FC_KMALLOC_ERR;
308 memset(fcwork, 0, sizeof(struct rmnet_vnd_fc_work));
309
310 INIT_WORK((struct work_struct *)fcwork, _rmnet_vnd_wq_flow_control);
311 fcwork->dev = dev;
312 fcwork->tc_handle = tc_handle;
313 fcwork->enable = enable;
314
315 schedule_work((struct work_struct *)fcwork);
316 return RMNET_VND_FC_QUEUED;
317}
318#else
/* Stub: without CONFIG_RMNET_DATA_FC no QoS IOCTLs are recognized, so always
 * return -EINVAL and let rmnet_vnd_ioctl() fall through to generic handling.
 */
static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
				   struct ifreq *ifr,
				   int cmd)
{
	return -EINVAL;
}
325
/* Stub: flow control is compiled out, so log and report it as not enabled */
static inline int _rmnet_vnd_do_flow_control(struct net_device *dev,
					     u32 tc_handle,
					     int enable)
{
	LOGD("[%s] called with no QoS support", dev->name);
	return RMNET_VND_FC_NOT_ENABLED;
}
333#endif /* CONFIG_RMNET_DATA_FC */
334
335static int rmnet_vnd_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
336{
337 struct rmnet_vnd_private_s *dev_conf;
338 struct rmnet_ioctl_extended_s ext_cmd;
339 int rc = 0;
340
341 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
342
343 rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
344 sizeof(struct rmnet_ioctl_extended_s));
345 if (rc) {
346 LOGM("%s(): copy_from_user() failed\n", __func__);
347 return rc;
348 }
349
350 switch (ext_cmd.extended_ioctl) {
351 case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
352 ext_cmd.u.data = 0;
353 break;
354
355 case RMNET_IOCTL_GET_DRIVER_NAME:
356 strlcpy(ext_cmd.u.if_name, "rmnet_data",
357 sizeof(ext_cmd.u.if_name));
358 break;
359
360 case RMNET_IOCTL_GET_SUPPORTED_QOS_MODES:
361 ext_cmd.u.data = RMNET_IOCTL_QOS_MODE_6
362 | RMNET_IOCTL_QOS_MODE_8;
363 break;
364
365 case RMNET_IOCTL_GET_QOS_VERSION:
366 ext_cmd.u.data = dev_conf->qos_version;
367 break;
368
369 case RMNET_IOCTL_SET_QOS_VERSION:
370 if (ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_6 ||
371 ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_8 ||
372 ext_cmd.u.data == 0) {
373 dev_conf->qos_version = ext_cmd.u.data;
374 } else {
375 rc = -EINVAL;
376 goto done;
377 }
378 break;
379
380 default:
381 rc = -EINVAL;
382 goto done;
383 }
384
385 rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd,
386 sizeof(struct rmnet_ioctl_extended_s));
387 if (rc)
388 LOGM("%s(): copy_to_user() failed\n", __func__);
389
390done:
391 return rc;
392}
393
394/* rmnet_vnd_ioctl() - IOCTL NDO callback
395 * @dev: Virtual network device
396 * @ifreq: User data
397 * @cmd: IOCTL command value
398 *
399 * Standard network driver operations hook to process IOCTLs. Called by kernel
400 * to process non-stanard IOCTLs for device
401 *
402 * Return:
403 * - 0 if successful
404 * - -EINVAL if unknown IOCTL
405 */
406static int rmnet_vnd_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
407{
408 struct rmnet_vnd_private_s *dev_conf;
409 int rc;
410 struct rmnet_ioctl_data_s ioctl_data;
411
412 rc = 0;
413 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
414
415 rc = _rmnet_vnd_do_qos_ioctl(dev, ifr, cmd);
416 if (rc != -EINVAL)
417 return rc;
418 rc = 0; /* Reset rc as it may contain -EINVAL from above */
419
420 switch (cmd) {
421 case RMNET_IOCTL_OPEN: /* Do nothing. Support legacy behavior */
422 LOGM("RMNET_IOCTL_OPEN on %s (ignored)", dev->name);
423 break;
424
425 case RMNET_IOCTL_CLOSE: /* Do nothing. Support legacy behavior */
426 LOGM("RMNET_IOCTL_CLOSE on %s (ignored)", dev->name);
427 break;
428
429 case RMNET_IOCTL_SET_LLP_ETHERNET:
430 LOGM("RMNET_IOCTL_SET_LLP_ETHERNET on %s (no support)",
431 dev->name);
432 rc = -EINVAL;
433 break;
434
435 case RMNET_IOCTL_SET_LLP_IP: /* Do nothing. Support legacy behavior */
436 LOGM("RMNET_IOCTL_SET_LLP_IP on %s (ignored)", dev->name);
437 break;
438
439 case RMNET_IOCTL_GET_LLP: /* Always return IP mode */
440 LOGM("RMNET_IOCTL_GET_LLP on %s", dev->name);
441 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
442 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
443 sizeof(struct rmnet_ioctl_data_s)))
444 rc = -EFAULT;
445 break;
446
447 case RMNET_IOCTL_EXTENDED:
448 rc = rmnet_vnd_ioctl_extended(dev, ifr);
449 break;
450
451 default:
452 LOGM("Unknown IOCTL 0x%08X", cmd);
453 rc = -EINVAL;
454 }
455
456 return rc;
457}
458
459static const struct net_device_ops rmnet_data_vnd_ops = {
460 .ndo_init = 0,
461 .ndo_start_xmit = rmnet_vnd_start_xmit,
462 .ndo_do_ioctl = rmnet_vnd_ioctl,
463 .ndo_change_mtu = rmnet_vnd_change_mtu,
464 .ndo_set_mac_address = 0,
465 .ndo_validate_addr = 0,
466};
467
/* rmnet_vnd_setup() - net_device initialization callback
 * @dev: Virtual network device
 *
 * Called by kernel whenever a new rmnet_data<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc...
 */
static void rmnet_vnd_setup(struct net_device *dev)
{
	struct rmnet_vnd_private_s *dev_conf;

	LOGM("Setting up device %s", dev->name);

	/* Clear out private data */
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
	memset(dev_conf, 0, sizeof(struct rmnet_vnd_private_s));

	dev->netdev_ops = &rmnet_data_vnd_ops;
	dev->mtu = RMNET_DATA_DFLT_PACKET_SIZE;
	/* Reserve space so MAP/QoS headers can be pushed without realloc */
	dev->needed_headroom = RMNET_DATA_NEEDED_HEADROOM;
	random_ether_addr(dev->dev_addr);
	dev->tx_queue_len = RMNET_DATA_TX_QUEUE_LEN;

	/* Raw IP mode */
	dev->header_ops = 0; /* No header */
	dev->type = ARPHRD_RAWIP;
	dev->hard_header_len = 0;
	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	/* Flow control */
	rwlock_init(&dev_conf->flow_map_lock);
	INIT_LIST_HEAD(&dev_conf->flow_head);
}
500
/* rmnet_vnd_disable_offload() - net_device initialization helper function
 * @dev: Virtual network device
 *
 * Called during device initialization. Disables GRO.
 * (The previous header comment misnamed this function "rmnet_vnd_setup()".)
 */
static void rmnet_vnd_disable_offload(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO;
	__netdev_update_features(dev);
}
511
Subash Abhinov Kasiviswanathan2139ce8a2016-10-14 11:01:48 -0600512/* Exposed API */
513
514/* rmnet_vnd_exit() - Shutdown cleanup hook
515 *
516 * Called by RmNet main on module unload. Cleans up data structures and
517 * unregisters/frees net_devices.
518 */
519void rmnet_vnd_exit(void)
520{
521 int i;
522
523 for (i = 0; i < RMNET_DATA_MAX_VND; i++)
524 if (rmnet_devices[i]) {
525 unregister_netdev(rmnet_devices[i]);
526 free_netdev(rmnet_devices[i]);
527 }
528}
529
530/* rmnet_vnd_init() - Init hook
531 *
532 * Called by RmNet main on module load. Initializes data structures
533 */
534int rmnet_vnd_init(void)
535{
536 memset(rmnet_devices, 0,
537 sizeof(struct net_device *) * RMNET_DATA_MAX_VND);
538 return 0;
539}
540
541/* rmnet_vnd_create_dev() - Create a new virtual network device node.
542 * @id: Virtual device node id
543 * @new_device: Pointer to newly created device node
544 * @prefix: Device name prefix
545 *
546 * Allocates structures for new virtual network devices. Sets the name of the
547 * new device and registers it with the network stack. Device will appear in
548 * ifconfig list after this is called. If the prefix is null, then
549 * RMNET_DATA_DEV_NAME_STR will be assumed.
550 *
551 * Return:
552 * - 0 if successful
553 * - RMNET_CONFIG_BAD_ARGUMENTS if id is out of range or prefix is too long
554 * - RMNET_CONFIG_DEVICE_IN_USE if id already in use
555 * - RMNET_CONFIG_NOMEM if net_device allocation failed
556 * - RMNET_CONFIG_UNKNOWN_ERROR if register_netdevice() fails
557 */
558int rmnet_vnd_create_dev(int id, struct net_device **new_device,
Subash Abhinov Kasiviswanathan58598632017-02-23 18:24:42 -0700559 const char *prefix, int use_name)
Subash Abhinov Kasiviswanathan2139ce8a2016-10-14 11:01:48 -0600560{
561 struct net_device *dev;
562 char dev_prefix[IFNAMSIZ];
563 int p, rc = 0;
564
565 if (id < 0 || id >= RMNET_DATA_MAX_VND) {
566 *new_device = 0;
567 return RMNET_CONFIG_BAD_ARGUMENTS;
568 }
569
570 if (rmnet_devices[id] != 0) {
571 *new_device = 0;
572 return RMNET_CONFIG_DEVICE_IN_USE;
573 }
574
Subash Abhinov Kasiviswanathan58598632017-02-23 18:24:42 -0700575 if (!prefix && !use_name)
Subash Abhinov Kasiviswanathan2139ce8a2016-10-14 11:01:48 -0600576 p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d",
577 RMNET_DATA_DEV_NAME_STR);
Subash Abhinov Kasiviswanathan58598632017-02-23 18:24:42 -0700578 else if (prefix && use_name)
579 p = scnprintf(dev_prefix, IFNAMSIZ, "%s", prefix);
580 else if (prefix && !use_name)
Subash Abhinov Kasiviswanathan2139ce8a2016-10-14 11:01:48 -0600581 p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d", prefix);
Subash Abhinov Kasiviswanathan58598632017-02-23 18:24:42 -0700582 else
583 return RMNET_CONFIG_BAD_ARGUMENTS;
584
Subash Abhinov Kasiviswanathan2139ce8a2016-10-14 11:01:48 -0600585 if (p >= (IFNAMSIZ - 1)) {
586 LOGE("Specified prefix longer than IFNAMSIZ");
587 return RMNET_CONFIG_BAD_ARGUMENTS;
588 }
589
590 dev = alloc_netdev(sizeof(struct rmnet_vnd_private_s),
591 dev_prefix,
Subash Abhinov Kasiviswanathan58598632017-02-23 18:24:42 -0700592 use_name ? NET_NAME_UNKNOWN : NET_NAME_ENUM,
Subash Abhinov Kasiviswanathan2139ce8a2016-10-14 11:01:48 -0600593 rmnet_vnd_setup);
594 if (!dev) {
595 LOGE("Failed to to allocate netdev for id %d", id);
596 *new_device = 0;
597 return RMNET_CONFIG_NOMEM;
598 }
599
600 if (!prefix) {
601 /* Configuring DL checksum offload on rmnet_data interfaces */
602 dev->hw_features = NETIF_F_RXCSUM;
603 /* Configuring UL checksum offload on rmnet_data interfaces */
604 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
605 /* Configuring GRO on rmnet_data interfaces */
606 dev->hw_features |= NETIF_F_GRO;
607 /* Configuring Scatter-Gather on rmnet_data interfaces */
608 dev->hw_features |= NETIF_F_SG;
609 /* Configuring GSO on rmnet_data interfaces */
610 dev->hw_features |= NETIF_F_GSO;
611 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
612 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
613 }
614
615 rc = register_netdevice(dev);
616 if (rc != 0) {
617 LOGE("Failed to to register netdev [%s]", dev->name);
618 free_netdev(dev);
619 *new_device = 0;
620 rc = RMNET_CONFIG_UNKNOWN_ERROR;
621 } else {
622 rmnet_devices[id] = dev;
623 *new_device = dev;
624 LOGM("Registered device %s", dev->name);
625 }
626
Subash Abhinov Kasiviswanathan5d8d66c2016-12-05 12:26:41 -0700627 rmnet_vnd_disable_offload(dev);
628
Subash Abhinov Kasiviswanathan2139ce8a2016-10-14 11:01:48 -0600629 return rc;
630}
631
/* rmnet_vnd_free_dev() - free a virtual network device node.
 * @id: Virtual device node id
 *
 * Unregisters the virtual network device node and frees it.
 * unregister_netdev locks the rtnl mutex, so the mutex must not be locked
 * by the caller of the function. unregister_netdev enqueues the request to
 * unregister the device into a TODO queue. The requests in the TODO queue
 * are only done after rtnl mutex is unlocked, therefore free_netdev has to
 * called after unlocking rtnl mutex.
 *
 * Return:
 *      - 0 if successful
 *      - RMNET_CONFIG_NO_SUCH_DEVICE if id is invalid or not in range
 *      - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset
 */
int rmnet_vnd_free_dev(int id)
{
	struct rmnet_logical_ep_conf_s *epconfig_l;
	struct net_device *dev;

	/* Validate id and claim the slot under rtnl so a concurrent caller
	 * cannot free the same device
	 */
	rtnl_lock();
	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
		rtnl_unlock();
		LOGM("Invalid id [%d]", id);
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}

	/* Refuse to free a device whose logical endpoint is still in use */
	epconfig_l = rmnet_vnd_get_le_config(rmnet_devices[id]);
	if (epconfig_l && epconfig_l->refcount) {
		rtnl_unlock();
		return RMNET_CONFIG_DEVICE_IN_USE;
	}

	dev = rmnet_devices[id];
	rmnet_devices[id] = 0;
	rtnl_unlock();

	/* dev cannot be NULL here (checked above under rtnl); the else
	 * branch is purely defensive
	 */
	if (dev) {
		unregister_netdev(dev);
		free_netdev(dev);
		return 0;
	} else {
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}
}
677
678/* rmnet_vnd_get_name() - Gets the string name of a VND based on ID
679 * @id: Virtual device node id
680 * @name: Buffer to store name of virtual device node
681 * @name_len: Length of name buffer
682 *
683 * Copies the name of the virtual device node into the users buffer. Will throw
684 * an error if the buffer is null, or too small to hold the device name.
685 *
686 * Return:
687 * - 0 if successful
688 * - -EINVAL if name is null
689 * - -EINVAL if id is invalid or not in range
690 * - -EINVAL if name is too small to hold things
691 */
692int rmnet_vnd_get_name(int id, char *name, int name_len)
693{
694 int p;
695
696 if (!name) {
697 LOGM("%s", "Bad arguments; name buffer null");
698 return -EINVAL;
699 }
700
701 if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
702 LOGM("Invalid id [%d]", id);
703 return -EINVAL;
704 }
705
706 p = strlcpy(name, rmnet_devices[id]->name, name_len);
707 if (p >= name_len) {
708 LOGM("Buffer to small (%d) to fit device name", name_len);
709 return -EINVAL;
710 }
711 LOGL("Found mapping [%d]->\"%s\"", id, name);
712
713 return 0;
714}
715
/* rmnet_vnd_is_vnd() - Determine if net_device is RmNet owned virtual devices
 * @dev: Network device to test
 *
 * Searches through list of known RmNet virtual devices. This function is O(n)
 * and should not be used in the data path.
 *
 * Return:
 *      - 0 if device is not RmNet virtual device
 *      - (VND id + 1) if device is an RmNet virtual device
 *        (the previous comment claimed "1", but the code returns i + 1 so
 *        callers can recover the id)
 */
int rmnet_vnd_is_vnd(struct net_device *dev)
{
	/* This is not an efficient search, but, this will only be called in
	 * a configuration context, and the list is small.
	 */
	int i;

	if (!dev)
		return 0;

	for (i = 0; i < RMNET_DATA_MAX_VND; i++)
		if (dev == rmnet_devices[i])
			return i + 1;

	return 0;
}
742
743/* rmnet_vnd_get_le_config() - Get the logical endpoint configuration
744 * @dev: Virtual device node
745 *
746 * Gets the logical endpoint configuration for a RmNet virtual network device
747 * node. Caller should confirm that devices is a RmNet VND before calling.
748 *
749 * Return:
750 * - Pointer to logical endpoint configuration structure
751 * - 0 (null) if dev is null
752 */
753struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev)
754{
755 struct rmnet_vnd_private_s *dev_conf;
756
757 if (!dev)
758 return 0;
759
760 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
761 if (!dev_conf)
762 return 0;
763
764 return &dev_conf->local_ep;
765}
766
767/* _rmnet_vnd_get_flow_map() - Gets object representing a MAP flow handle
768 * @dev_conf: Private configuration structure for virtual network device
769 * @map_flow: MAP flow handle IF
770 *
771 * Loops through available flow mappings and compares the MAP flow handle.
772 * Returns when mapping is found.
773 *
774 * Return:
775 * - Null if no mapping was found
776 * - Pointer to mapping otherwise
777 */
778static struct rmnet_map_flow_mapping_s *_rmnet_vnd_get_flow_map
779 (struct rmnet_vnd_private_s *dev_conf,
780 u32 map_flow)
781{
782 struct list_head *p;
783 struct rmnet_map_flow_mapping_s *itm;
784
785 list_for_each(p, &dev_conf->flow_head) {
786 itm = list_entry(p, struct rmnet_map_flow_mapping_s, list);
787
788 if (unlikely(!itm))
789 return 0;
790
791 if (itm->map_flow_id == map_flow)
792 return itm;
793 }
794 return 0;
795}
796
797/* _rmnet_vnd_update_flow_map() - Add or remove individual TC flow handles
798 * @action: One of RMNET_VND_UF_ACTION_ADD / RMNET_VND_UF_ACTION_DEL
799 * @itm: Flow mapping object
800 * @map_flow: TC flow handle
801 *
802 * RMNET_VND_UF_ACTION_ADD:
803 * Will check for a free mapping slot in the mapping object. If one is found,
804 * valid for that slot will be set to 1 and the value will be set.
805 *
806 * RMNET_VND_UF_ACTION_DEL:
807 * Will check for matching tc handle. If found, valid for that slot will be
808 * set to 0 and the value will also be zeroed.
809 *
810 * Return:
811 * - RMNET_VND_UPDATE_FLOW_OK tc flow handle is added/removed ok
812 * - RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM if there are no more tc handles
813 * - RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT if flow mapping is now empty
814 * - RMNET_VND_UPDATE_FLOW_NO_ACTION if no action was taken
815 */
816static int _rmnet_vnd_update_flow_map(u8 action,
817 struct rmnet_map_flow_mapping_s *itm,
818 u32 tc_flow)
819{
820 int rc, i, j;
821
822 rc = RMNET_VND_UPDATE_FLOW_OK;
823
824 switch (action) {
825 case RMNET_VND_UF_ACTION_ADD:
826 rc = RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM;
827 for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
828 if (itm->tc_flow_valid[i] == 0) {
829 itm->tc_flow_valid[i] = 1;
830 itm->tc_flow_id[i] = tc_flow;
831 rc = RMNET_VND_UPDATE_FLOW_OK;
832 LOGD("{%pK}->tc_flow_id[%d]=%08X",
833 itm, i, tc_flow);
834 break;
835 }
836 }
837 break;
838
839 case RMNET_VND_UF_ACTION_DEL:
840 j = 0;
841 rc = RMNET_VND_UPDATE_FLOW_OK;
842 for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
843 if (itm->tc_flow_valid[i] == 1) {
844 if (itm->tc_flow_id[i] == tc_flow) {
845 itm->tc_flow_valid[i] = 0;
846 itm->tc_flow_id[i] = 0;
847 j++;
848 LOGD("{%pK}->tc_flow_id[%d]=0", itm, i);
849 }
850 } else {
851 j++;
852 }
853 }
854 if (j == RMNET_MAP_FLOW_NUM_TC_HANDLE)
855 rc = RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT;
856 break;
857
858 default:
859 rc = RMNET_VND_UPDATE_FLOW_NO_ACTION;
860 break;
861 }
862 return rc;
863}
864
865/* rmnet_vnd_add_tc_flow() - Add a MAP/TC flow handle mapping
866 * @id: Virtual network device ID
867 * @map_flow: MAP flow handle
868 * @tc_flow: TC flow handle
869 *
870 * Checkes for an existing flow mapping object corresponding to map_flow. If one
871 * is found, then it will try to add to the existing mapping object. Otherwise,
872 * a new mapping object is created.
873 *
874 * Return:
875 * - RMNET_CONFIG_OK if successful
876 * - RMNET_CONFIG_TC_HANDLE_FULL if there is no more room in the map object
877 * - RMNET_CONFIG_NOMEM failed to allocate a new map object
878 */
879int rmnet_vnd_add_tc_flow(u32 id, u32 map_flow, u32 tc_flow)
880{
881 struct rmnet_map_flow_mapping_s *itm;
882 struct net_device *dev;
883 struct rmnet_vnd_private_s *dev_conf;
884 int r;
885 unsigned long flags;
886
887 if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
888 LOGM("Invalid VND id [%d]", id);
889 return RMNET_CONFIG_NO_SUCH_DEVICE;
890 }
891
892 dev = rmnet_devices[id];
893 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
894
895 if (!dev_conf)
896 return RMNET_CONFIG_NO_SUCH_DEVICE;
897
898 write_lock_irqsave(&dev_conf->flow_map_lock, flags);
899 itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
900 if (itm) {
901 r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_ADD,
902 itm, tc_flow);
903 if (r != RMNET_VND_UPDATE_FLOW_OK) {
904 write_unlock_irqrestore(&dev_conf->flow_map_lock,
905 flags);
906 return RMNET_CONFIG_TC_HANDLE_FULL;
907 }
908 write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
909 return RMNET_CONFIG_OK;
910 }
911 write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
912
913 itm = kmalloc(sizeof(*itm), GFP_KERNEL);
914
915 if (!itm) {
916 LOGM("%s", "Failure allocating flow mapping");
917 return RMNET_CONFIG_NOMEM;
918 }
919 memset(itm, 0, sizeof(struct rmnet_map_flow_mapping_s));
920
921 itm->map_flow_id = map_flow;
922 itm->tc_flow_valid[0] = 1;
923 itm->tc_flow_id[0] = tc_flow;
924
925 /* How can we dynamically init these safely? Kernel only provides static
926 * initializers for atomic_t
927 */
928 itm->v4_seq.counter = 0; /* Init is broken: ATOMIC_INIT(0); */
929 itm->v6_seq.counter = 0; /* Init is broken: ATOMIC_INIT(0); */
930
931 write_lock_irqsave(&dev_conf->flow_map_lock, flags);
932 list_add(&itm->list, &dev_conf->flow_head);
933 write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
934
935 LOGD("Created flow mapping [%s][0x%08X][0x%08X]@%pK",
936 dev->name, itm->map_flow_id, itm->tc_flow_id[0], itm);
937
938 return RMNET_CONFIG_OK;
939}
940
/* rmnet_vnd_del_tc_flow() - Delete a MAP/TC flow handle mapping
 * @id: Virtual network device ID
 * @map_flow: MAP flow handle
 * @tc_flow: TC flow handle
 *
 * Checkes for an existing flow mapping object corresponding to map_flow. If one
 * is found, then it will try to remove the existing tc_flow mapping. If the
 * mapping object no longer contains any mappings, then it is freed. Otherwise
 * the mapping object is left in the list
 *
 * Return:
 *      - RMNET_CONFIG_OK if successful or if there was no such tc_flow
 *      - RMNET_CONFIG_INVALID_REQUEST if there is no such map_flow
 */
int rmnet_vnd_del_tc_flow(u32 id, u32 map_flow, u32 tc_flow)
{
	struct rmnet_vnd_private_s *dev_conf;
	struct net_device *dev;
	struct rmnet_map_flow_mapping_s *itm;
	int r;
	unsigned long flags;
	int rc = RMNET_CONFIG_OK;

	/* NOTE(review): id is u32, so "id < 0" can never be true; the test
	 * is dead code kept byte-identical here
	 */
	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
		LOGM("Invalid VND id [%d]", id);
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}

	dev = rmnet_devices[id];
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	if (!dev_conf)
		return RMNET_CONFIG_NO_SUCH_DEVICE;

	r = RMNET_VND_UPDATE_FLOW_NO_ACTION;
	write_lock_irqsave(&dev_conf->flow_map_lock, flags);
	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
	if (!itm) {
		rc = RMNET_CONFIG_INVALID_REQUEST;
	} else {
		r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_DEL,
					       itm, tc_flow);
		/* Unlink now-empty mappings while still holding the lock */
		if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT)
			list_del(&itm->list);
	}
	write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);

	/* Free outside the lock; itm was already unlinked above */
	if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT) {
		if (itm)
			LOGD("Removed flow mapping [%s][0x%08X]@%pK",
			     dev->name, itm->map_flow_id, itm);
		kfree(itm);
	}

	return rc;
}
997
/* rmnet_vnd_do_flow_control() - Process flow control request
 * @dev: Virtual network device node to do lookup on
 * @map_flow_id: Flow ID from MAP message
 * @v4_seq: pointer to IPv4 indication sequence number
 * @v6_seq: pointer to IPv6 indication sequence number
 * @enable: boolean to enable/disable flow.
 *
 * NOTE(review): only v4_seq is consulted below; v6_seq is accepted but
 * ignored in this implementation.
 *
 * Return:
 *      - 0 if successful
 *      - 1 if no mapping is found
 *      - 2 if dev is not RmNet virtual network device node
 */
int rmnet_vnd_do_flow_control(struct net_device *dev,
			      u32 map_flow_id,
			      u16 v4_seq,
			      u16 v6_seq,
			      int enable)
{
	struct rmnet_vnd_private_s *dev_conf;
	struct rmnet_map_flow_mapping_s *itm;
	int do_fc, error, i;

	error = 0;
	do_fc = 0;

	if (unlikely(!dev))
		return 2;

	if (!rmnet_vnd_is_vnd(dev))
		return 2;

	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	if (unlikely(!dev_conf))
		return 2;

	read_lock(&dev_conf->flow_map_lock);
	/* 0xFFFFFFFF is the root flow: it gates the whole device TX queue
	 * rather than individual TC handles
	 */
	if (map_flow_id == 0xFFFFFFFF) {
		itm = &dev_conf->root_flow;
		goto nolookup;
	}

	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow_id);

	if (!itm) {
		LOGL("Got flow control request for unknown flow %08X",
		     map_flow_id);
		goto fcdone;
	}

nolookup:
	/* Ignore stale indications: only act when the sequence number has
	 * not gone backwards (0 resets the sequence)
	 */
	if (v4_seq == 0 || v4_seq >= atomic_read(&itm->v4_seq)) {
		atomic_set(&itm->v4_seq, v4_seq);
		if (map_flow_id == 0xFFFFFFFF) {
			LOGD("Setting VND TX queue state to %d", enable);
			/* Although we expect similar number of enable/disable
			 * commands, optimize for the disable. That is more
			 * latency sensitive than enable
			 */
			if (unlikely(enable))
				netif_wake_queue(dev);
			else
				netif_stop_queue(dev);
			trace_rmnet_fc_map(0xFFFFFFFF, 0, enable);
			goto fcdone;
		}
		/* Apply the request to every valid TC handle of this flow */
		for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
			if (itm->tc_flow_valid[i] == 1) {
				LOGD("Found [%s][0x%08X][%d:0x%08X]",
				     dev->name, itm->map_flow_id, i,
				     itm->tc_flow_id[i]);

				_rmnet_vnd_do_flow_control(dev,
							   itm->tc_flow_id[i],
							   enable);
			}
		}
	} else {
		/* NOTE(review): %hd with int/u16 arguments relies on default
		 * argument promotion; confirm the format specifiers
		 */
		LOGD("Internal seq(%hd) higher than called(%hd)",
		     atomic_read(&itm->v4_seq), v4_seq);
	}

fcdone:
	read_unlock(&dev_conf->flow_map_lock);

	return error;
}
1085
1086/* rmnet_vnd_get_by_id() - Get VND by array index ID
1087 * @id: Virtual network deice id [0:RMNET_DATA_MAX_VND]
1088 *
1089 * Return:
1090 * - 0 if no device or ID out of range
1091 * - otherwise return pointer to VND net_device struct
1092 */
1093struct net_device *rmnet_vnd_get_by_id(int id)
1094{
1095 if (id < 0 || id >= RMNET_DATA_MAX_VND) {
1096 LOGE("Bug; VND ID out of bounds");
1097 return 0;
1098 }
1099 return rmnet_devices[id];
1100}