/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 *
13 * RMNET Data virtual network driver
14 */
15
16#include <linux/types.h>
17#include <linux/rmnet_data.h>
18#include <linux/msm_rmnet.h>
19#include <linux/etherdevice.h>
20#include <linux/if_arp.h>
21#include <linux/spinlock.h>
22#include <net/pkt_sched.h>
23#include <linux/atomic.h>
24#include <linux/net_map.h>
25#include "rmnet_data_config.h"
26#include "rmnet_data_handlers.h"
27#include "rmnet_data_private.h"
28#include "rmnet_map.h"
29#include "rmnet_data_vnd.h"
30#include "rmnet_data_stats.h"
31#include "rmnet_data_trace.h"
32
/* Tag all log macros in this file with the VND submodule mask */
RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_VND);

/* Maximum number of TC flow handles one MAP flow handle can map onto */
#define RMNET_MAP_FLOW_NUM_TC_HANDLE 3
/* Actions accepted by _rmnet_vnd_update_flow_map() */
#define RMNET_VND_UF_ACTION_ADD 0
#define RMNET_VND_UF_ACTION_DEL 1
/* Result codes returned by _rmnet_vnd_update_flow_map() */
enum {
	RMNET_VND_UPDATE_FLOW_OK,
	RMNET_VND_UPDATE_FLOW_NO_ACTION,
	RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM,
	RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT
};

/* Table of all virtual network devices created by this driver, indexed by
 * VND id. A null entry means the id is unused. The free path serializes
 * table updates under RTNL; creation happens in configuration context.
 */
struct net_device *rmnet_devices[RMNET_DATA_MAX_VND];
46
/* One MAP flow handle mapped to up to RMNET_MAP_FLOW_NUM_TC_HANDLE TC
 * (traffic control) flow handles. Entries live on the per-device flow_head
 * list and are modified under flow_map_lock.
 */
struct rmnet_map_flow_mapping_s {
	struct list_head list;	/* link in rmnet_vnd_private_s.flow_head */
	u32 map_flow_id;	/* flow handle from the MAP message */
	/* tc_flow_valid[i] == 1 marks slot i of tc_flow_id[] as in use */
	u32 tc_flow_valid[RMNET_MAP_FLOW_NUM_TC_HANDLE];
	u32 tc_flow_id[RMNET_MAP_FLOW_NUM_TC_HANDLE];
	/* Last flow-control indication sequence numbers seen for this flow.
	 * v6_seq is never read in this file — presumably reserved for IPv6
	 * indications; confirm against the MAP handlers.
	 */
	atomic_t v4_seq;
	atomic_t v6_seq;
};

/* Per-VND private state, stored in netdev_priv() */
struct rmnet_vnd_private_s {
	u32 qos_version;	/* 0 = QoS off; else RMNET_IOCTL_QOS_MODE_* */
	struct rmnet_logical_ep_conf_s local_ep;	/* muxing endpoint */

	rwlock_t flow_map_lock;		/* guards flow_head and its entries */
	struct list_head flow_head;	/* list of rmnet_map_flow_mapping_s */
	struct rmnet_map_flow_mapping_s root_flow; /* flow id 0xFFFFFFFF */
};

/* Result codes of _rmnet_vnd_do_flow_control() */
#define RMNET_VND_FC_QUEUED 0
#define RMNET_VND_FC_NOT_ENABLED 1
#define RMNET_VND_FC_KMALLOC_ERR 2
68
69/* Helper Functions */
70
71/* rmnet_vnd_add_qos_header() - Adds QoS header to front of skb->data
72 * @skb: Socket buffer ("packet") to modify
73 * @dev: Egress interface
74 *
75 * Does not check for sufficient headroom! Caller must make sure there is enough
76 * headroom.
77 */
78static void rmnet_vnd_add_qos_header(struct sk_buff *skb,
79 struct net_device *dev,
80 uint32_t qos_version)
81{
82 struct QMI_QOS_HDR_S *qmih;
83 struct qmi_qos_hdr8_s *qmi8h;
84
85 if (qos_version & RMNET_IOCTL_QOS_MODE_6) {
86 qmih = (struct QMI_QOS_HDR_S *)
87 skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
88 qmih->version = 1;
89 qmih->flags = 0;
90 qmih->flow_id = skb->mark;
91 } else if (qos_version & RMNET_IOCTL_QOS_MODE_8) {
92 qmi8h = (struct qmi_qos_hdr8_s *)
93 skb_push(skb, sizeof(struct qmi_qos_hdr8_s));
94 /* Flags are 0 always */
95 qmi8h->hdr.version = 0;
96 qmi8h->hdr.flags = 0;
97 memset(qmi8h->reserved, 0, sizeof(qmi8h->reserved));
98 qmi8h->hdr.flow_id = skb->mark;
99 } else {
100 LOGD("%s(): Bad QoS version configured\n", __func__);
101 }
102}
103
104/* RX/TX Fixup */
105
106/* rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
107 * @skb: Socket buffer ("packet") to modify
108 * @dev: Virtual network device
109 *
110 * Additional VND specific packet processing for ingress packets
111 *
112 * Return:
113 * - RX_HANDLER_PASS if packet should continue to process in stack
114 * - RX_HANDLER_CONSUMED if packet should not be processed in stack
115 *
116 */
117int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
118{
119 if (unlikely(!dev || !skb))
120 return RX_HANDLER_CONSUMED;
121
122 dev->stats.rx_packets++;
123 dev->stats.rx_bytes += skb->len;
124
125 return RX_HANDLER_PASS;
126}
127
128/* rmnet_vnd_tx_fixup() - Virtual Network Device transmic fixup hook
129 * @skb: Socket buffer ("packet") to modify
130 * @dev: Virtual network device
131 *
132 * Additional VND specific packet processing for egress packets
133 *
134 * Return:
135 * - RX_HANDLER_PASS if packet should continue to be transmitted
136 * - RX_HANDLER_CONSUMED if packet should not be transmitted by stack
137 */
138int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
139{
140 struct rmnet_vnd_private_s *dev_conf;
141
142 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
143
144 if (unlikely(!dev || !skb))
145 return RX_HANDLER_CONSUMED;
146
147 dev->stats.tx_packets++;
148 dev->stats.tx_bytes += skb->len;
149
150 return RX_HANDLER_PASS;
151}
152
153/* Network Device Operations */
154
/* rmnet_vnd_start_xmit() - Transmit NDO callback
 * @skb: Socket buffer ("packet") being sent from network stack
 * @dev: Virtual Network Device
 *
 * Standard network driver operations hook to transmit packets on virtual
 * network device. Called by network stack. Packet is not transmitted directly
 * from here; instead it is given to the rmnet egress handler. If the device
 * has no egress endpoint configured, the packet is counted as dropped and
 * freed.
 *
 * Return:
 * - NETDEV_TX_OK under all circumstances (cannot block/fail)
 */
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_vnd_private_s *dev_conf;

	trace_rmnet_vnd_start_xmit(skb);
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
	if (dev_conf->local_ep.egress_dev) {
		/* QoS header should come after MAP header */
		if (dev_conf->qos_version)
			rmnet_vnd_add_qos_header(skb,
						 dev,
						 dev_conf->qos_version);
		rmnet_egress_handler(skb, &dev_conf->local_ep);
	} else {
		/* Device not muxed to a physical egress device yet */
		dev->stats.tx_dropped++;
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_VND_NO_EGRESS);
	}
	return NETDEV_TX_OK;
}
186
187/* rmnet_vnd_change_mtu() - Change MTU NDO callback
188 * @dev: Virtual network device
189 * @new_mtu: New MTU value to set (in bytes)
190 *
191 * Standard network driver operations hook to set the MTU. Called by kernel to
192 * set the device MTU. Checks if desired MTU is less than zero or greater than
193 * RMNET_DATA_MAX_PACKET_SIZE;
194 *
195 * Return:
196 * - 0 if successful
197 * - -EINVAL if new_mtu is out of range
198 */
199static int rmnet_vnd_change_mtu(struct net_device *dev, int new_mtu)
200{
201 if (new_mtu < 0 || new_mtu > RMNET_DATA_MAX_PACKET_SIZE)
202 return -EINVAL;
203
204 dev->mtu = new_mtu;
205 return 0;
206}
207
208#ifdef CONFIG_RMNET_DATA_FC
209static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
210 struct ifreq *ifr,
211 int cmd)
212{
213 struct rmnet_vnd_private_s *dev_conf;
214 int rc, qdisc_len = 0;
215 struct rmnet_ioctl_data_s ioctl_data;
216
217 rc = 0;
218 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
219
220 switch (cmd) {
221 case RMNET_IOCTL_SET_QOS_ENABLE:
222 LOGM("RMNET_IOCTL_SET_QOS_ENABLE on %s", dev->name);
223 if (!dev_conf->qos_version)
224 dev_conf->qos_version = RMNET_IOCTL_QOS_MODE_6;
225 break;
226
227 case RMNET_IOCTL_SET_QOS_DISABLE:
228 LOGM("RMNET_IOCTL_SET_QOS_DISABLE on %s", dev->name);
229 dev_conf->qos_version = 0;
230 break;
231
232 case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
233 LOGM("RMNET_IOCTL_GET_QOS on %s", dev->name);
234 ioctl_data.u.operation_mode = (dev_conf->qos_version ==
235 RMNET_IOCTL_QOS_MODE_6);
236 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
237 sizeof(struct rmnet_ioctl_data_s)))
238 rc = -EFAULT;
239 break;
240
241 case RMNET_IOCTL_FLOW_ENABLE:
242 LOGL("RMNET_IOCTL_FLOW_ENABLE on %s", dev->name);
243 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
244 sizeof(struct rmnet_ioctl_data_s))) {
245 rc = -EFAULT;
246 break;
247 }
248 qdisc_len = tc_qdisc_flow_control(dev,
249 ioctl_data.u.tcm_handle, 1);
250 trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 1);
251 break;
252
253 case RMNET_IOCTL_FLOW_DISABLE:
254 LOGL("RMNET_IOCTL_FLOW_DISABLE on %s", dev->name);
255 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
256 sizeof(struct rmnet_ioctl_data_s))) {
257 rc = -EFAULT;
258 break;
259 }
260 qdisc_len = tc_qdisc_flow_control(dev,
261 ioctl_data.u.tcm_handle, 0);
262 trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 0);
263 break;
264
265 default:
266 rc = -EINVAL;
267 }
268
269 return rc;
270}
271
/* Deferred flow-control request, queued from atomic context and executed
 * by _rmnet_vnd_wq_flow_control() on the system workqueue (which may take
 * RTNL). Freed by the work function. 'work' must stay the first member:
 * the queue/handler code converts between the two pointer types by cast.
 */
struct rmnet_vnd_fc_work {
	struct work_struct work;	/* must be first member */
	struct net_device *dev;		/* device whose qdisc is throttled */
	u32 tc_handle;			/* TC flow handle to act on */
	int enable;			/* non-zero = enable flow */
};
278
279static void _rmnet_vnd_wq_flow_control(struct work_struct *work)
280{
281 struct rmnet_vnd_fc_work *fcwork;
282 int qdisc_len = 0;
283
284 fcwork = (struct rmnet_vnd_fc_work *)work;
285
286 rtnl_lock();
287 qdisc_len = tc_qdisc_flow_control(fcwork->dev, fcwork->tc_handle,
288 fcwork->enable);
289 trace_rmnet_fc_map(fcwork->tc_handle, qdisc_len, fcwork->enable);
290 rtnl_unlock();
291
292 LOGL("[%s] handle:%08X enable:%d",
293 fcwork->dev->name, fcwork->tc_handle, fcwork->enable);
294
295 kfree(work);
296}
297
298static int _rmnet_vnd_do_flow_control(struct net_device *dev,
299 u32 tc_handle,
300 int enable)
301{
302 struct rmnet_vnd_fc_work *fcwork;
303
304 fcwork = kmalloc(sizeof(*fcwork), GFP_ATOMIC);
305 if (!fcwork)
306 return RMNET_VND_FC_KMALLOC_ERR;
307 memset(fcwork, 0, sizeof(struct rmnet_vnd_fc_work));
308
309 INIT_WORK((struct work_struct *)fcwork, _rmnet_vnd_wq_flow_control);
310 fcwork->dev = dev;
311 fcwork->tc_handle = tc_handle;
312 fcwork->enable = enable;
313
314 schedule_work((struct work_struct *)fcwork);
315 return RMNET_VND_FC_QUEUED;
316}
#else
/* Stub: without CONFIG_RMNET_DATA_FC every QoS IOCTL is unsupported, so
 * report -EINVAL and let rmnet_vnd_ioctl() run its generic switch.
 */
static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
				   struct ifreq *ifr,
				   int cmd)
{
	return -EINVAL;
}

/* Stub: flow control requests are ignored when the feature is compiled out */
static inline int _rmnet_vnd_do_flow_control(struct net_device *dev,
					     u32 tc_handle,
					     int enable)
{
	LOGD("[%s] called with no QoS support", dev->name);
	return RMNET_VND_FC_NOT_ENABLED;
}
#endif /* CONFIG_RMNET_DATA_FC */
333
334static int rmnet_vnd_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
335{
336 struct rmnet_vnd_private_s *dev_conf;
337 struct rmnet_ioctl_extended_s ext_cmd;
338 int rc = 0;
339
340 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
341
342 rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
343 sizeof(struct rmnet_ioctl_extended_s));
344 if (rc) {
345 LOGM("%s(): copy_from_user() failed\n", __func__);
346 return rc;
347 }
348
349 switch (ext_cmd.extended_ioctl) {
350 case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
351 ext_cmd.u.data = 0;
352 break;
353
354 case RMNET_IOCTL_GET_DRIVER_NAME:
355 strlcpy(ext_cmd.u.if_name, "rmnet_data",
356 sizeof(ext_cmd.u.if_name));
357 break;
358
359 case RMNET_IOCTL_GET_SUPPORTED_QOS_MODES:
360 ext_cmd.u.data = RMNET_IOCTL_QOS_MODE_6
361 | RMNET_IOCTL_QOS_MODE_8;
362 break;
363
364 case RMNET_IOCTL_GET_QOS_VERSION:
365 ext_cmd.u.data = dev_conf->qos_version;
366 break;
367
368 case RMNET_IOCTL_SET_QOS_VERSION:
369 if (ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_6 ||
370 ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_8 ||
371 ext_cmd.u.data == 0) {
372 dev_conf->qos_version = ext_cmd.u.data;
373 } else {
374 rc = -EINVAL;
375 goto done;
376 }
377 break;
378
379 default:
380 rc = -EINVAL;
381 goto done;
382 }
383
384 rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd,
385 sizeof(struct rmnet_ioctl_extended_s));
386 if (rc)
387 LOGM("%s(): copy_to_user() failed\n", __func__);
388
389done:
390 return rc;
391}
392
/* rmnet_vnd_ioctl() - IOCTL NDO callback
 * @dev: Virtual network device
 * @ifr: User data
 * @cmd: IOCTL command value
 *
 * Standard network driver operations hook to process IOCTLs. Called by kernel
 * to process non-standard IOCTLs for device.
 *
 * QoS commands are offered to _rmnet_vnd_do_qos_ioctl() first; only when
 * that returns -EINVAL (command not handled there) does the generic switch
 * below run.
 *
 * Return:
 * - 0 if successful
 * - -EINVAL if unknown IOCTL
 * - -EFAULT if copying state to userspace fails
 */
static int rmnet_vnd_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_vnd_private_s *dev_conf;
	int rc;
	struct rmnet_ioctl_data_s ioctl_data;

	rc = 0;
	/* NOTE(review): dev_conf is not used in this function */
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	rc = _rmnet_vnd_do_qos_ioctl(dev, ifr, cmd);
	if (rc != -EINVAL)
		return rc;
	rc = 0; /* Reset rc as it may contain -EINVAL from above */

	switch (cmd) {
	case RMNET_IOCTL_OPEN: /* Do nothing. Support legacy behavior */
		LOGM("RMNET_IOCTL_OPEN on %s (ignored)", dev->name);
		break;

	case RMNET_IOCTL_CLOSE: /* Do nothing. Support legacy behavior */
		LOGM("RMNET_IOCTL_CLOSE on %s (ignored)", dev->name);
		break;

	case RMNET_IOCTL_SET_LLP_ETHERNET:
		/* VNDs are raw-IP only; Ethernet link protocol is rejected */
		LOGM("RMNET_IOCTL_SET_LLP_ETHERNET on %s (no support)",
		     dev->name);
		rc = -EINVAL;
		break;

	case RMNET_IOCTL_SET_LLP_IP: /* Do nothing. Support legacy behavior */
		LOGM("RMNET_IOCTL_SET_LLP_IP on %s (ignored)", dev->name);
		break;

	case RMNET_IOCTL_GET_LLP: /* Always return IP mode */
		LOGM("RMNET_IOCTL_GET_LLP on %s", dev->name);
		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
				 sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;

	case RMNET_IOCTL_EXTENDED:
		rc = rmnet_vnd_ioctl_extended(dev, ifr);
		break;

	default:
		LOGM("Unknown IOCTL 0x%08X", cmd);
		rc = -EINVAL;
	}

	return rc;
}
457
458static const struct net_device_ops rmnet_data_vnd_ops = {
459 .ndo_init = 0,
460 .ndo_start_xmit = rmnet_vnd_start_xmit,
461 .ndo_do_ioctl = rmnet_vnd_ioctl,
462 .ndo_change_mtu = rmnet_vnd_change_mtu,
463 .ndo_set_mac_address = 0,
464 .ndo_validate_addr = 0,
465};
466
/* rmnet_vnd_setup() - net_device initialization callback
 * @dev: Virtual network device
 *
 * Called by kernel whenever a new rmnet_data<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc...
 */
static void rmnet_vnd_setup(struct net_device *dev)
{
	struct rmnet_vnd_private_s *dev_conf;

	LOGM("Setting up device %s", dev->name);

	/* Clear out private data */
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
	memset(dev_conf, 0, sizeof(struct rmnet_vnd_private_s));

	dev->netdev_ops = &rmnet_data_vnd_ops;
	dev->mtu = RMNET_DATA_DFLT_PACKET_SIZE;
	/* Reserve room so the egress path can push MAP/QoS headers without
	 * reallocating the skb
	 */
	dev->needed_headroom = RMNET_DATA_NEEDED_HEADROOM;
	random_ether_addr(dev->dev_addr);
	dev->tx_queue_len = RMNET_DATA_TX_QUEUE_LEN;

	/* Raw IP mode */
	dev->header_ops = 0; /* No header */
	dev->type = ARPHRD_RAWIP;
	dev->hard_header_len = 0;
	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	/* Flow control */
	rwlock_init(&dev_conf->flow_map_lock);
	INIT_LIST_HEAD(&dev_conf->flow_head);
}
499
500/* Exposed API */
501
502/* rmnet_vnd_exit() - Shutdown cleanup hook
503 *
504 * Called by RmNet main on module unload. Cleans up data structures and
505 * unregisters/frees net_devices.
506 */
507void rmnet_vnd_exit(void)
508{
509 int i;
510
511 for (i = 0; i < RMNET_DATA_MAX_VND; i++)
512 if (rmnet_devices[i]) {
513 unregister_netdev(rmnet_devices[i]);
514 free_netdev(rmnet_devices[i]);
515 }
516}
517
518/* rmnet_vnd_init() - Init hook
519 *
520 * Called by RmNet main on module load. Initializes data structures
521 */
522int rmnet_vnd_init(void)
523{
524 memset(rmnet_devices, 0,
525 sizeof(struct net_device *) * RMNET_DATA_MAX_VND);
526 return 0;
527}
528
529/* rmnet_vnd_create_dev() - Create a new virtual network device node.
530 * @id: Virtual device node id
531 * @new_device: Pointer to newly created device node
532 * @prefix: Device name prefix
533 *
534 * Allocates structures for new virtual network devices. Sets the name of the
535 * new device and registers it with the network stack. Device will appear in
536 * ifconfig list after this is called. If the prefix is null, then
537 * RMNET_DATA_DEV_NAME_STR will be assumed.
538 *
539 * Return:
540 * - 0 if successful
541 * - RMNET_CONFIG_BAD_ARGUMENTS if id is out of range or prefix is too long
542 * - RMNET_CONFIG_DEVICE_IN_USE if id already in use
543 * - RMNET_CONFIG_NOMEM if net_device allocation failed
544 * - RMNET_CONFIG_UNKNOWN_ERROR if register_netdevice() fails
545 */
546int rmnet_vnd_create_dev(int id, struct net_device **new_device,
547 const char *prefix)
548{
549 struct net_device *dev;
550 char dev_prefix[IFNAMSIZ];
551 int p, rc = 0;
552
553 if (id < 0 || id >= RMNET_DATA_MAX_VND) {
554 *new_device = 0;
555 return RMNET_CONFIG_BAD_ARGUMENTS;
556 }
557
558 if (rmnet_devices[id] != 0) {
559 *new_device = 0;
560 return RMNET_CONFIG_DEVICE_IN_USE;
561 }
562
563 if (!prefix)
564 p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d",
565 RMNET_DATA_DEV_NAME_STR);
566 else
567 p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d", prefix);
568 if (p >= (IFNAMSIZ - 1)) {
569 LOGE("Specified prefix longer than IFNAMSIZ");
570 return RMNET_CONFIG_BAD_ARGUMENTS;
571 }
572
573 dev = alloc_netdev(sizeof(struct rmnet_vnd_private_s),
574 dev_prefix,
575 NET_NAME_ENUM,
576 rmnet_vnd_setup);
577 if (!dev) {
578 LOGE("Failed to to allocate netdev for id %d", id);
579 *new_device = 0;
580 return RMNET_CONFIG_NOMEM;
581 }
582
583 if (!prefix) {
584 /* Configuring DL checksum offload on rmnet_data interfaces */
585 dev->hw_features = NETIF_F_RXCSUM;
586 /* Configuring UL checksum offload on rmnet_data interfaces */
587 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
588 /* Configuring GRO on rmnet_data interfaces */
589 dev->hw_features |= NETIF_F_GRO;
590 /* Configuring Scatter-Gather on rmnet_data interfaces */
591 dev->hw_features |= NETIF_F_SG;
592 /* Configuring GSO on rmnet_data interfaces */
593 dev->hw_features |= NETIF_F_GSO;
594 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
595 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
596 }
597
598 rc = register_netdevice(dev);
599 if (rc != 0) {
600 LOGE("Failed to to register netdev [%s]", dev->name);
601 free_netdev(dev);
602 *new_device = 0;
603 rc = RMNET_CONFIG_UNKNOWN_ERROR;
604 } else {
605 rmnet_devices[id] = dev;
606 *new_device = dev;
607 LOGM("Registered device %s", dev->name);
608 }
609
610 return rc;
611}
612
/* rmnet_vnd_free_dev() - free a virtual network device node.
 * @id: Virtual device node id
 *
 * Unregisters the virtual network device node and frees it.
 * unregister_netdev locks the rtnl mutex, so the mutex must not be locked
 * by the caller of the function. unregister_netdev enqueues the request to
 * unregister the device into a TODO queue. The requests in the TODO queue
 * are only done after rtnl mutex is unlocked, therefore free_netdev has to
 * called after unlocking rtnl mutex.
 *
 * Return:
 * - 0 if successful
 * - RMNET_CONFIG_NO_SUCH_DEVICE if id is invalid or not in range
 * - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset
 */
int rmnet_vnd_free_dev(int id)
{
	struct rmnet_logical_ep_conf_s *epconfig_l;
	struct net_device *dev;

	/* Validate the id and claim the table slot under RTNL so a
	 * concurrent config operation cannot race the removal.
	 */
	rtnl_lock();
	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
		rtnl_unlock();
		LOGM("Invalid id [%d]", id);
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}

	/* Refuse to free a device that still has a configured endpoint */
	epconfig_l = rmnet_vnd_get_le_config(rmnet_devices[id]);
	if (epconfig_l && epconfig_l->refcount) {
		rtnl_unlock();
		return RMNET_CONFIG_DEVICE_IN_USE;
	}

	dev = rmnet_devices[id];
	rmnet_devices[id] = 0;
	rtnl_unlock();

	/* unregister_netdev() takes RTNL itself — must run after unlock */
	if (dev) {
		unregister_netdev(dev);
		free_netdev(dev);
		return 0;
	} else {
		/* Unreachable in practice: dev was non-null under the lock */
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}
}
658
659/* rmnet_vnd_get_name() - Gets the string name of a VND based on ID
660 * @id: Virtual device node id
661 * @name: Buffer to store name of virtual device node
662 * @name_len: Length of name buffer
663 *
664 * Copies the name of the virtual device node into the users buffer. Will throw
665 * an error if the buffer is null, or too small to hold the device name.
666 *
667 * Return:
668 * - 0 if successful
669 * - -EINVAL if name is null
670 * - -EINVAL if id is invalid or not in range
671 * - -EINVAL if name is too small to hold things
672 */
673int rmnet_vnd_get_name(int id, char *name, int name_len)
674{
675 int p;
676
677 if (!name) {
678 LOGM("%s", "Bad arguments; name buffer null");
679 return -EINVAL;
680 }
681
682 if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
683 LOGM("Invalid id [%d]", id);
684 return -EINVAL;
685 }
686
687 p = strlcpy(name, rmnet_devices[id]->name, name_len);
688 if (p >= name_len) {
689 LOGM("Buffer to small (%d) to fit device name", name_len);
690 return -EINVAL;
691 }
692 LOGL("Found mapping [%d]->\"%s\"", id, name);
693
694 return 0;
695}
696
697/* rmnet_vnd_is_vnd() - Determine if net_device is RmNet owned virtual devices
698 * @dev: Network device to test
699 *
700 * Searches through list of known RmNet virtual devices. This function is O(n)
701 * and should not be used in the data path.
702 *
703 * Return:
704 * - 0 if device is not RmNet virtual device
705 * - 1 if device is RmNet virtual device
706 */
707int rmnet_vnd_is_vnd(struct net_device *dev)
708{
709 /* This is not an efficient search, but, this will only be called in
710 * a configuration context, and the list is small.
711 */
712 int i;
713
714 if (!dev)
715 return 0;
716
717 for (i = 0; i < RMNET_DATA_MAX_VND; i++)
718 if (dev == rmnet_devices[i])
719 return i + 1;
720
721 return 0;
722}
723
724/* rmnet_vnd_get_le_config() - Get the logical endpoint configuration
725 * @dev: Virtual device node
726 *
727 * Gets the logical endpoint configuration for a RmNet virtual network device
728 * node. Caller should confirm that devices is a RmNet VND before calling.
729 *
730 * Return:
731 * - Pointer to logical endpoint configuration structure
732 * - 0 (null) if dev is null
733 */
734struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev)
735{
736 struct rmnet_vnd_private_s *dev_conf;
737
738 if (!dev)
739 return 0;
740
741 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
742 if (!dev_conf)
743 return 0;
744
745 return &dev_conf->local_ep;
746}
747
748/* _rmnet_vnd_get_flow_map() - Gets object representing a MAP flow handle
749 * @dev_conf: Private configuration structure for virtual network device
750 * @map_flow: MAP flow handle IF
751 *
752 * Loops through available flow mappings and compares the MAP flow handle.
753 * Returns when mapping is found.
754 *
755 * Return:
756 * - Null if no mapping was found
757 * - Pointer to mapping otherwise
758 */
759static struct rmnet_map_flow_mapping_s *_rmnet_vnd_get_flow_map
760 (struct rmnet_vnd_private_s *dev_conf,
761 u32 map_flow)
762{
763 struct list_head *p;
764 struct rmnet_map_flow_mapping_s *itm;
765
766 list_for_each(p, &dev_conf->flow_head) {
767 itm = list_entry(p, struct rmnet_map_flow_mapping_s, list);
768
769 if (unlikely(!itm))
770 return 0;
771
772 if (itm->map_flow_id == map_flow)
773 return itm;
774 }
775 return 0;
776}
777
778/* _rmnet_vnd_update_flow_map() - Add or remove individual TC flow handles
779 * @action: One of RMNET_VND_UF_ACTION_ADD / RMNET_VND_UF_ACTION_DEL
780 * @itm: Flow mapping object
781 * @map_flow: TC flow handle
782 *
783 * RMNET_VND_UF_ACTION_ADD:
784 * Will check for a free mapping slot in the mapping object. If one is found,
785 * valid for that slot will be set to 1 and the value will be set.
786 *
787 * RMNET_VND_UF_ACTION_DEL:
788 * Will check for matching tc handle. If found, valid for that slot will be
789 * set to 0 and the value will also be zeroed.
790 *
791 * Return:
792 * - RMNET_VND_UPDATE_FLOW_OK tc flow handle is added/removed ok
793 * - RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM if there are no more tc handles
794 * - RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT if flow mapping is now empty
795 * - RMNET_VND_UPDATE_FLOW_NO_ACTION if no action was taken
796 */
797static int _rmnet_vnd_update_flow_map(u8 action,
798 struct rmnet_map_flow_mapping_s *itm,
799 u32 tc_flow)
800{
801 int rc, i, j;
802
803 rc = RMNET_VND_UPDATE_FLOW_OK;
804
805 switch (action) {
806 case RMNET_VND_UF_ACTION_ADD:
807 rc = RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM;
808 for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
809 if (itm->tc_flow_valid[i] == 0) {
810 itm->tc_flow_valid[i] = 1;
811 itm->tc_flow_id[i] = tc_flow;
812 rc = RMNET_VND_UPDATE_FLOW_OK;
813 LOGD("{%pK}->tc_flow_id[%d]=%08X",
814 itm, i, tc_flow);
815 break;
816 }
817 }
818 break;
819
820 case RMNET_VND_UF_ACTION_DEL:
821 j = 0;
822 rc = RMNET_VND_UPDATE_FLOW_OK;
823 for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
824 if (itm->tc_flow_valid[i] == 1) {
825 if (itm->tc_flow_id[i] == tc_flow) {
826 itm->tc_flow_valid[i] = 0;
827 itm->tc_flow_id[i] = 0;
828 j++;
829 LOGD("{%pK}->tc_flow_id[%d]=0", itm, i);
830 }
831 } else {
832 j++;
833 }
834 }
835 if (j == RMNET_MAP_FLOW_NUM_TC_HANDLE)
836 rc = RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT;
837 break;
838
839 default:
840 rc = RMNET_VND_UPDATE_FLOW_NO_ACTION;
841 break;
842 }
843 return rc;
844}
845
846/* rmnet_vnd_add_tc_flow() - Add a MAP/TC flow handle mapping
847 * @id: Virtual network device ID
848 * @map_flow: MAP flow handle
849 * @tc_flow: TC flow handle
850 *
851 * Checkes for an existing flow mapping object corresponding to map_flow. If one
852 * is found, then it will try to add to the existing mapping object. Otherwise,
853 * a new mapping object is created.
854 *
855 * Return:
856 * - RMNET_CONFIG_OK if successful
857 * - RMNET_CONFIG_TC_HANDLE_FULL if there is no more room in the map object
858 * - RMNET_CONFIG_NOMEM failed to allocate a new map object
859 */
860int rmnet_vnd_add_tc_flow(u32 id, u32 map_flow, u32 tc_flow)
861{
862 struct rmnet_map_flow_mapping_s *itm;
863 struct net_device *dev;
864 struct rmnet_vnd_private_s *dev_conf;
865 int r;
866 unsigned long flags;
867
868 if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
869 LOGM("Invalid VND id [%d]", id);
870 return RMNET_CONFIG_NO_SUCH_DEVICE;
871 }
872
873 dev = rmnet_devices[id];
874 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
875
876 if (!dev_conf)
877 return RMNET_CONFIG_NO_SUCH_DEVICE;
878
879 write_lock_irqsave(&dev_conf->flow_map_lock, flags);
880 itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
881 if (itm) {
882 r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_ADD,
883 itm, tc_flow);
884 if (r != RMNET_VND_UPDATE_FLOW_OK) {
885 write_unlock_irqrestore(&dev_conf->flow_map_lock,
886 flags);
887 return RMNET_CONFIG_TC_HANDLE_FULL;
888 }
889 write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
890 return RMNET_CONFIG_OK;
891 }
892 write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
893
894 itm = kmalloc(sizeof(*itm), GFP_KERNEL);
895
896 if (!itm) {
897 LOGM("%s", "Failure allocating flow mapping");
898 return RMNET_CONFIG_NOMEM;
899 }
900 memset(itm, 0, sizeof(struct rmnet_map_flow_mapping_s));
901
902 itm->map_flow_id = map_flow;
903 itm->tc_flow_valid[0] = 1;
904 itm->tc_flow_id[0] = tc_flow;
905
906 /* How can we dynamically init these safely? Kernel only provides static
907 * initializers for atomic_t
908 */
909 itm->v4_seq.counter = 0; /* Init is broken: ATOMIC_INIT(0); */
910 itm->v6_seq.counter = 0; /* Init is broken: ATOMIC_INIT(0); */
911
912 write_lock_irqsave(&dev_conf->flow_map_lock, flags);
913 list_add(&itm->list, &dev_conf->flow_head);
914 write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
915
916 LOGD("Created flow mapping [%s][0x%08X][0x%08X]@%pK",
917 dev->name, itm->map_flow_id, itm->tc_flow_id[0], itm);
918
919 return RMNET_CONFIG_OK;
920}
921
922/* rmnet_vnd_del_tc_flow() - Delete a MAP/TC flow handle mapping
923 * @id: Virtual network device ID
924 * @map_flow: MAP flow handle
925 * @tc_flow: TC flow handle
926 *
927 * Checkes for an existing flow mapping object corresponding to map_flow. If one
928 * is found, then it will try to remove the existing tc_flow mapping. If the
929 * mapping object no longer contains any mappings, then it is freed. Otherwise
930 * the mapping object is left in the list
931 *
932 * Return:
933 * - RMNET_CONFIG_OK if successful or if there was no such tc_flow
934 * - RMNET_CONFIG_INVALID_REQUEST if there is no such map_flow
935 */
936int rmnet_vnd_del_tc_flow(u32 id, u32 map_flow, u32 tc_flow)
937{
938 struct rmnet_vnd_private_s *dev_conf;
939 struct net_device *dev;
940 struct rmnet_map_flow_mapping_s *itm;
941 int r;
942 unsigned long flags;
943 int rc = RMNET_CONFIG_OK;
944
945 if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
946 LOGM("Invalid VND id [%d]", id);
947 return RMNET_CONFIG_NO_SUCH_DEVICE;
948 }
949
950 dev = rmnet_devices[id];
951 dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
952
953 if (!dev_conf)
954 return RMNET_CONFIG_NO_SUCH_DEVICE;
955
956 r = RMNET_VND_UPDATE_FLOW_NO_ACTION;
957 write_lock_irqsave(&dev_conf->flow_map_lock, flags);
958 itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
959 if (!itm) {
960 rc = RMNET_CONFIG_INVALID_REQUEST;
961 } else {
962 r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_DEL,
963 itm, tc_flow);
964 if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT)
965 list_del(&itm->list);
966 }
967 write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
968
969 if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT) {
970 if (itm)
971 LOGD("Removed flow mapping [%s][0x%08X]@%pK",
972 dev->name, itm->map_flow_id, itm);
973 kfree(itm);
974 }
975
976 return rc;
977}
978
/* rmnet_vnd_do_flow_control() - Process flow control request
 * @dev: Virtual network device node to do lookup on
 * @map_flow_id: Flow ID from MAP message; 0xFFFFFFFF addresses the root
 *               flow, i.e. the device TX queue as a whole
 * @v4_seq: IPv4 indication sequence number (0 = unconditional)
 * @v6_seq: IPv6 indication sequence number
 *          NOTE(review): v6_seq is accepted but never read here — confirm
 *          whether IPv6 sequencing is handled elsewhere
 * @enable: boolean to enable/disable flow.
 *
 * Looks up the flow mapping under the read lock and, if the indication is
 * not stale (v4_seq == 0 or >= the last seen sequence), applies flow
 * control: the root flow stops/wakes the whole device queue directly, any
 * other flow defers per-TC-handle control via _rmnet_vnd_do_flow_control().
 *
 * Return:
 * - 0 if successful
 * - 1 if no mapping is found
 * - 2 if dev is not RmNet virtual network device node
 */
int rmnet_vnd_do_flow_control(struct net_device *dev,
			      u32 map_flow_id,
			      u16 v4_seq,
			      u16 v6_seq,
			      int enable)
{
	struct rmnet_vnd_private_s *dev_conf;
	struct rmnet_map_flow_mapping_s *itm;
	int do_fc, error, i;

	error = 0;
	do_fc = 0;

	if (unlikely(!dev))
		return 2;

	if (!rmnet_vnd_is_vnd(dev))
		return 2;

	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	if (unlikely(!dev_conf))
		return 2;

	read_lock(&dev_conf->flow_map_lock);
	if (map_flow_id == 0xFFFFFFFF) {
		/* Root flow: no list lookup needed */
		itm = &dev_conf->root_flow;
		goto nolookup;
	}

	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow_id);

	if (!itm) {
		LOGL("Got flow control request for unknown flow %08X",
		     map_flow_id);
		goto fcdone;
	}

nolookup:
	/* Drop indications older than the last one processed (seq 0 is
	 * always accepted as an unconditional command)
	 */
	if (v4_seq == 0 || v4_seq >= atomic_read(&itm->v4_seq)) {
		atomic_set(&itm->v4_seq, v4_seq);
		if (map_flow_id == 0xFFFFFFFF) {
			LOGD("Setting VND TX queue state to %d", enable);
			/* Although we expect similar number of enable/disable
			 * commands, optimize for the disable. That is more
			 * latency sensitive than enable
			 */
			if (unlikely(enable))
				netif_wake_queue(dev);
			else
				netif_stop_queue(dev);
			trace_rmnet_fc_map(0xFFFFFFFF, 0, enable);
			goto fcdone;
		}
		/* Apply flow control to every valid TC handle of the flow */
		for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
			if (itm->tc_flow_valid[i] == 1) {
				LOGD("Found [%s][0x%08X][%d:0x%08X]",
				     dev->name, itm->map_flow_id, i,
				     itm->tc_flow_id[i]);

				_rmnet_vnd_do_flow_control(dev,
							   itm->tc_flow_id[i],
							   enable);
			}
		}
	} else {
		/* NOTE(review): %hd with int/u16 arguments — %d would be the
		 * exact-width specifier for atomic_read()'s int result
		 */
		LOGD("Internal seq(%hd) higher than called(%hd)",
		     atomic_read(&itm->v4_seq), v4_seq);
	}

fcdone:
	read_unlock(&dev_conf->flow_map_lock);

	return error;
}
1066
1067/* rmnet_vnd_get_by_id() - Get VND by array index ID
1068 * @id: Virtual network deice id [0:RMNET_DATA_MAX_VND]
1069 *
1070 * Return:
1071 * - 0 if no device or ID out of range
1072 * - otherwise return pointer to VND net_device struct
1073 */
1074struct net_device *rmnet_vnd_get_by_id(int id)
1075{
1076 if (id < 0 || id >= RMNET_DATA_MAX_VND) {
1077 LOGE("Bug; VND ID out of bounds");
1078 return 0;
1079 }
1080 return rmnet_devices[id];
1081}