/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data virtual network driver
 */

#include <linux/types.h>
#include <linux/rmnet_data.h>
#include <linux/msm_rmnet.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/spinlock.h>
#include <net/pkt_sched.h>
#include <linux/atomic.h>
#include <linux/net_map.h>
#include "rmnet_data_config.h"
#include "rmnet_data_handlers.h"
#include "rmnet_data_private.h"
#include "rmnet_map.h"
#include "rmnet_data_vnd.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_trace.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_VND);

#define RMNET_MAP_FLOW_NUM_TC_HANDLE 3
#define RMNET_VND_UF_ACTION_ADD 0
#define RMNET_VND_UF_ACTION_DEL 1

enum {
	RMNET_VND_UPDATE_FLOW_OK,
	RMNET_VND_UPDATE_FLOW_NO_ACTION,
	RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM,
	RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT
};

struct net_device *rmnet_devices[RMNET_DATA_MAX_VND];

struct rmnet_map_flow_mapping_s {
	struct list_head list;
	u32 map_flow_id;
	u32 tc_flow_valid[RMNET_MAP_FLOW_NUM_TC_HANDLE];
	u32 tc_flow_id[RMNET_MAP_FLOW_NUM_TC_HANDLE];
	atomic_t v4_seq;
	atomic_t v6_seq;
};

struct rmnet_vnd_private_s {
	u32 qos_version;
	struct rmnet_logical_ep_conf_s local_ep;

	rwlock_t flow_map_lock;
	struct list_head flow_head;
	struct rmnet_map_flow_mapping_s root_flow;
};

#define RMNET_VND_FC_QUEUED 0
#define RMNET_VND_FC_NOT_ENABLED 1
#define RMNET_VND_FC_KMALLOC_ERR 2

/* Helper Functions */

/* rmnet_vnd_add_qos_header() - Adds QoS header to front of skb->data
 * @skb: Socket buffer ("packet") to modify
 * @dev: Egress interface
 * @qos_version: QoS header version to prepend (RMNET_IOCTL_QOS_MODE_6 or
 *               RMNET_IOCTL_QOS_MODE_8)
 *
 * Does not check for sufficient headroom! Caller must make sure there is
 * enough headroom.
 */
static void rmnet_vnd_add_qos_header(struct sk_buff *skb,
				     struct net_device *dev,
				     uint32_t qos_version)
{
	struct QMI_QOS_HDR_S *qmih;
	struct qmi_qos_hdr8_s *qmi8h;

	if (qos_version & RMNET_IOCTL_QOS_MODE_6) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	} else if (qos_version & RMNET_IOCTL_QOS_MODE_8) {
		qmi8h = (struct qmi_qos_hdr8_s *)
			skb_push(skb, sizeof(struct qmi_qos_hdr8_s));
		/* Flags are always 0 */
		qmi8h->hdr.version = 0;
		qmi8h->hdr.flags = 0;
		memset(qmi8h->reserved, 0, sizeof(qmi8h->reserved));
		qmi8h->hdr.flow_id = skb->mark;
	} else {
		LOGD("%s(): Bad QoS version configured\n", __func__);
	}
}
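
/* Illustrative sketch (hypothetical caller, not part of this driver):
 * rmnet_vnd_add_qos_header() assumes the caller already reserved headroom,
 * normally via dev->needed_headroom. A caller that cannot rely on that could
 * guard the push explicitly before invoking it:
 *
 *	if (skb_cow_head(skb, sizeof(struct QMI_QOS_HDR_S)) < 0) {
 *		dev->stats.tx_dropped++;
 *		kfree_skb(skb);
 *		return NETDEV_TX_OK;
 *	}
 *	rmnet_vnd_add_qos_header(skb, dev, dev_conf->qos_version);
 *
 * skb_cow_head() reallocates the header portion when fewer bytes than
 * requested are available, so the subsequent skb_push() cannot run past the
 * start of the buffer.
 */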

/* Network Device Operations */

/* rmnet_vnd_start_xmit() - Transmit NDO callback
 * @skb: Socket buffer ("packet") being sent from network stack
 * @dev: Virtual network device
 *
 * Standard network driver operations hook to transmit packets on virtual
 * network device. Called by network stack. Packet is not transmitted directly
 * from here; instead it is given to the rmnet egress handler.
 *
 * Return:
 * - NETDEV_TX_OK under all circumstances (cannot block/fail)
 */
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_vnd_private_s *dev_conf;

	trace_rmnet_vnd_start_xmit(skb);
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
	if (dev_conf->local_ep.egress_dev) {
		/* QoS header should come after MAP header */
		if (dev_conf->qos_version)
			rmnet_vnd_add_qos_header(skb,
						 dev,
						 dev_conf->qos_version);
		skb_orphan(skb);
		rmnet_egress_handler(skb, &dev_conf->local_ep);
	} else {
		dev->stats.tx_dropped++;
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_VND_NO_EGRESS);
	}
	return NETDEV_TX_OK;
}

/* rmnet_vnd_change_mtu() - Change MTU NDO callback
 * @dev: Virtual network device
 * @new_mtu: New MTU value to set (in bytes)
 *
 * Standard network driver operations hook to set the MTU. Called by kernel to
 * set the device MTU. Rejects MTU values that are negative or greater than
 * RMNET_DATA_MAX_PACKET_SIZE.
 *
 * Return:
 * - 0 if successful
 * - -EINVAL if new_mtu is out of range
 */
static int rmnet_vnd_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_DATA_MAX_PACKET_SIZE)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

#ifdef CONFIG_RMNET_DATA_FC
static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
				   struct ifreq *ifr,
				   int cmd)
{
	struct rmnet_vnd_private_s *dev_conf;
	int rc, qdisc_len = 0;
	struct rmnet_ioctl_data_s ioctl_data;

	rc = 0;
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	switch (cmd) {
	case RMNET_IOCTL_SET_QOS_ENABLE:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		LOGM("RMNET_IOCTL_SET_QOS_ENABLE on %s", dev->name);
		if (!dev_conf->qos_version)
			dev_conf->qos_version = RMNET_IOCTL_QOS_MODE_6;
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		LOGM("RMNET_IOCTL_SET_QOS_DISABLE on %s", dev->name);
		dev_conf->qos_version = 0;
		break;

	case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
		LOGM("RMNET_IOCTL_GET_QOS on %s", dev->name);
		ioctl_data.u.operation_mode = (dev_conf->qos_version ==
					       RMNET_IOCTL_QOS_MODE_6);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
				 sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;

	case RMNET_IOCTL_FLOW_ENABLE:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		LOGL("RMNET_IOCTL_FLOW_ENABLE on %s", dev->name);
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
				   sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		qdisc_len = tc_qdisc_flow_control(dev,
						  ioctl_data.u.tcm_handle, 1);
		trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 1);
		break;

	case RMNET_IOCTL_FLOW_DISABLE:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		LOGL("RMNET_IOCTL_FLOW_DISABLE on %s", dev->name);
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
				   sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		qdisc_len = tc_qdisc_flow_control(dev,
						  ioctl_data.u.tcm_handle, 0);
		trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 0);
		break;

	default:
		rc = -EINVAL;
	}

	return rc;
}

struct rmnet_vnd_fc_work {
	struct work_struct work;
	struct net_device *dev;
	u32 tc_handle;
	int enable;
};

static void _rmnet_vnd_wq_flow_control(struct work_struct *work)
{
	struct rmnet_vnd_fc_work *fcwork;
	int qdisc_len = 0;

	fcwork = (struct rmnet_vnd_fc_work *)work;

	rtnl_lock();
	qdisc_len = tc_qdisc_flow_control(fcwork->dev, fcwork->tc_handle,
					  fcwork->enable);
	trace_rmnet_fc_map(fcwork->tc_handle, qdisc_len, fcwork->enable);
	rtnl_unlock();

	LOGL("[%s] handle:%08X enable:%d",
	     fcwork->dev->name, fcwork->tc_handle, fcwork->enable);

	kfree(work);
}

static int _rmnet_vnd_do_flow_control(struct net_device *dev,
				      u32 tc_handle,
				      int enable)
{
	struct rmnet_vnd_fc_work *fcwork;

	fcwork = kmalloc(sizeof(*fcwork), GFP_ATOMIC);
	if (!fcwork)
		return RMNET_VND_FC_KMALLOC_ERR;
	memset(fcwork, 0, sizeof(struct rmnet_vnd_fc_work));

	INIT_WORK((struct work_struct *)fcwork, _rmnet_vnd_wq_flow_control);
	fcwork->dev = dev;
	fcwork->tc_handle = tc_handle;
	fcwork->enable = enable;

	schedule_work((struct work_struct *)fcwork);
	return RMNET_VND_FC_QUEUED;
}
#else
static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
				   struct ifreq *ifr,
				   int cmd)
{
	return -EINVAL;
}

static inline int _rmnet_vnd_do_flow_control(struct net_device *dev,
					     u32 tc_handle,
					     int enable)
{
	LOGD("[%s] called with no QoS support", dev->name);
	return RMNET_VND_FC_NOT_ENABLED;
}
#endif /* CONFIG_RMNET_DATA_FC */
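
/* Illustrative sketch (assumption about the caller, not code from this file):
 * MAP flow control indications arrive in atomic context where rtnl_lock(), a
 * sleeping mutex needed by tc_qdisc_flow_control(), cannot be taken. That is
 * why _rmnet_vnd_do_flow_control() allocates with GFP_ATOMIC and defers the
 * qdisc update to a workqueue. A hypothetical atomic-context caller would do:
 *
 *	switch (_rmnet_vnd_do_flow_control(dev, tc_handle, enable)) {
 *	case RMNET_VND_FC_QUEUED:
 *		break;			// worker will touch the qdisc later
 *	case RMNET_VND_FC_NOT_ENABLED:	// built without CONFIG_RMNET_DATA_FC
 *	case RMNET_VND_FC_KMALLOC_ERR:	// GFP_ATOMIC allocation failed
 *	default:
 *		LOGD("[%s] flow control request dropped", dev->name);
 *	}
 *
 * The work item embeds struct work_struct as its first member, so the worker
 * can cast back to struct rmnet_vnd_fc_work and kfree() the whole allocation
 * when it is done.
 */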

static int rmnet_vnd_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
{
	struct rmnet_vnd_private_s *dev_conf;
	struct rmnet_ioctl_extended_s ext_cmd;
	int rc = 0;

	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
			    sizeof(struct rmnet_ioctl_extended_s));
	if (rc) {
		LOGM("%s(): copy_from_user() failed\n", __func__);
		return rc;
	}

	switch (ext_cmd.extended_ioctl) {
	case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
		ext_cmd.u.data = 0;
		break;

	case RMNET_IOCTL_GET_DRIVER_NAME:
		strlcpy(ext_cmd.u.if_name, "rmnet_data",
			sizeof(ext_cmd.u.if_name));
		break;

	case RMNET_IOCTL_GET_SUPPORTED_QOS_MODES:
		ext_cmd.u.data = RMNET_IOCTL_QOS_MODE_6
				 | RMNET_IOCTL_QOS_MODE_8;
		break;

	case RMNET_IOCTL_GET_QOS_VERSION:
		ext_cmd.u.data = dev_conf->qos_version;
		break;

	case RMNET_IOCTL_SET_QOS_VERSION:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_6 ||
		    ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_8 ||
		    ext_cmd.u.data == 0) {
			dev_conf->qos_version = ext_cmd.u.data;
		} else {
			rc = -EINVAL;
			goto done;
		}
		break;

	default:
		rc = -EINVAL;
		goto done;
	}

	rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd,
			  sizeof(struct rmnet_ioctl_extended_s));
	if (rc)
		LOGM("%s(): copy_to_user() failed\n", __func__);

done:
	return rc;
}
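
/* Illustrative userspace sketch (an assumption about the calling convention,
 * not code from this driver): extended commands travel inside a struct
 * rmnet_ioctl_extended_s whose address is passed through ifr_ifru.ifru_data,
 * e.g. querying which QoS modes a VND supports:
 *
 *	struct rmnet_ioctl_extended_s ext;
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	memset(&ext, 0, sizeof(ext));
 *	strncpy(ifr.ifr_name, "rmnet_data0", sizeof(ifr.ifr_name) - 1);
 *	ext.extended_ioctl = RMNET_IOCTL_GET_SUPPORTED_QOS_MODES;
 *	ifr.ifr_ifru.ifru_data = &ext;
 *	if (!ioctl(fd, RMNET_IOCTL_EXTENDED, &ifr))
 *		printf("QoS modes bitmask: 0x%x\n", ext.u.data);
 *
 * The kernel side above copies the structure in, fills in the union and
 * copies it back out, so the same buffer serves as both input and output.
 */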

/* rmnet_vnd_ioctl() - IOCTL NDO callback
 * @dev: Virtual network device
 * @ifr: User data
 * @cmd: IOCTL command value
 *
 * Standard network driver operations hook to process IOCTLs. Called by kernel
 * to process non-standard IOCTLs for the device.
 *
 * Return:
 * - 0 if successful
 * - -EINVAL if unknown IOCTL
 */
static int rmnet_vnd_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_vnd_private_s *dev_conf;
	int rc;
	struct rmnet_ioctl_data_s ioctl_data;

	rc = 0;
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	rc = _rmnet_vnd_do_qos_ioctl(dev, ifr, cmd);
	if (rc != -EINVAL)
		return rc;
	rc = 0; /* Reset rc as it may contain -EINVAL from above */

	switch (cmd) {
	case RMNET_IOCTL_OPEN: /* Do nothing. Support legacy behavior */
		LOGM("RMNET_IOCTL_OPEN on %s (ignored)", dev->name);
		break;

	case RMNET_IOCTL_CLOSE: /* Do nothing. Support legacy behavior */
		LOGM("RMNET_IOCTL_CLOSE on %s (ignored)", dev->name);
		break;

	case RMNET_IOCTL_SET_LLP_ETHERNET:
		LOGM("RMNET_IOCTL_SET_LLP_ETHERNET on %s (no support)",
		     dev->name);
		rc = -EINVAL;
		break;

	case RMNET_IOCTL_SET_LLP_IP: /* Do nothing. Support legacy behavior */
		LOGM("RMNET_IOCTL_SET_LLP_IP on %s (ignored)", dev->name);
		break;

	case RMNET_IOCTL_GET_LLP: /* Always return IP mode */
		LOGM("RMNET_IOCTL_GET_LLP on %s", dev->name);
		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
				 sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;

	case RMNET_IOCTL_EXTENDED:
		rc = rmnet_vnd_ioctl_extended(dev, ifr);
		break;

	default:
		LOGM("Unknown IOCTL 0x%08X", cmd);
		rc = -EINVAL;
	}

	return rc;
}

static const struct net_device_ops rmnet_data_vnd_ops = {
	.ndo_init = 0,
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_do_ioctl = rmnet_vnd_ioctl,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_set_mac_address = 0,
	.ndo_validate_addr = 0,
};

/* rmnet_vnd_setup() - net_device initialization callback
 * @dev: Virtual network device
 *
 * Called by kernel whenever a new rmnet_data<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc...
 */
static void rmnet_vnd_setup(struct net_device *dev)
{
	struct rmnet_vnd_private_s *dev_conf;

	LOGM("Setting up device %s", dev->name);

	/* Clear out private data */
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
	memset(dev_conf, 0, sizeof(struct rmnet_vnd_private_s));

	dev->netdev_ops = &rmnet_data_vnd_ops;
	dev->mtu = RMNET_DATA_DFLT_PACKET_SIZE;
	dev->needed_headroom = RMNET_DATA_NEEDED_HEADROOM;
	random_ether_addr(dev->dev_addr);
	dev->tx_queue_len = RMNET_DATA_TX_QUEUE_LEN;

	/* Raw IP mode */
	dev->header_ops = 0; /* No header */
	dev->type = ARPHRD_RAWIP;
	dev->hard_header_len = 0;
	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	/* Flow control */
	rwlock_init(&dev_conf->flow_map_lock);
	INIT_LIST_HEAD(&dev_conf->flow_head);
}

/* rmnet_vnd_disable_offload() - net_device initialization helper function
 * @dev: Virtual network device
 *
 * Called during device initialization. Disables GRO.
 */
static void rmnet_vnd_disable_offload(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO;
	__netdev_update_features(dev);
}

/* Exposed API */

/* rmnet_vnd_exit() - Shutdown cleanup hook
 *
 * Called by RmNet main on module unload. Cleans up data structures and
 * unregisters/frees net_devices.
 */
void rmnet_vnd_exit(void)
{
	int i;

	for (i = 0; i < RMNET_DATA_MAX_VND; i++)
		if (rmnet_devices[i]) {
			unregister_netdev(rmnet_devices[i]);
			free_netdev(rmnet_devices[i]);
		}
}

/* rmnet_vnd_init() - Init hook
 *
 * Called by RmNet main on module load. Initializes data structures.
 */
int rmnet_vnd_init(void)
{
	memset(rmnet_devices, 0,
	       sizeof(struct net_device *) * RMNET_DATA_MAX_VND);
	return 0;
}

/* rmnet_vnd_create_dev() - Create a new virtual network device node.
 * @id: Virtual device node id
 * @new_device: Pointer to newly created device node
 * @prefix: Device name prefix
 * @use_name: Use @prefix verbatim as the device name instead of as a prefix
 *
 * Allocates structures for new virtual network devices. Sets the name of the
 * new device and registers it with the network stack. Device will appear in
 * ifconfig list after this is called. If the prefix is null, then
 * RMNET_DATA_DEV_NAME_STR will be assumed.
 *
 * Return:
 * - 0 if successful
 * - RMNET_CONFIG_BAD_ARGUMENTS if id is out of range or prefix is too long
 * - RMNET_CONFIG_DEVICE_IN_USE if id already in use
 * - RMNET_CONFIG_NOMEM if net_device allocation failed
 * - RMNET_CONFIG_UNKNOWN_ERROR if register_netdevice() fails
 */
int rmnet_vnd_create_dev(int id, struct net_device **new_device,
			 const char *prefix, int use_name)
{
	struct net_device *dev;
	char dev_prefix[IFNAMSIZ];
	int p, rc = 0;

	if (id < 0 || id >= RMNET_DATA_MAX_VND) {
		*new_device = 0;
		return RMNET_CONFIG_BAD_ARGUMENTS;
	}

	if (rmnet_devices[id] != 0) {
		*new_device = 0;
		return RMNET_CONFIG_DEVICE_IN_USE;
	}

	if (!prefix && !use_name)
		p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d",
			      RMNET_DATA_DEV_NAME_STR);
	else if (prefix && use_name)
		p = scnprintf(dev_prefix, IFNAMSIZ, "%s", prefix);
	else if (prefix && !use_name)
		p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d", prefix);
	else
		return RMNET_CONFIG_BAD_ARGUMENTS;

	if (p >= (IFNAMSIZ - 1)) {
		LOGE("Specified prefix longer than IFNAMSIZ");
		return RMNET_CONFIG_BAD_ARGUMENTS;
	}

	dev = alloc_netdev(sizeof(struct rmnet_vnd_private_s),
			   dev_prefix,
			   use_name ? NET_NAME_UNKNOWN : NET_NAME_ENUM,
			   rmnet_vnd_setup);
	if (!dev) {
		LOGE("Failed to allocate netdev for id %d", id);
		*new_device = 0;
		return RMNET_CONFIG_NOMEM;
	}

	if (!prefix) {
		/* Configuring DL checksum offload on rmnet_data interfaces */
		dev->hw_features = NETIF_F_RXCSUM;
		/* Configuring UL checksum offload on rmnet_data interfaces */
		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		/* Configuring GRO on rmnet_data interfaces */
		dev->hw_features |= NETIF_F_GRO;
		/* Configuring Scatter-Gather on rmnet_data interfaces */
		dev->hw_features |= NETIF_F_SG;
		/* Configuring GSO on rmnet_data interfaces */
		dev->hw_features |= NETIF_F_GSO;
		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	rc = register_netdevice(dev);
	if (rc != 0) {
		LOGE("Failed to register netdev [%s]", dev->name);
		free_netdev(dev);
		*new_device = 0;
		return RMNET_CONFIG_UNKNOWN_ERROR;
	} else {
		rmnet_devices[id] = dev;
		*new_device = dev;
		LOGM("Registered device %s", dev->name);
	}

	rmnet_vnd_disable_offload(dev);

	return rc;
}
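
/* Illustrative sketch (hypothetical caller, not part of this file): the
 * configuration layer pairs rmnet_vnd_create_dev() with rmnet_vnd_free_dev()
 * using the same VND id, and the return values are RMNET_CONFIG_* codes
 * rather than negative errnos:
 *
 *	struct net_device *vnd;
 *	int rc;
 *
 *	rtnl_lock();
 *	rc = rmnet_vnd_create_dev(4, &vnd, NULL, 0);
 *	rtnl_unlock();
 *	if (rc != RMNET_CONFIG_OK)
 *		return rc;
 *	// ... attach a logical endpoint, pass traffic ...
 *	rmnet_vnd_free_dev(4);	// takes rtnl_lock() internally
 *
 * register_netdevice() used above expects the caller to hold the RTNL lock,
 * while rmnet_vnd_free_dev() acquires it itself, so the two ends of the
 * lifecycle use different locking patterns.
 */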

/* rmnet_vnd_free_dev() - Free a virtual network device node.
 * @id: Virtual device node id
 *
 * Unregisters the virtual network device node and frees it.
 * unregister_netdev locks the rtnl mutex, so the mutex must not be locked
 * by the caller of the function. unregister_netdev enqueues the request to
 * unregister the device into a TODO queue. The requests in the TODO queue
 * are only done after the rtnl mutex is unlocked, therefore free_netdev has
 * to be called after unlocking the rtnl mutex.
 *
 * Return:
 * - 0 if successful
 * - RMNET_CONFIG_NO_SUCH_DEVICE if id is invalid or not in range
 * - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset
 */
int rmnet_vnd_free_dev(int id)
{
	struct rmnet_logical_ep_conf_s *epconfig_l;
	struct net_device *dev;

	rtnl_lock();
	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
		rtnl_unlock();
		LOGM("Invalid id [%d]", id);
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}

	epconfig_l = rmnet_vnd_get_le_config(rmnet_devices[id]);
	if (epconfig_l && epconfig_l->refcount) {
		rtnl_unlock();
		return RMNET_CONFIG_DEVICE_IN_USE;
	}

	dev = rmnet_devices[id];
	rmnet_devices[id] = 0;
	rtnl_unlock();

	if (dev) {
		unregister_netdev(dev);
		free_netdev(dev);
		return 0;
	} else {
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}
}

/* rmnet_vnd_get_name() - Gets the string name of a VND based on ID
 * @id: Virtual device node id
 * @name: Buffer to store name of virtual device node
 * @name_len: Length of name buffer
 *
 * Copies the name of the virtual device node into the user's buffer. Returns
 * an error if the buffer is null or too small to hold the device name.
 *
 * Return:
 * - 0 if successful
 * - -EINVAL if name is null
 * - -EINVAL if id is invalid or not in range
 * - -EINVAL if name buffer is too small to hold the device name
 */
int rmnet_vnd_get_name(int id, char *name, int name_len)
{
	int p;

	if (!name) {
		LOGM("%s", "Bad arguments; name buffer null");
		return -EINVAL;
	}

	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
		LOGM("Invalid id [%d]", id);
		return -EINVAL;
	}

	p = strlcpy(name, rmnet_devices[id]->name, name_len);
	if (p >= name_len) {
		LOGM("Buffer too small (%d) to fit device name", name_len);
		return -EINVAL;
	}
	LOGL("Found mapping [%d]->\"%s\"", id, name);

	return 0;
}

/* rmnet_vnd_is_vnd() - Determine if net_device is a RmNet-owned virtual device
 * @dev: Network device to test
 *
 * Searches through the list of known RmNet virtual devices. This function is
 * O(n) and should not be used in the data path.
 *
 * Return:
 * - 0 if device is not an RmNet virtual device
 * - index of the device in rmnet_devices[] plus one otherwise
 */
int rmnet_vnd_is_vnd(struct net_device *dev)
{
	/* This is not an efficient search, but, this will only be called in
	 * a configuration context, and the list is small.
	 */
	int i;

	if (!dev)
		return 0;

	for (i = 0; i < RMNET_DATA_MAX_VND; i++)
		if (dev == rmnet_devices[i])
			return i + 1;

	return 0;
}

/* rmnet_vnd_get_le_config() - Get the logical endpoint configuration
 * @dev: Virtual device node
 *
 * Gets the logical endpoint configuration for a RmNet virtual network device
 * node. Caller should confirm that the device is a RmNet VND before calling.
 *
 * Return:
 * - Pointer to logical endpoint configuration structure
 * - 0 (null) if dev is null
 */
struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev)
{
	struct rmnet_vnd_private_s *dev_conf;

	if (!dev)
		return 0;

	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
	if (!dev_conf)
		return 0;

	return &dev_conf->local_ep;
}

/* _rmnet_vnd_get_flow_map() - Gets object representing a MAP flow handle
 * @dev_conf: Private configuration structure for virtual network device
 * @map_flow: MAP flow handle ID
 *
 * Loops through available flow mappings and compares the MAP flow handle.
 * Returns when a mapping is found.
 *
 * Return:
 * - Null if no mapping was found
 * - Pointer to mapping otherwise
 */
static struct rmnet_map_flow_mapping_s *_rmnet_vnd_get_flow_map
					(struct rmnet_vnd_private_s *dev_conf,
					 u32 map_flow)
{
	struct list_head *p;
	struct rmnet_map_flow_mapping_s *itm;

	list_for_each(p, &dev_conf->flow_head) {
		itm = list_entry(p, struct rmnet_map_flow_mapping_s, list);

		if (unlikely(!itm))
			return 0;

		if (itm->map_flow_id == map_flow)
			return itm;
	}
	return 0;
}

/* _rmnet_vnd_update_flow_map() - Add or remove individual TC flow handles
 * @action: One of RMNET_VND_UF_ACTION_ADD / RMNET_VND_UF_ACTION_DEL
 * @itm: Flow mapping object
 * @tc_flow: TC flow handle
 *
 * RMNET_VND_UF_ACTION_ADD:
 * Will check for a free mapping slot in the mapping object. If one is found,
 * valid for that slot will be set to 1 and the value will be set.
 *
 * RMNET_VND_UF_ACTION_DEL:
 * Will check for matching tc handle. If found, valid for that slot will be
 * set to 0 and the value will also be zeroed.
 *
 * Return:
 * - RMNET_VND_UPDATE_FLOW_OK tc flow handle is added/removed ok
 * - RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM if there are no more tc handles
 * - RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT if flow mapping is now empty
 * - RMNET_VND_UPDATE_FLOW_NO_ACTION if no action was taken
 */
static int _rmnet_vnd_update_flow_map(u8 action,
				      struct rmnet_map_flow_mapping_s *itm,
				      u32 tc_flow)
{
	int rc, i, j;

	rc = RMNET_VND_UPDATE_FLOW_OK;

	switch (action) {
	case RMNET_VND_UF_ACTION_ADD:
		rc = RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM;
		for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
			if (itm->tc_flow_valid[i] == 0) {
				itm->tc_flow_valid[i] = 1;
				itm->tc_flow_id[i] = tc_flow;
				rc = RMNET_VND_UPDATE_FLOW_OK;
				LOGD("{%pK}->tc_flow_id[%d]=%08X",
				     itm, i, tc_flow);
				break;
			}
		}
		break;

	case RMNET_VND_UF_ACTION_DEL:
		j = 0;
		rc = RMNET_VND_UPDATE_FLOW_OK;
		for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
			if (itm->tc_flow_valid[i] == 1) {
				if (itm->tc_flow_id[i] == tc_flow) {
					itm->tc_flow_valid[i] = 0;
					itm->tc_flow_id[i] = 0;
					j++;
					LOGD("{%pK}->tc_flow_id[%d]=0", itm, i);
				}
			} else {
				j++;
			}
		}
		if (j == RMNET_MAP_FLOW_NUM_TC_HANDLE)
			rc = RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT;
		break;

	default:
		rc = RMNET_VND_UPDATE_FLOW_NO_ACTION;
		break;
	}
	return rc;
}

/* rmnet_vnd_add_tc_flow() - Add a MAP/TC flow handle mapping
 * @id: Virtual network device ID
 * @map_flow: MAP flow handle
 * @tc_flow: TC flow handle
 *
 * Checks for an existing flow mapping object corresponding to map_flow. If one
 * is found, then it will try to add to the existing mapping object. Otherwise,
 * a new mapping object is created.
 *
 * Return:
 * - RMNET_CONFIG_OK if successful
 * - RMNET_CONFIG_NO_SUCH_DEVICE if id is invalid or VND was not created
 * - RMNET_CONFIG_TC_HANDLE_FULL if there is no more room in the map object
 * - RMNET_CONFIG_NOMEM failed to allocate a new map object
 */
int rmnet_vnd_add_tc_flow(u32 id, u32 map_flow, u32 tc_flow)
{
	struct rmnet_map_flow_mapping_s *itm;
	struct net_device *dev;
	struct rmnet_vnd_private_s *dev_conf;
	int r;
	unsigned long flags;

	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
		LOGM("Invalid VND id [%d]", id);
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}

	dev = rmnet_devices[id];
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	if (!dev_conf)
		return RMNET_CONFIG_NO_SUCH_DEVICE;

	write_lock_irqsave(&dev_conf->flow_map_lock, flags);
	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
	if (itm) {
		r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_ADD,
					       itm, tc_flow);
		if (r != RMNET_VND_UPDATE_FLOW_OK) {
			write_unlock_irqrestore(&dev_conf->flow_map_lock,
						flags);
			return RMNET_CONFIG_TC_HANDLE_FULL;
		}
		write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
		return RMNET_CONFIG_OK;
	}
	write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);

	itm = kmalloc(sizeof(*itm), GFP_KERNEL);

	if (!itm) {
		LOGM("%s", "Failure allocating flow mapping");
		return RMNET_CONFIG_NOMEM;
	}
	memset(itm, 0, sizeof(struct rmnet_map_flow_mapping_s));

	itm->map_flow_id = map_flow;
	itm->tc_flow_valid[0] = 1;
	itm->tc_flow_id[0] = tc_flow;

	/* How can we dynamically init these safely? Kernel only provides
	 * static initializers for atomic_t.
	 */
	itm->v4_seq.counter = 0; /* Init is broken: ATOMIC_INIT(0); */
	itm->v6_seq.counter = 0; /* Init is broken: ATOMIC_INIT(0); */

	write_lock_irqsave(&dev_conf->flow_map_lock, flags);
	list_add(&itm->list, &dev_conf->flow_head);
	write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);

	LOGD("Created flow mapping [%s][0x%08X][0x%08X]@%pK",
	     dev->name, itm->map_flow_id, itm->tc_flow_id[0], itm);

	return RMNET_CONFIG_OK;
}
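
/* Illustrative sketch (hypothetical configuration-path caller, not code from
 * this file): a MAP flow can be bound to at most RMNET_MAP_FLOW_NUM_TC_HANDLE
 * tc handles, and the mapping object disappears once its last handle is
 * removed:
 *
 *	u32 vnd_id = 4, map_flow = 0x11, tc_major = 0x00010000;
 *
 *	rmnet_vnd_add_tc_flow(vnd_id, map_flow, tc_major | 1);
 *	rmnet_vnd_add_tc_flow(vnd_id, map_flow, tc_major | 2);
 *	// ... flow control indications now touch both tc handles ...
 *	rmnet_vnd_del_tc_flow(vnd_id, map_flow, tc_major | 1);
 *	rmnet_vnd_del_tc_flow(vnd_id, map_flow, tc_major | 2);	// frees mapping
 *
 * A fourth add against the same map_flow would return
 * RMNET_CONFIG_TC_HANDLE_FULL, since only three slots exist per mapping.
 */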

/* rmnet_vnd_del_tc_flow() - Delete a MAP/TC flow handle mapping
 * @id: Virtual network device ID
 * @map_flow: MAP flow handle
 * @tc_flow: TC flow handle
 *
 * Checks for an existing flow mapping object corresponding to map_flow. If one
 * is found, then it will try to remove the existing tc_flow mapping. If the
 * mapping object no longer contains any mappings, then it is freed. Otherwise
 * the mapping object is left in the list.
 *
 * Return:
 * - RMNET_CONFIG_OK if successful or if there was no such tc_flow
 * - RMNET_CONFIG_INVALID_REQUEST if there is no such map_flow
 */
int rmnet_vnd_del_tc_flow(u32 id, u32 map_flow, u32 tc_flow)
{
	struct rmnet_vnd_private_s *dev_conf;
	struct net_device *dev;
	struct rmnet_map_flow_mapping_s *itm;
	int r;
	unsigned long flags;
	int rc = RMNET_CONFIG_OK;

	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
		LOGM("Invalid VND id [%d]", id);
		return RMNET_CONFIG_NO_SUCH_DEVICE;
	}

	dev = rmnet_devices[id];
	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	if (!dev_conf)
		return RMNET_CONFIG_NO_SUCH_DEVICE;

	r = RMNET_VND_UPDATE_FLOW_NO_ACTION;
	write_lock_irqsave(&dev_conf->flow_map_lock, flags);
	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
	if (!itm) {
		rc = RMNET_CONFIG_INVALID_REQUEST;
	} else {
		r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_DEL,
					       itm, tc_flow);
		if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT)
			list_del(&itm->list);
	}
	write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);

	if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT) {
		if (itm)
			LOGD("Removed flow mapping [%s][0x%08X]@%pK",
			     dev->name, itm->map_flow_id, itm);
		kfree(itm);
	}

	return rc;
}

/* rmnet_vnd_do_flow_control() - Process flow control request
 * @dev: Virtual network device node to do lookup on
 * @map_flow_id: Flow ID from MAP message
 * @v4_seq: IPv4 flow control indication sequence number
 * @v6_seq: IPv6 flow control indication sequence number
 * @enable: boolean to enable/disable flow
 *
 * Return:
 * - 0 if successful (including when no mapping exists for map_flow_id)
 * - 2 if dev is not a RmNet virtual network device node
 */
int rmnet_vnd_do_flow_control(struct net_device *dev,
			      u32 map_flow_id,
			      u16 v4_seq,
			      u16 v6_seq,
			      int enable)
{
	struct rmnet_vnd_private_s *dev_conf;
	struct rmnet_map_flow_mapping_s *itm;
	int do_fc, error, i;

	error = 0;
	do_fc = 0;

	if (unlikely(!dev))
		return 2;

	if (!rmnet_vnd_is_vnd(dev))
		return 2;

	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	if (unlikely(!dev_conf))
		return 2;

	read_lock(&dev_conf->flow_map_lock);
	if (map_flow_id == 0xFFFFFFFF) {
		itm = &dev_conf->root_flow;
		goto nolookup;
	}

	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow_id);

	if (!itm) {
		LOGL("Got flow control request for unknown flow %08X",
		     map_flow_id);
		goto fcdone;
	}

nolookup:
	if (v4_seq == 0 || v4_seq >= atomic_read(&itm->v4_seq)) {
		atomic_set(&itm->v4_seq, v4_seq);
		if (map_flow_id == 0xFFFFFFFF) {
			LOGD("Setting VND TX queue state to %d", enable);
			/* Although we expect similar number of enable/disable
			 * commands, optimize for the disable. That is more
			 * latency sensitive than enable
			 */
			if (unlikely(enable))
				netif_wake_queue(dev);
			else
				netif_stop_queue(dev);
			trace_rmnet_fc_map(0xFFFFFFFF, 0, enable);
			goto fcdone;
		}
		for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
			if (itm->tc_flow_valid[i] == 1) {
				LOGD("Found [%s][0x%08X][%d:0x%08X]",
				     dev->name, itm->map_flow_id, i,
				     itm->tc_flow_id[i]);

				_rmnet_vnd_do_flow_control(dev,
							   itm->tc_flow_id[i],
							   enable);
			}
		}
	} else {
		LOGD("Internal seq(%hd) higher than called(%hd)",
		     atomic_read(&itm->v4_seq), v4_seq);
	}

fcdone:
	read_unlock(&dev_conf->flow_map_lock);

	return error;
}
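
/* Illustrative sketch (assumption about the caller, not code from this file):
 * a MAP flow control command handler would pass its decoded fields straight
 * through. The essential contract: a map_flow_id of 0xFFFFFFFF targets the
 * device's root flow (the VND TX queue itself), any other value is looked up
 * in the per-device flow list, and enable selects wake (1) versus stop (0).
 * A sequence number of 0 bypasses the stale-indication check:
 *
 *	rmnet_vnd_do_flow_control(vnd_dev, 0xFFFFFFFF, 0, 0, 0);  // stop TX q
 *	rmnet_vnd_do_flow_control(vnd_dev, 0xFFFFFFFF, 0, 0, 1);  // wake TX q
 *	rmnet_vnd_do_flow_control(vnd_dev, map_flow, seq, 0, 1);  // per-flow
 *
 * The per-flow case ends up calling _rmnet_vnd_do_flow_control() for every
 * valid tc handle registered against the mapping.
 */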

/* rmnet_vnd_get_by_id() - Get VND by array index ID
 * @id: Virtual network device id [0:RMNET_DATA_MAX_VND]
 *
 * Return:
 * - 0 if no device or ID out of range
 * - otherwise return pointer to VND net_device struct
 */
struct net_device *rmnet_vnd_get_by_id(int id)
{
	if (id < 0 || id >= RMNET_DATA_MAX_VND) {
		LOGE("Bug; VND ID out of bounds");
		return 0;
	}
	return rmnet_devices[id];
}