blob: 467223395a24c381cc2bff0cb419afa0c7886d89 [file] [log] [blame]
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05301/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * WWAN Transport Network Driver.
15 */
16
17#include <linux/completion.h>
18#include <linux/errno.h>
19#include <linux/if_arp.h>
20#include <linux/interrupt.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
25#include <linux/of_device.h>
26#include <linux/string.h>
27#include <linux/skbuff.h>
28#include <linux/version.h>
29#include <linux/workqueue.h>
30#include <net/pkt_sched.h>
31#include <soc/qcom/subsystem_restart.h>
32#include <soc/qcom/subsystem_notif.h>
33#include "ipa_qmi_service.h"
34#include <linux/rmnet_ipa_fd_ioctl.h>
35#include <linux/ipa.h>
36#include <uapi/linux/net_map.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020037#include <uapi/linux/msm_rmnet.h>
38#include <net/rmnet_config.h>
Amir Levy9659e592016-10-27 18:08:27 +030039
40#include "ipa_trace.h"
41
42#define WWAN_METADATA_SHFT 24
43#define WWAN_METADATA_MASK 0xFF000000
44#define WWAN_DATA_LEN 2000
45#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
46#define HEADROOM_FOR_QMAP 8 /* for mux header */
47#define TAILROOM 0 /* for padding by mux layer */
48#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
49#define UL_FILTER_RULE_HANDLE_START 69
50#define DEFAULT_OUTSTANDING_HIGH_CTL 96
51#define DEFAULT_OUTSTANDING_HIGH 64
52#define DEFAULT_OUTSTANDING_LOW 32
53
54#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +053055#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
56
Amir Levy9659e592016-10-27 18:08:27 +030057#define IPA_WWAN_DEVICE_COUNT (1)
58
59#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
60
61#define INVALID_MUX_ID 0xFF
62#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
63#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
64#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
65
66#define NAPI_WEIGHT 60
Sunil Paidimarri226cf032016-10-14 13:33:08 -070067#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024
Amir Levy9659e592016-10-27 18:08:27 +030068
69static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
70static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
71static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl;
72static struct rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
73static int num_q6_rule, old_num_q6_rule;
74static int rmnet_index;
75static bool egress_set, a7_ul_flt_set;
76static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/
77static atomic_t is_initialized;
78static atomic_t is_ssr;
79static void *subsys_notify_handle;
80
81u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */
82static struct mutex ipa_to_apps_pipe_handle_guard;
83static int wwan_add_ul_flt_rule_to_ipa(void);
84static int wwan_del_ul_flt_rule_to_ipa(void);
85static void ipa_wwan_msg_free_cb(void*, u32, u32);
86static void ipa_rmnet_rx_cb(void *priv);
87static int ipa_rmnet_poll(struct napi_struct *napi, int budget);
88
89static void wake_tx_queue(struct work_struct *work);
90static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue);
91
92static void tethering_stats_poll_queue(struct work_struct *work);
93static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
94 tethering_stats_poll_queue);
95
96enum wwan_device_status {
97 WWAN_DEVICE_INACTIVE = 0,
98 WWAN_DEVICE_ACTIVE = 1
99};
100
101struct ipa_rmnet_plat_drv_res {
102 bool ipa_rmnet_ssr;
103 bool ipa_loaduC;
104 bool ipa_advertise_sg_support;
105 bool ipa_napi_enable;
Sunil Paidimarri226cf032016-10-14 13:33:08 -0700106 u32 wan_rx_desc_size;
Amir Levy9659e592016-10-27 18:08:27 +0300107};
108
109static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
110/**
111 * struct wwan_private - WWAN private data
112 * @net: network interface struct implemented by this driver
113 * @stats: iface statistics
114 * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
115 * @outstanding_high: number of outstanding packets allowed
116 * @outstanding_low: number of outstanding packets which shall cause
117 * @ch_id: channel id
118 * @lock: spinlock for mutual exclusion
119 * @device_status: holds device status
120 *
121 * WWAN private - holds all relevant info about WWAN driver
122 */
123struct wwan_private {
124 struct net_device *net;
125 struct net_device_stats stats;
126 atomic_t outstanding_pkts;
127 int outstanding_high_ctl;
128 int outstanding_high;
129 int outstanding_low;
130 uint32_t ch_id;
131 spinlock_t lock;
132 struct completion resource_granted_completion;
133 enum wwan_device_status device_status;
134 struct napi_struct napi;
135};
136
137/**
138* ipa_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
139*
140* Return codes:
141* 0: success
142* -ENOMEM: failed to allocate memory
143* -EPERM: failed to add the tables
144*/
static int ipa_setup_a7_qmap_hdr(void)
{
	struct ipa_ioc_add_hdr *hdr;
	struct ipa_hdr_add *hdr_entry;
	u32 pyld_sz;
	int ret;

	/* install the basic exception header */
	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
		sizeof(struct ipa_hdr_add);
	hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!hdr) {
		IPAWANERR("fail to alloc exception hdr\n");
		return -ENOMEM;
	}
	hdr->num_hdrs = 1;
	hdr->commit = 1;	/* commit to HW as part of the add */
	hdr_entry = &hdr->hdr[0];

	strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
				IPA_RESOURCE_NAME_MAX);
	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */

	/* ipa2_add_hdr() returning 0 is not enough on its own; the
	 * per-entry status must be checked separately below.
	 */
	if (ipa2_add_hdr(hdr)) {
		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}

	if (hdr_entry->status) {
		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}
	/* cache the handle; ipa_del_a7_qmap_hdr() deletes it later */
	qmap_hdr_hdl = hdr_entry->hdr_hdl;

	ret = 0;
bail:
	kfree(hdr);
	return ret;
}
186
/* ipa_del_a7_qmap_hdr() - delete the default A7 QMAP header installed by
 * ipa_setup_a7_qmap_hdr() and clear the cached global handle.
 * Failures are only logged; there is no return value.
 */
static void ipa_del_a7_qmap_hdr(void)
{
	struct ipa_ioc_del_hdr *del_hdr;
	struct ipa_hdr_del *hdl_entry;
	u32 pyld_sz;
	int ret;

	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
		sizeof(struct ipa_hdr_del);
	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!del_hdr) {
		IPAWANERR("fail to alloc exception hdr_del\n");
		return;
	}

	del_hdr->commit = 1;
	del_hdr->num_hdls = 1;
	hdl_entry = &del_hdr->hdl[0];
	hdl_entry->hdl = qmap_hdr_hdl;	/* handle cached at setup time */

	/* both the call result and the per-entry status signal failure */
	ret = ipa2_del_hdr(del_hdr);
	if (ret || hdl_entry->status)
		IPAWANERR("ipa2_del_hdr failed\n");
	else
		IPAWANDBG("hdrs deletion done\n");

	qmap_hdr_hdl = 0;	/* handle is gone (or unusable) either way */
	kfree(del_hdr);
}
216
217static void ipa_del_qmap_hdr(uint32_t hdr_hdl)
218{
219 struct ipa_ioc_del_hdr *del_hdr;
220 struct ipa_hdr_del *hdl_entry;
221 u32 pyld_sz;
222 int ret;
223
224 if (hdr_hdl == 0) {
225 IPAWANERR("Invalid hdr_hdl provided\n");
226 return;
227 }
228
229 pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
230 sizeof(struct ipa_hdr_del);
231 del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
232 if (!del_hdr) {
233 IPAWANERR("fail to alloc exception hdr_del\n");
234 return;
235 }
236
237 del_hdr->commit = 1;
238 del_hdr->num_hdls = 1;
239 hdl_entry = &del_hdr->hdl[0];
240 hdl_entry->hdl = hdr_hdl;
241
242 ret = ipa2_del_hdr(del_hdr);
243 if (ret || hdl_entry->status)
244 IPAWANERR("ipa2_del_hdr failed\n");
245 else
246 IPAWANDBG("header deletion done\n");
247
248 qmap_hdr_hdl = 0;
249 kfree(del_hdr);
250}
251
252static void ipa_del_mux_qmap_hdrs(void)
253{
254 int index;
255
256 for (index = 0; index < rmnet_index; index++) {
257 ipa_del_qmap_hdr(mux_channel[index].hdr_hdl);
258 mux_channel[index].hdr_hdl = 0;
259 }
260}
261
/* ipa_add_qmap_hdr() - install a per-mux QMAP header in IPA
 * @mux_id:  mux channel id; embedded in the header name and in byte 1
 *           of the 4-byte QMAP header
 * @hdr_hdl: out parameter; receives the IPA header handle on success
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EPERM if the
 * IPA driver rejects the header.
 */
static int ipa_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
{
	struct ipa_ioc_add_hdr *hdr;
	struct ipa_hdr_add *hdr_entry;
	char hdr_name[IPA_RESOURCE_NAME_MAX];
	u32 pyld_sz;
	int ret;

	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
		sizeof(struct ipa_hdr_add);
	hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!hdr) {
		IPAWANERR("fail to alloc exception hdr\n");
		return -ENOMEM;
	}
	hdr->num_hdrs = 1;
	hdr->commit = 1;
	hdr_entry = &hdr->hdr[0];

	/* header name is "<v4-prefix><mux_id>", shared by v4 and v6 */
	snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		 A2_MUX_HDR_NAME_V4_PREF,
		 mux_id);
	strlcpy(hdr_entry->name, hdr_name,
		IPA_RESOURCE_NAME_MAX);

	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
	hdr_entry->hdr[1] = (uint8_t) mux_id;	/* QMAP mux_id byte */
	IPAWANDBG("header (%s) with mux-id: (%d)\n",
		hdr_name,
		hdr_entry->hdr[1]);
	/* both the call result and the per-entry status must be checked */
	if (ipa2_add_hdr(hdr)) {
		IPAWANERR("fail to add IPA_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}

	if (hdr_entry->status) {
		IPAWANERR("fail to add IPA_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}

	ret = 0;
	*hdr_hdl = hdr_entry->hdr_hdl;	/* only written on success */
bail:
	kfree(hdr);
	return ret;
}
310
311/**
312* ipa_setup_dflt_wan_rt_tables() - Setup default wan routing tables
313*
314* Return codes:
315* 0: success
316* -ENOMEM: failed to allocate memory
317* -EPERM: failed to add the tables
318*/
static int ipa_setup_dflt_wan_rt_tables(void)
{
	struct ipa_ioc_add_rt_rule *rt_rule;
	struct ipa_rt_rule_add *rt_rule_entry;

	rt_rule =
	   kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
		   sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
	if (!rt_rule) {
		IPAWANERR("fail to alloc mem\n");
		return -ENOMEM;
	}
	/* setup a default v4 route to point to Apps */
	rt_rule->num_rules = 1;
	rt_rule->commit = 1;
	rt_rule->ip = IPA_IP_v4;
	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
		IPA_RESOURCE_NAME_MAX);

	rt_rule_entry = &rt_rule->rules[0];
	rt_rule_entry->at_rear = 1;	/* catch-all: append at table end */
	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
	/* use the default A7 QMAP header installed earlier */
	rt_rule_entry->rule.hdr_hdl = qmap_hdr_hdl;

	if (ipa2_add_rt_rule(rt_rule)) {
		IPAWANERR("fail to add dflt_wan v4 rule\n");
		kfree(rt_rule);
		return -EPERM;
	}

	IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
	dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;

	/* setup a default v6 route to point to A5; the request buffer is
	 * reused with only the IP version changed
	 */
	rt_rule->ip = IPA_IP_v6;
	if (ipa2_add_rt_rule(rt_rule)) {
		IPAWANERR("fail to add dflt_wan v6 rule\n");
		kfree(rt_rule);
		return -EPERM;
	}
	IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
	dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;

	kfree(rt_rule);
	return 0;
}
365
366static void ipa_del_dflt_wan_rt_tables(void)
367{
368 struct ipa_ioc_del_rt_rule *rt_rule;
369 struct ipa_rt_rule_del *rt_rule_entry;
370 int len;
371
372 len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
373 sizeof(struct ipa_rt_rule_del);
374 rt_rule = kzalloc(len, GFP_KERNEL);
375 if (!rt_rule) {
376 IPAWANERR("unable to allocate memory for del route rule\n");
377 return;
378 }
379
380 memset(rt_rule, 0, len);
381 rt_rule->commit = 1;
382 rt_rule->num_hdls = 1;
383 rt_rule->ip = IPA_IP_v4;
384
385 rt_rule_entry = &rt_rule->hdl[0];
386 rt_rule_entry->status = -1;
387 rt_rule_entry->hdl = dflt_v4_wan_rt_hdl;
388
389 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
390 rt_rule_entry->hdl, IPA_IP_v4);
391 if (ipa2_del_rt_rule(rt_rule) ||
392 (rt_rule_entry->status)) {
393 IPAWANERR("Routing rule deletion failed!\n");
394 }
395
396 rt_rule->ip = IPA_IP_v6;
397 rt_rule_entry->hdl = dflt_v6_wan_rt_hdl;
398 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
399 rt_rule_entry->hdl, IPA_IP_v6);
400 if (ipa2_del_rt_rule(rt_rule) ||
401 (rt_rule_entry->status)) {
402 IPAWANERR("Routing rule deletion failed!\n");
403 }
404
405 kfree(rt_rule);
406}
407
/* copy_ul_filter_rule_to_ipa() - cache the modem's UL filter rules
 * @rule_req: QMI install-filter-rule request received from the modem
 * @rule_hdl: out array; receives one locally-generated handle per rule
 *            (UL_FILTER_RULE_HANDLE_START + index)
 *
 * Copies every filter spec (equation attributes field by field) into
 * ipa_qmi_ctx->q6_ul_filter_rule[] and updates the global num_q6_rule.
 * On any failure the cache and num_q6_rule are reset to zero.
 *
 * NOTE(review): this function writes ipa_qmi_ctx->q6_ul_filter_rule and
 * num_q6_rule without taking ipa_qmi_lock here; presumably the caller
 * serializes access - confirm against the QMI service code.
 *
 * Return: 0 on success, -EINVAL if the list is absent or inconsistent.
 */
int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
		*rule_req, uint32_t *rule_hdl)
{
	int i, j;

	if (rule_req->filter_spec_list_valid == true) {
		num_q6_rule = rule_req->filter_spec_list_len;
		IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
	} else {
		num_q6_rule = 0;
		IPAWANERR("got no UL rules from modem\n");
		return -EINVAL;
	}

	/* copy UL filter rules from Modem */
	for (i = 0; i < num_q6_rule; i++) {
		/* check if the rules overflow the fixed-size cache */
		if (i == MAX_NUM_Q6_RULE) {
			IPAWANERR("Reaching (%d) max cache ",
				MAX_NUM_Q6_RULE);
			IPAWANERR(" however total (%d)\n",
				num_q6_rule);
			goto failure;
		}
		/* construct a local UL filter-rule handle for the QMI
		 * use-case; handles are sequential from the start value
		 */
		ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl =
			UL_FILTER_RULE_HANDLE_START + i;
		rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
		ipa_qmi_ctx->q6_ul_filter_rule[i].ip =
			rule_req->filter_spec_list[i].ip_type;
		ipa_qmi_ctx->q6_ul_filter_rule[i].action =
			rule_req->filter_spec_list[i].filter_action;
		/* optional QMI fields are copied only when flagged valid */
		if (rule_req->filter_spec_list[i].is_routing_table_index_valid
			== true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
			rule_req->filter_spec_list[i].route_table_index;
		if (rule_req->filter_spec_list[i].is_mux_id_valid == true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].mux_id =
			rule_req->filter_spec_list[i].mux_id;
		/* equation attributes: scalar fields first */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
			rule_req->filter_spec_list[i].filter_rule.
			rule_eq_bitmap;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			tos_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
			rule_req->filter_spec_list[i].filter_rule.tos_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			protocol_eq_present = rule_req->filter_spec_list[i].
			filter_rule.protocol_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
			rule_req->filter_spec_list[i].filter_rule.
			protocol_eq;

		/* variable-length equation arrays, each bounded by its
		 * own count field from the request
		 */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_range_16;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_range_16[j].offset = rule_req->
			filter_spec_list[i].filter_rule.
			ihl_offset_range_16[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_range_16[j].range_low = rule_req->
			filter_spec_list[i].filter_rule.
			ihl_offset_range_16[j].range_low;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_range_16[j].range_high = rule_req->
			filter_spec_list[i].filter_rule.
			ihl_offset_range_16[j].range_high;
		}
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			offset_meq_32[j].offset = rule_req->filter_spec_list[i].
			filter_rule.offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			offset_meq_32[j].mask = rule_req->filter_spec_list[i].
			filter_rule.offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			offset_meq_32[j].value = rule_req->filter_spec_list[i].
			filter_rule.offset_meq_32[j].value;
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
			rule_req->filter_spec_list[i].filter_rule.tc_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
			rule_req->filter_spec_list[i].filter_rule.tc_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			flow_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
			rule_req->filter_spec_list[i].filter_rule.flow_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_meq_32 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].
			eq_attrib.num_ihl_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_meq_32[j].offset = rule_req->
			filter_spec_list[i].filter_rule.
			ihl_offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_meq_32[j].mask = rule_req->
			filter_spec_list[i].filter_rule.
			ihl_offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_meq_32[j].value = rule_req->
			filter_spec_list[i].filter_rule.
			ihl_offset_meq_32[j].value;
		}
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_128;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_128; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			offset_meq_128[j].offset = rule_req->
			filter_spec_list[i].filter_rule.
			offset_meq_128[j].offset;
			/* 128-bit mask/value are raw 16-byte buffers */
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].mask,
				rule_req->filter_spec_list[i].
				filter_rule.offset_meq_128[j].mask, 16);
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].value, rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].value, 16);
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32_present = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.offset = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.mask = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.mask;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
			value = rule_req->filter_spec_list[i].filter_rule.
			metadata_meq32.value;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ipv4_frag_eq_present = rule_req->filter_spec_list[i].
			filter_rule.ipv4_frag_eq_present;
	}

	/* optional XLAT index list: every index must refer to one of the
	 * rules copied above
	 */
	if (rule_req->xlat_filter_indices_list_valid) {
		if (rule_req->xlat_filter_indices_list_len > num_q6_rule) {
			IPAWANERR("Number of xlat indices is not valid: %d\n",
					rule_req->xlat_filter_indices_list_len);
			goto failure;
		}
		IPAWANDBG("Receive %d XLAT indices: ",
				rule_req->xlat_filter_indices_list_len);
		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
			IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
		IPAWANDBG("\n");

		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
			if (rule_req->xlat_filter_indices_list[i]
				>= num_q6_rule) {
				IPAWANERR("Xlat rule idx is wrong: %d\n",
					rule_req->xlat_filter_indices_list[i]);
				goto failure;
			} else {
				ipa_qmi_ctx->q6_ul_filter_rule
					[rule_req->xlat_filter_indices_list[i]]
					.is_xlat_rule = 1;
				IPAWANDBG("Rule %d is xlat rule\n",
					rule_req->xlat_filter_indices_list[i]);
			}
		}
	}
	goto success;

failure:
	/* leave no partially-copied state behind */
	num_q6_rule = 0;
	memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
		sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
	return -EINVAL;

success:
	return 0;
}
617
/* wwan_add_ul_flt_rule_to_ipa() - install the cached modem UL filter
 * rules into the A7 (apps) filtering tables, one rule at a time, and
 * notify Q6 which per-IP-version indices/handles were installed.
 *
 * Reads the cache filled by copy_ul_filter_rule_to_ipa() under
 * ipa_qmi_lock. Individual rule failures are logged and the loop
 * continues; the last failure is reflected in the return value.
 *
 * Return: 0 on success, -EFAULT on add/notify failure, -ENOMEM on
 * allocation failure.
 */
static int wwan_add_ul_flt_rule_to_ipa(void)
{
	u32 pyld_sz;
	int i, retval = 0;
	int num_v4_rule = 0, num_v6_rule = 0;
	struct ipa_ioc_add_flt_rule *param;
	struct ipa_flt_rule_add flt_rule_entry;
	struct ipa_fltr_installed_notif_req_msg_v01 *req;

	if (ipa_qmi_ctx == NULL) {
		IPAWANERR("ipa_qmi_ctx is NULL!\n");
		return -EFAULT;
	}

	pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
	   sizeof(struct ipa_flt_rule_add);
	param = kzalloc(pyld_sz, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	req = (struct ipa_fltr_installed_notif_req_msg_v01 *)
		kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
			GFP_KERNEL);
	if (!req) {
		kfree(param);
		return -ENOMEM;
	}

	/* one rule per ipa2_add_flt_rule() call, committed immediately */
	param->commit = 1;
	param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
	param->global = false;
	param->num_rules = (uint8_t)1;

	mutex_lock(&ipa_qmi_lock);
	for (i = 0; i < num_q6_rule; i++) {
		param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
		flt_rule_entry.at_rear = true;
		flt_rule_entry.rule.action =
			ipa_qmi_ctx->q6_ul_filter_rule[i].action;
		flt_rule_entry.rule.rt_tbl_idx
			= ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
		flt_rule_entry.rule.retain_hdr = true;

		/* debug rt-hdl */
		IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
			i, flt_rule_entry.rule.rt_tbl_idx);
		/* rules arrive from the modem already in equation form */
		flt_rule_entry.rule.eq_attrib_type = true;
		memcpy(&(flt_rule_entry.rule.eq_attrib),
			&ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
			sizeof(struct ipa_ipfltri_rule_eq));
		memcpy(&(param->rules[0]), &flt_rule_entry,
			sizeof(struct ipa_flt_rule_add));
		if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
			retval = -EFAULT;
			IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
		} else {
			/* store the rule handle for later deletion */
			ipa_qmi_ctx->q6_ul_filter_rule_hdl[i] =
				param->rules[0].flt_rule_hdl;
		}
	}
	mutex_unlock(&ipa_qmi_lock);

	/* send ipa_fltr_installed_notif_req_msg_v01 to Q6; indices are
	 * counted separately per IP version
	 */
	req->source_pipe_index =
		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
	req->install_status = QMI_RESULT_SUCCESS_V01;
	req->filter_index_list_len = num_q6_rule;
	mutex_lock(&ipa_qmi_lock);
	for (i = 0; i < num_q6_rule; i++) {
		if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) {
			req->filter_index_list[i].filter_index = num_v4_rule;
			num_v4_rule++;
		} else {
			req->filter_index_list[i].filter_index = num_v6_rule;
			num_v6_rule++;
		}
		req->filter_index_list[i].filter_handle =
			ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
	}
	mutex_unlock(&ipa_qmi_lock);
	if (qmi_filter_notify_send(req)) {
		IPAWANDBG("add filter rule index on A7-RX failed\n");
		retval = -EFAULT;
	}
	/* remember how many rules to delete on the next refresh */
	old_num_q6_rule = num_q6_rule;
	IPAWANDBG("add (%d) filter rule index on A7-RX\n",
			old_num_q6_rule);
	kfree(param);
	kfree(req);
	return retval;
}
711
/* wwan_del_ul_flt_rule_to_ipa() - delete the previously installed A7 UL
 * filter rules (old_num_q6_rule of them), one handle per call, then
 * clear the installed indication.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EFAULT if a
 * deletion fails (remaining rules are left installed in that case).
 */
static int wwan_del_ul_flt_rule_to_ipa(void)
{
	u32 pyld_sz;
	int i, retval = 0;
	struct ipa_ioc_del_flt_rule *param;
	struct ipa_flt_rule_del flt_rule_entry;

	pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
	   sizeof(struct ipa_flt_rule_del);
	param = kzalloc(pyld_sz, GFP_KERNEL);
	if (!param) {
		IPAWANERR("kzalloc failed\n");
		return -ENOMEM;
	}

	param->commit = 1;
	param->num_hdls = (uint8_t) 1;	/* one handle per request */

	for (i = 0; i < old_num_q6_rule; i++) {
		param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
		flt_rule_entry.hdl = ipa_qmi_ctx->q6_ul_filter_rule_hdl[i];
		/* debug rt-hdl */
		IPAWANDBG("delete-IPA rule index(%d)\n", i);
		memcpy(&(param->hdl[0]), &flt_rule_entry,
			sizeof(struct ipa_flt_rule_del));
		if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
			IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
			kfree(param);
			return -EFAULT;
		}
	}

	/* set UL filter-rule add-indication */
	a7_ul_flt_set = false;
	old_num_q6_rule = 0;

	kfree(param);
	return retval;
}
752
753static int find_mux_channel_index(uint32_t mux_id)
754{
755 int i;
756
757 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
758 if (mux_id == mux_channel[i].mux_id)
759 return i;
760 }
761 return MAX_NUM_OF_MUX_CHANNEL;
762}
763
764static int find_vchannel_name_index(const char *vchannel_name)
765{
766 int i;
767
768 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
769 if (strcmp(mux_channel[i].vchannel_name, vchannel_name == 0))
770 return i;
771 }
772 return MAX_NUM_OF_MUX_CHANNEL;
773}
774
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530775static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
776{
777 int i;
778
779 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
780 if (strcmp(mux_channel[i].vchannel_name,
781 upstreamIface) == 0)
782 return IPA_UPSTEAM_MODEM;
783 }
784
785 if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
786 return IPA_UPSTEAM_WLAN;
787 else
788 return IPA_UPSTEAM_MAX;
789}
790
/* wwan_register_to_ipa() - register one mux channel's interface with IPA
 * @index: index into mux_channel[] of the channel to register
 *
 * Builds the Tx (v4/v6 header properties), Rx (metadata-based filter
 * properties) and Ext (modem UL filter rules tagged with this mux id)
 * property sets and registers them via ipa2_register_intf_ext(). A QMAP
 * header for the channel is created first if not already set.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int wwan_register_to_ipa(int index)
{
	struct ipa_tx_intf tx_properties = {0};
	struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
	struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
	struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
	struct ipa_rx_intf rx_properties = {0};
	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
	struct ipa_ext_intf ext_properties = {0};
	struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
	u32 pyld_sz;
	int ret = 0, i;

	IPAWANDBG("index(%d) device[%s]:\n", index,
		mux_channel[index].vchannel_name);
	/* create the per-channel QMAP header once */
	if (!mux_channel[index].mux_hdr_set) {
		ret = ipa_add_qmap_hdr(mux_channel[index].mux_id,
		      &mux_channel[index].hdr_hdl);
		if (ret) {
			IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
			return ret;
		}
		mux_channel[index].mux_hdr_set = true;
	}
	/* Tx: both IP versions point at the WAN consumer pipe and share
	 * the same QMAP header name built from the v4 prefix + mux id
	 */
	tx_properties.prop = tx_ioc_properties;
	tx_ipv4_property = &tx_properties.prop[0];
	tx_ipv4_property->ip = IPA_IP_v4;
	tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
	snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		 A2_MUX_HDR_NAME_V4_PREF,
		 mux_channel[index].mux_id);
	tx_ipv6_property = &tx_properties.prop[1];
	tx_ipv6_property->ip = IPA_IP_v6;
	tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
	/* no need use A2_MUX_HDR_NAME_V6_PREF, same header */
	snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		 A2_MUX_HDR_NAME_V4_PREF,
		 mux_channel[index].mux_id);
	tx_properties.num_props = 2;

	/* Rx: match this channel by mux id carried in the metadata word */
	rx_properties.prop = rx_ioc_properties;
	rx_ipv4_property = &rx_properties.prop[0];
	rx_ipv4_property->ip = IPA_IP_v4;
	rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
	rx_ipv4_property->attrib.meta_data =
		mux_channel[index].mux_id << WWAN_METADATA_SHFT;
	rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
	rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
	rx_ipv6_property = &rx_properties.prop[1];
	rx_ipv6_property->ip = IPA_IP_v6;
	rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
	rx_ipv6_property->attrib.meta_data =
		mux_channel[index].mux_id << WWAN_METADATA_SHFT;
	rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
	rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
	rx_properties.num_props = 2;

	/* Ext: one property per cached modem UL filter rule */
	pyld_sz = num_q6_rule *
	   sizeof(struct ipa_ioc_ext_intf_prop);
	ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
	if (!ext_ioc_properties) {
		IPAWANERR("Error allocate memory\n");
		return -ENOMEM;
	}

	ext_properties.prop = ext_ioc_properties;
	ext_properties.excp_pipe_valid = true;
	ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
	ext_properties.num_props = num_q6_rule;
	for (i = 0; i < num_q6_rule; i++) {
		memcpy(&(ext_properties.prop[i]),
			&(ipa_qmi_ctx->q6_ul_filter_rule[i]),
			sizeof(struct ipa_ioc_ext_intf_prop));
		/* tag each copied rule with this channel's mux id */
		ext_properties.prop[i].mux_id = mux_channel[index].mux_id;
		IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
			ext_properties.prop[i].ip,
			ext_properties.prop[i].rt_tbl_idx);
		IPAWANDBG("action: %d mux:%d\n",
			ext_properties.prop[i].action,
			ext_properties.prop[i].mux_id);
	}
	ret = ipa2_register_intf_ext(mux_channel[index].
		vchannel_name, &tx_properties,
		&rx_properties, &ext_properties);
	if (ret) {
		IPAWANERR("[%s]:ipa2_register_intf failed %d\n",
			mux_channel[index].vchannel_name, ret);
		goto fail;
	}
	mux_channel[index].ul_flt_reg = true;
fail:
	kfree(ext_ioc_properties);
	return ret;
}
887
888static void ipa_cleanup_deregister_intf(void)
889{
890 int i;
891 int ret;
892
893 for (i = 0; i < rmnet_index; i++) {
894 if (mux_channel[i].ul_flt_reg) {
895 ret = ipa2_deregister_intf(
896 mux_channel[i].vchannel_name);
897 if (ret < 0) {
898 IPAWANERR("de-register device %s(%d) failed\n",
899 mux_channel[i].vchannel_name,
900 i);
901 return;
902 }
903 IPAWANDBG("de-register device %s(%d) success\n",
904 mux_channel[i].vchannel_name,
905 i);
906 }
907 mux_channel[i].ul_flt_reg = false;
908 }
909}
910
/* wwan_update_mux_channel_prop() - refresh A7 UL filter rules and
 * re-register every mux channel's Tx/Rx/Ext properties with IPA.
 *
 * If egress is already configured and the modem does not filter on the
 * embedded pipe, previously installed UL rules are deleted and the
 * current cache is re-installed first. Then all registered interfaces
 * are deregistered and registered again so their Ext properties pick up
 * the new rules.
 *
 * Return: 0 on success, negative errno on failure.
 */
int wwan_update_mux_channel_prop(void)
{
	int ret = 0, i;
	/* install UL filter rules */
	if (egress_set) {
		if (ipa_qmi_ctx &&
			ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
			IPAWANDBG("setup UL filter rules\n");
			if (a7_ul_flt_set) {
				IPAWANDBG("del previous UL filter rules\n");
				/* delete rule hdlers */
				ret = wwan_del_ul_flt_rule_to_ipa();
				if (ret) {
					IPAWANERR("failed to del old rules\n");
					return -EINVAL;
				}
				IPAWANDBG("deleted old UL rules\n");
			}
			ret = wwan_add_ul_flt_rule_to_ipa();
		}
		/* ret stays 0 when the modem filters the embedded pipe */
		if (ret)
			IPAWANERR("failed to install UL rules\n");
		else
			a7_ul_flt_set = true;
	}
	/* update Tx/Rx/Ext property */
	IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
	if (rmnet_index == 0) {
		IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
		return ret;
	}

	ipa_cleanup_deregister_intf();

	for (i = 0; i < rmnet_index; i++) {
		ret = wwan_register_to_ipa(i);
		if (ret < 0) {
			IPAWANERR("failed to re-regist %s, mux %d, index %d\n",
				mux_channel[i].vchannel_name,
				mux_channel[i].mux_id,
				i);
			return -ENODEV;
		}
		IPAWANERR("dev(%s) has registered to IPA\n",
			mux_channel[i].vchannel_name);
		mux_channel[i].ul_flt_reg = true;
	}
	return ret;
}
960
961#ifdef INIT_COMPLETION
962#define reinit_completion(x) INIT_COMPLETION(*(x))
963#endif /* INIT_COMPLETION */
964
965static int __ipa_wwan_open(struct net_device *dev)
966{
967 struct wwan_private *wwan_ptr = netdev_priv(dev);
968
969 IPAWANDBG("[%s] __wwan_open()\n", dev->name);
970 if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
971 reinit_completion(&wwan_ptr->resource_granted_completion);
972 wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
973
974 if (ipa_rmnet_res.ipa_napi_enable)
975 napi_enable(&(wwan_ptr->napi));
976 return 0;
977}
978
979/**
980 * wwan_open() - Opens the wwan network interface. Opens logical
981 * channel on A2 MUX driver and starts the network stack queue
982 *
983 * @dev: network device
984 *
985 * Return codes:
986 * 0: success
987 * -ENODEV: Error while opening logical channel on A2 MUX driver
988 */
989static int ipa_wwan_open(struct net_device *dev)
990{
991 int rc = 0;
992
993 IPAWANDBG("[%s] wwan_open()\n", dev->name);
994 rc = __ipa_wwan_open(dev);
995 if (rc == 0)
996 netif_start_queue(dev);
997 return rc;
998}
999
1000static int __ipa_wwan_close(struct net_device *dev)
1001{
1002 struct wwan_private *wwan_ptr = netdev_priv(dev);
1003 int rc = 0;
1004
1005 if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
1006 wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
1007 /* do not close wwan port once up, this causes
1008 * remote side to hang if tried to open again
1009 */
1010 reinit_completion(&wwan_ptr->resource_granted_completion);
1011 if (ipa_rmnet_res.ipa_napi_enable)
1012 napi_disable(&(wwan_ptr->napi));
1013 rc = ipa2_deregister_intf(dev->name);
1014 if (rc) {
1015 IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n",
1016 dev->name, rc);
1017 return rc;
1018 }
1019 return rc;
1020 } else {
1021 return -EBADF;
1022 }
1023}
1024
1025/**
1026 * ipa_wwan_stop() - Stops the wwan network interface. Closes
1027 * logical channel on A2 MUX driver and stops the network stack
1028 * queue
1029 *
1030 * @dev: network device
1031 *
1032 * Return codes:
1033 * 0: success
1034 * -ENODEV: Error while opening logical channel on A2 MUX driver
1035 */
1036static int ipa_wwan_stop(struct net_device *dev)
1037{
1038 IPAWANDBG("[%s] ipa_wwan_stop()\n", dev->name);
1039 __ipa_wwan_close(dev);
1040 netif_stop_queue(dev);
1041 return 0;
1042}
1043
1044static int ipa_wwan_change_mtu(struct net_device *dev, int new_mtu)
1045{
1046 if (0 > new_mtu || WWAN_DATA_LEN < new_mtu)
1047 return -EINVAL;
1048 IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
1049 dev->name, dev->mtu, new_mtu);
1050 dev->mtu = new_mtu;
1051 return 0;
1052}
1053
1054/**
1055 * ipa_wwan_xmit() - Transmits an skb.
1056 *
1057 * @skb: skb to be transmitted
1058 * @dev: network device
1059 *
1060 * Return codes:
1061 * 0: success
1062 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
1063 * later
1064 * -EFAULT: Error while transmitting the skb
1065 */
1066static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
1067{
1068 int ret = 0;
1069 bool qmap_check;
1070 struct wwan_private *wwan_ptr = netdev_priv(dev);
1071 struct ipa_tx_meta meta;
1072
1073 if (skb->protocol != htons(ETH_P_MAP)) {
1074 IPAWANDBG
1075 ("SW filtering out none QMAP packet received from %s",
1076 current->comm);
Sunil Paidimarri6c818e82016-10-17 18:33:13 -07001077 dev_kfree_skb_any(skb);
1078 dev->stats.tx_dropped++;
Amir Levy9659e592016-10-27 18:08:27 +03001079 return NETDEV_TX_OK;
1080 }
1081
1082 qmap_check = RMNET_MAP_GET_CD_BIT(skb);
1083 if (netif_queue_stopped(dev)) {
1084 if (qmap_check &&
1085 atomic_read(&wwan_ptr->outstanding_pkts) <
1086 wwan_ptr->outstanding_high_ctl) {
1087 pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
1088 goto send;
1089 } else {
1090 pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
1091 return NETDEV_TX_BUSY;
1092 }
1093 }
1094
1095 /* checking High WM hit */
1096 if (atomic_read(&wwan_ptr->outstanding_pkts) >=
1097 wwan_ptr->outstanding_high) {
1098 if (!qmap_check) {
1099 IPAWANDBG("pending(%d)/(%d)- stop(%d), qmap_chk(%d)\n",
1100 atomic_read(&wwan_ptr->outstanding_pkts),
1101 wwan_ptr->outstanding_high,
1102 netif_queue_stopped(dev),
1103 qmap_check);
1104 netif_stop_queue(dev);
1105 return NETDEV_TX_BUSY;
1106 }
1107 }
1108
1109send:
1110 /* IPA_RM checking start */
1111 ret = ipa_rm_inactivity_timer_request_resource(
1112 IPA_RM_RESOURCE_WWAN_0_PROD);
1113 if (ret == -EINPROGRESS) {
1114 netif_stop_queue(dev);
1115 return NETDEV_TX_BUSY;
1116 }
1117 if (ret) {
1118 pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
1119 dev->name, ret);
Sunil Paidimarri6c818e82016-10-17 18:33:13 -07001120 dev_kfree_skb_any(skb);
1121 dev->stats.tx_dropped++;
Amir Levy9659e592016-10-27 18:08:27 +03001122 return -EFAULT;
1123 }
1124 /* IPA_RM checking end */
1125
1126 if (qmap_check) {
1127 memset(&meta, 0, sizeof(meta));
1128 meta.pkt_init_dst_ep_valid = true;
1129 meta.pkt_init_dst_ep_remote = true;
1130 ret = ipa2_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
1131 } else {
1132 ret = ipa2_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
1133 }
1134
1135 if (ret) {
1136 ret = NETDEV_TX_BUSY;
Amir Levy9659e592016-10-27 18:08:27 +03001137 goto out;
1138 }
1139
1140 atomic_inc(&wwan_ptr->outstanding_pkts);
1141 dev->stats.tx_packets++;
1142 dev->stats.tx_bytes += skb->len;
1143 ret = NETDEV_TX_OK;
1144out:
1145 ipa_rm_inactivity_timer_release_resource(
1146 IPA_RM_RESOURCE_WWAN_0_PROD);
1147 return ret;
1148}
1149
/* ndo_tx_timeout hook: invoked by the net core watchdog when the TX
 * queue has been stopped for longer than dev->watchdog_timeo. Only
 * logs the UL data stall; no recovery action is taken here.
 */
static void ipa_wwan_tx_timeout(struct net_device *dev)
{
	IPAWANERR("[%s] ipa_wwan_tx_timeout(), data stall in UL\n", dev->name);
}
1154
1155/**
1156 * apps_ipa_tx_complete_notify() - Rx notify
1157 *
1158 * @priv: driver context
1159 * @evt: event type
1160 * @data: data provided with event
1161 *
1162 * Check that the packet is the one we sent and release it
1163 * This function will be called in defered context in IPA wq.
1164 */
1165static void apps_ipa_tx_complete_notify(void *priv,
1166 enum ipa_dp_evt_type evt,
1167 unsigned long data)
1168{
1169 struct sk_buff *skb = (struct sk_buff *)data;
1170 struct net_device *dev = (struct net_device *)priv;
1171 struct wwan_private *wwan_ptr;
1172
1173 if (dev != ipa_netdevs[0]) {
1174 IPAWANDBG("Received pre-SSR packet completion\n");
1175 dev_kfree_skb_any(skb);
1176 return;
1177 }
1178
1179 if (evt != IPA_WRITE_DONE) {
1180 IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
1181 dev_kfree_skb_any(skb);
1182 dev->stats.tx_dropped++;
1183 return;
1184 }
1185
1186 wwan_ptr = netdev_priv(dev);
1187 atomic_dec(&wwan_ptr->outstanding_pkts);
1188 __netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
1189 if (!atomic_read(&is_ssr) &&
1190 netif_queue_stopped(wwan_ptr->net) &&
1191 atomic_read(&wwan_ptr->outstanding_pkts) <
1192 (wwan_ptr->outstanding_low)) {
1193 IPAWANDBG("Outstanding low (%d) - wake up queue\n",
1194 wwan_ptr->outstanding_low);
1195 netif_wake_queue(wwan_ptr->net);
1196 }
1197 __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
1198 dev_kfree_skb_any(skb);
1199 ipa_rm_inactivity_timer_release_resource(
1200 IPA_RM_RESOURCE_WWAN_0_PROD);
1201}
1202
1203/**
1204 * apps_ipa_packet_receive_notify() - Rx notify
1205 *
1206 * @priv: driver context
1207 * @evt: event type
1208 * @data: data provided with event
1209 *
1210 * IPA will pass a packet to the Linux network stack with skb->data
1211 */
1212static void apps_ipa_packet_receive_notify(void *priv,
1213 enum ipa_dp_evt_type evt,
1214 unsigned long data)
1215{
1216 struct net_device *dev = (struct net_device *)priv;
1217
1218 if (evt == IPA_RECEIVE) {
1219 struct sk_buff *skb = (struct sk_buff *)data;
1220 int result;
1221 unsigned int packet_len = skb->len;
1222
1223 IPAWANDBG("Rx packet was received");
1224 skb->dev = ipa_netdevs[0];
1225 skb->protocol = htons(ETH_P_MAP);
1226
1227 if (ipa_rmnet_res.ipa_napi_enable) {
1228 trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets);
1229 result = netif_receive_skb(skb);
1230 } else {
1231 if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
1232 == 0) {
1233 trace_rmnet_ipa_netifni(dev->stats.rx_packets);
1234 result = netif_rx_ni(skb);
1235 } else {
1236 trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
1237 result = netif_rx(skb);
1238 }
1239 }
1240
1241 if (result) {
1242 pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
1243 __func__, __LINE__);
1244 dev->stats.rx_dropped++;
1245 }
1246 dev->stats.rx_packets++;
1247 dev->stats.rx_bytes += packet_len;
1248 } else if (evt == IPA_CLIENT_START_POLL)
1249 ipa_rmnet_rx_cb(priv);
1250 else if (evt == IPA_CLIENT_COMP_NAPI) {
1251 struct wwan_private *wwan_ptr = netdev_priv(dev);
1252
1253 if (ipa_rmnet_res.ipa_napi_enable)
1254 napi_complete(&(wwan_ptr->napi));
1255 } else
1256 IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
1257
1258}
1259
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001260static int handle_ingress_format(struct net_device *dev,
1261 struct rmnet_ioctl_extended_s *in)
1262{
1263 int ret = 0;
1264 struct rmnet_phys_ep_conf_s *ep_cfg;
1265
1266 IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
1267 if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
1268 ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en =
1269 IPA_ENABLE_CS_OFFLOAD_DL;
1270
1271 if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
1272 IPAWANERR("get AGG size %d count %d\n",
1273 in->u.ingress_format.agg_size,
1274 in->u.ingress_format.agg_count);
1275
1276 ret = ipa_disable_apps_wan_cons_deaggr(
1277 in->u.ingress_format.agg_size,
1278 in->u.ingress_format.agg_count);
1279
1280 if (!ret) {
1281 ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit =
1282 in->u.ingress_format.agg_size;
1283 ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
1284 in->u.ingress_format.agg_count;
1285
1286 if (ipa_rmnet_res.ipa_napi_enable) {
1287 ipa_to_apps_ep_cfg.recycle_enabled = true;
1288 ep_cfg = (struct rmnet_phys_ep_conf_s *)
1289 rcu_dereference(dev->rx_handler_data);
1290 ep_cfg->recycle = ipa_recycle_wan_skb;
1291 pr_info("Wan Recycle Enabled\n");
1292 }
1293 }
1294 }
1295
1296 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
1297 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
1298 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
1299 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
1300 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
1301
1302 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
1303 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
1304 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding =
1305 true;
1306 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
1307 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
1308 ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
1309
1310 ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
1311 ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify;
1312 ipa_to_apps_ep_cfg.priv = dev;
1313
1314 ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
Sunil Paidimarri226cf032016-10-14 13:33:08 -07001315 ipa_to_apps_ep_cfg.desc_fifo_sz =
1316 ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001317
1318 mutex_lock(&ipa_to_apps_pipe_handle_guard);
1319 if (atomic_read(&is_ssr)) {
1320 IPAWANDBG("In SSR sequence/recovery\n");
1321 mutex_unlock(&ipa_to_apps_pipe_handle_guard);
1322 return -EFAULT;
1323 }
1324 ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
1325 mutex_unlock(&ipa_to_apps_pipe_handle_guard);
1326
1327 if (ret)
1328 IPAWANERR("failed to configure ingress\n");
1329
1330 return ret;
1331}
1332
Amir Levy9659e592016-10-27 18:08:27 +03001333/**
1334 * ipa_wwan_ioctl() - I/O control for wwan network driver.
1335 *
1336 * @dev: network device
1337 * @ifr: ignored
1338 * @cmd: cmd to be excecuded. can be one of the following:
1339 * IPA_WWAN_IOCTL_OPEN - Open the network interface
1340 * IPA_WWAN_IOCTL_CLOSE - Close the network interface
1341 *
1342 * Return codes:
1343 * 0: success
1344 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
1345 * later
1346 * -EFAULT: Error while transmitting the skb
1347 */
1348static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1349{
1350 int rc = 0;
1351 int mru = 1000, epid = 1, mux_index, len;
1352 struct ipa_msg_meta msg_meta;
1353 struct ipa_wan_msg *wan_msg = NULL;
1354 struct rmnet_ioctl_extended_s extend_ioctl_data;
1355 struct rmnet_ioctl_data_s ioctl_data;
1356
1357 IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
1358 switch (cmd) {
1359 /* Set Ethernet protocol */
1360 case RMNET_IOCTL_SET_LLP_ETHERNET:
1361 break;
1362 /* Set RAWIP protocol */
1363 case RMNET_IOCTL_SET_LLP_IP:
1364 break;
1365 /* Get link protocol */
1366 case RMNET_IOCTL_GET_LLP:
1367 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
1368 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1369 sizeof(struct rmnet_ioctl_data_s)))
1370 rc = -EFAULT;
1371 break;
1372 /* Set QoS header enabled */
1373 case RMNET_IOCTL_SET_QOS_ENABLE:
1374 return -EINVAL;
1375 /* Set QoS header disabled */
1376 case RMNET_IOCTL_SET_QOS_DISABLE:
1377 break;
1378 /* Get QoS header state */
1379 case RMNET_IOCTL_GET_QOS:
1380 ioctl_data.u.operation_mode = RMNET_MODE_NONE;
1381 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1382 sizeof(struct rmnet_ioctl_data_s)))
1383 rc = -EFAULT;
1384 break;
1385 /* Get operation mode */
1386 case RMNET_IOCTL_GET_OPMODE:
1387 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
1388 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1389 sizeof(struct rmnet_ioctl_data_s)))
1390 rc = -EFAULT;
1391 break;
1392 /* Open transport port */
1393 case RMNET_IOCTL_OPEN:
1394 break;
1395 /* Close transport port */
1396 case RMNET_IOCTL_CLOSE:
1397 break;
1398 /* Flow enable */
1399 case RMNET_IOCTL_FLOW_ENABLE:
1400 IPAWANDBG("Received flow enable\n");
1401 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
1402 sizeof(struct rmnet_ioctl_data_s))) {
1403 rc = -EFAULT;
1404 break;
1405 }
1406 ipa_flow_control(IPA_CLIENT_USB_PROD, true,
1407 ioctl_data.u.tcm_handle);
1408 break;
1409 /* Flow disable */
1410 case RMNET_IOCTL_FLOW_DISABLE:
1411 IPAWANDBG("Received flow disable\n");
1412 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
1413 sizeof(struct rmnet_ioctl_data_s))) {
1414 rc = -EFAULT;
1415 break;
1416 }
1417 ipa_flow_control(IPA_CLIENT_USB_PROD, false,
1418 ioctl_data.u.tcm_handle);
1419 break;
1420 /* Set flow handle */
1421 case RMNET_IOCTL_FLOW_SET_HNDL:
1422 break;
1423
1424 /* Extended IOCTLs */
1425 case RMNET_IOCTL_EXTENDED:
1426 IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
1427 if (copy_from_user(&extend_ioctl_data,
1428 (u8 *)ifr->ifr_ifru.ifru_data,
1429 sizeof(struct rmnet_ioctl_extended_s))) {
1430 IPAWANERR("failed to copy extended ioctl data\n");
1431 rc = -EFAULT;
1432 break;
1433 }
1434 switch (extend_ioctl_data.extended_ioctl) {
1435 /* Get features */
1436 case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
1437 IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
1438 extend_ioctl_data.u.data =
1439 (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
1440 RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
1441 RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
1442 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1443 &extend_ioctl_data,
1444 sizeof(struct rmnet_ioctl_extended_s)))
1445 rc = -EFAULT;
1446 break;
1447 /* Set MRU */
1448 case RMNET_IOCTL_SET_MRU:
1449 mru = extend_ioctl_data.u.data;
1450 IPAWANDBG("get MRU size %d\n",
1451 extend_ioctl_data.u.data);
1452 break;
1453 /* Get MRU */
1454 case RMNET_IOCTL_GET_MRU:
1455 extend_ioctl_data.u.data = mru;
1456 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1457 &extend_ioctl_data,
1458 sizeof(struct rmnet_ioctl_extended_s)))
1459 rc = -EFAULT;
1460 break;
1461 /* GET SG support */
1462 case RMNET_IOCTL_GET_SG_SUPPORT:
1463 extend_ioctl_data.u.data =
1464 ipa_rmnet_res.ipa_advertise_sg_support;
1465 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1466 &extend_ioctl_data,
1467 sizeof(struct rmnet_ioctl_extended_s)))
1468 rc = -EFAULT;
1469 break;
1470 /* Get endpoint ID */
1471 case RMNET_IOCTL_GET_EPID:
1472 IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
1473 extend_ioctl_data.u.data = epid;
1474 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1475 &extend_ioctl_data,
1476 sizeof(struct rmnet_ioctl_extended_s)))
1477 rc = -EFAULT;
1478 if (copy_from_user(&extend_ioctl_data,
1479 (u8 *)ifr->ifr_ifru.ifru_data,
1480 sizeof(struct rmnet_ioctl_extended_s))) {
1481 IPAWANERR("copy extended ioctl data failed\n");
1482 rc = -EFAULT;
1483 break;
1484 }
1485 IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
1486 extend_ioctl_data.u.data);
1487 break;
1488 /* Endpoint pair */
1489 case RMNET_IOCTL_GET_EP_PAIR:
1490 IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
1491 extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
1492 ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
1493 extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
1494 ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
1495 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1496 &extend_ioctl_data,
1497 sizeof(struct rmnet_ioctl_extended_s)))
1498 rc = -EFAULT;
1499 if (copy_from_user(&extend_ioctl_data,
1500 (u8 *)ifr->ifr_ifru.ifru_data,
1501 sizeof(struct rmnet_ioctl_extended_s))) {
1502 IPAWANERR("copy extended ioctl data failed\n");
1503 rc = -EFAULT;
1504 break;
1505 }
1506 IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
1507 extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
1508 extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
1509 break;
1510 /* Get driver name */
1511 case RMNET_IOCTL_GET_DRIVER_NAME:
1512 memcpy(&extend_ioctl_data.u.if_name,
1513 ipa_netdevs[0]->name,
1514 sizeof(IFNAMSIZ));
1515 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1516 &extend_ioctl_data,
1517 sizeof(struct rmnet_ioctl_extended_s)))
1518 rc = -EFAULT;
1519 break;
1520 /* Add MUX ID */
1521 case RMNET_IOCTL_ADD_MUX_CHANNEL:
1522 mux_index = find_mux_channel_index(
1523 extend_ioctl_data.u.rmnet_mux_val.mux_id);
1524 if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
1525 IPAWANDBG("already setup mux(%d)\n",
1526 extend_ioctl_data.u.
1527 rmnet_mux_val.mux_id);
1528 return rc;
1529 }
1530 if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
1531 IPAWANERR("Exceed mux_channel limit(%d)\n",
1532 rmnet_index);
1533 return -EFAULT;
1534 }
1535 IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
1536 extend_ioctl_data.u.rmnet_mux_val.mux_id,
1537 extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
1538 /* cache the mux name and id */
1539 mux_channel[rmnet_index].mux_id =
1540 extend_ioctl_data.u.rmnet_mux_val.mux_id;
1541 memcpy(mux_channel[rmnet_index].vchannel_name,
1542 extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
1543 sizeof(mux_channel[rmnet_index].vchannel_name));
Skylar Changba7c5112017-04-14 19:23:05 -07001544 mux_channel[rmnet_index].vchannel_name[
1545 IFNAMSIZ - 1] = '\0';
1546
Amir Levy9659e592016-10-27 18:08:27 +03001547 IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
1548 mux_channel[rmnet_index].vchannel_name,
1549 mux_channel[rmnet_index].mux_id,
1550 rmnet_index);
1551 /* check if UL filter rules coming*/
1552 if (num_q6_rule != 0) {
1553 IPAWANERR("dev(%s) register to IPA\n",
1554 extend_ioctl_data.u.rmnet_mux_val.
1555 vchannel_name);
1556 rc = wwan_register_to_ipa(rmnet_index);
1557 if (rc < 0) {
1558 IPAWANERR("device %s reg IPA failed\n",
1559 extend_ioctl_data.u.
1560 rmnet_mux_val.vchannel_name);
1561 return -ENODEV;
1562 }
1563 mux_channel[rmnet_index].mux_channel_set = true;
1564 mux_channel[rmnet_index].ul_flt_reg = true;
1565 } else {
1566 IPAWANDBG("dev(%s) haven't registered to IPA\n",
1567 extend_ioctl_data.u.
1568 rmnet_mux_val.vchannel_name);
1569 mux_channel[rmnet_index].mux_channel_set = true;
1570 mux_channel[rmnet_index].ul_flt_reg = false;
1571 }
1572 rmnet_index++;
1573 break;
1574 case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
1575 IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
1576 if ((extend_ioctl_data.u.data) &
1577 RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
1578 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
1579 apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
1580 cs_offload_en =
1581 IPA_ENABLE_CS_OFFLOAD_UL;
1582 apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
1583 cs_metadata_hdr_offset = 1;
1584 } else {
1585 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
1586 }
1587 if ((extend_ioctl_data.u.data) &
1588 RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
1589 apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
1590 IPA_ENABLE_AGGR;
1591 else
1592 apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
1593 IPA_BYPASS_AGGR;
1594 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
1595 hdr_ofst_metadata_valid = 1;
1596 /* modem want offset at 0! */
1597 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
1598 apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst =
1599 IPA_CLIENT_APPS_LAN_WAN_PROD;
1600 apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC;
1601
1602 apps_to_ipa_ep_cfg.client =
1603 IPA_CLIENT_APPS_LAN_WAN_PROD;
1604 apps_to_ipa_ep_cfg.notify =
1605 apps_ipa_tx_complete_notify;
1606 apps_to_ipa_ep_cfg.desc_fifo_sz =
1607 IPA_SYS_TX_DATA_DESC_FIFO_SZ;
1608 apps_to_ipa_ep_cfg.priv = dev;
1609
1610 rc = ipa2_setup_sys_pipe(&apps_to_ipa_ep_cfg,
1611 &apps_to_ipa_hdl);
1612 if (rc)
1613 IPAWANERR("failed to config egress endpoint\n");
1614
1615 if (num_q6_rule != 0) {
1616 /* already got Q6 UL filter rules*/
1617 if (ipa_qmi_ctx &&
1618 ipa_qmi_ctx->modem_cfg_emb_pipe_flt
1619 == false)
1620 rc = wwan_add_ul_flt_rule_to_ipa();
1621 else
1622 rc = 0;
1623 egress_set = true;
1624 if (rc)
1625 IPAWANERR("install UL rules failed\n");
1626 else
1627 a7_ul_flt_set = true;
1628 } else {
1629 /* wait Q6 UL filter rules*/
1630 egress_set = true;
1631 IPAWANDBG("no UL-rules, egress_set(%d)\n",
1632 egress_set);
1633 }
1634 break;
1635 case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001636 rc = handle_ingress_format(dev, &extend_ioctl_data);
Amir Levy9659e592016-10-27 18:08:27 +03001637 break;
1638 case RMNET_IOCTL_SET_XLAT_DEV_INFO:
1639 wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
1640 GFP_KERNEL);
1641 if (!wan_msg) {
1642 IPAWANERR("Failed to allocate memory.\n");
1643 return -ENOMEM;
1644 }
1645 len = sizeof(wan_msg->upstream_ifname) >
1646 sizeof(extend_ioctl_data.u.if_name) ?
1647 sizeof(extend_ioctl_data.u.if_name) :
1648 sizeof(wan_msg->upstream_ifname);
1649 strlcpy(wan_msg->upstream_ifname,
1650 extend_ioctl_data.u.if_name, len);
1651 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
1652 msg_meta.msg_type = WAN_XLAT_CONNECT;
1653 msg_meta.msg_len = sizeof(struct ipa_wan_msg);
1654 rc = ipa2_send_msg(&msg_meta, wan_msg,
1655 ipa_wwan_msg_free_cb);
1656 if (rc) {
1657 IPAWANERR("Failed to send XLAT_CONNECT msg\n");
1658 kfree(wan_msg);
1659 }
1660 break;
1661 /* Get agg count */
1662 case RMNET_IOCTL_GET_AGGREGATION_COUNT:
1663 break;
1664 /* Set agg count */
1665 case RMNET_IOCTL_SET_AGGREGATION_COUNT:
1666 break;
1667 /* Get agg size */
1668 case RMNET_IOCTL_GET_AGGREGATION_SIZE:
1669 break;
1670 /* Set agg size */
1671 case RMNET_IOCTL_SET_AGGREGATION_SIZE:
1672 break;
1673 /* Do flow control */
1674 case RMNET_IOCTL_FLOW_CONTROL:
1675 break;
1676 /* For legacy use */
1677 case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
1678 break;
1679 /* Get HW/SW map */
1680 case RMNET_IOCTL_GET_HWSW_MAP:
1681 break;
1682 /* Set RX Headroom */
1683 case RMNET_IOCTL_SET_RX_HEADROOM:
1684 break;
1685 default:
1686 IPAWANERR("[%s] unsupported extended cmd[%d]",
1687 dev->name,
1688 extend_ioctl_data.extended_ioctl);
1689 rc = -EINVAL;
1690 }
1691 break;
1692 default:
1693 IPAWANERR("[%s] unsupported cmd[%d]",
1694 dev->name, cmd);
1695 rc = -EINVAL;
1696 }
1697 return rc;
1698}
1699
/* netdev callbacks for the raw-IP rmnet interface. MAC-address and
 * address-validation hooks are intentionally left NULL (0): the device
 * has no L2 address (addr_len is set to 0 in ipa_wwan_setup()).
 */
static const struct net_device_ops ipa_wwan_ops_ip = {
	.ndo_open = ipa_wwan_open,
	.ndo_stop = ipa_wwan_stop,
	.ndo_start_xmit = ipa_wwan_xmit,
	.ndo_tx_timeout = ipa_wwan_tx_timeout,
	.ndo_do_ioctl = ipa_wwan_ioctl,
	.ndo_change_mtu = ipa_wwan_change_mtu,
	.ndo_set_mac_address = 0,
	.ndo_validate_addr = 0,
};
1710
1711/**
1712 * wwan_setup() - Setups the wwan network driver.
1713 *
1714 * @dev: network device
1715 *
1716 * Return codes:
1717 * None
1718 */
1719
1720static void ipa_wwan_setup(struct net_device *dev)
1721{
1722 dev->netdev_ops = &ipa_wwan_ops_ip;
1723 ether_setup(dev);
1724 /* set this after calling ether_setup */
1725 dev->header_ops = 0; /* No header */
1726 dev->type = ARPHRD_RAWIP;
1727 dev->hard_header_len = 0;
1728 dev->mtu = WWAN_DATA_LEN;
1729 dev->addr_len = 0;
1730 dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
1731 dev->needed_headroom = HEADROOM_FOR_QMAP;
1732 dev->needed_tailroom = TAILROOM;
1733 dev->watchdog_timeo = 1000;
1734}
1735
/* IPA_RM related functions start*/
/* Q6 producer request/release run asynchronously: the delayed works
 * below invoke the actual IPA RM calls from workqueue context.
 */
static void q6_prod_rm_request_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(q6_con_rm_request, q6_prod_rm_request_resource);
static void q6_prod_rm_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(q6_con_rm_release, q6_prod_rm_release_resource);
1741
1742static void q6_prod_rm_request_resource(struct work_struct *work)
1743{
1744 int ret = 0;
1745
1746 ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
1747 if (ret < 0 && ret != -EINPROGRESS) {
1748 IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
1749 ret);
1750 return;
1751 }
1752}
1753
1754static int q6_rm_request_resource(void)
1755{
1756 queue_delayed_work(ipa_rm_q6_workqueue,
1757 &q6_con_rm_request, 0);
1758 return 0;
1759}
1760
1761static void q6_prod_rm_release_resource(struct work_struct *work)
1762{
1763 int ret = 0;
1764
1765 ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
1766 if (ret < 0 && ret != -EINPROGRESS) {
1767 IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
1768 ret);
1769 return;
1770 }
1771}
1772
1773
1774static int q6_rm_release_resource(void)
1775{
1776 queue_delayed_work(ipa_rm_q6_workqueue,
1777 &q6_con_rm_release, 0);
1778 return 0;
1779}
1780
1781
1782static void q6_rm_notify_cb(void *user_data,
1783 enum ipa_rm_event event,
1784 unsigned long data)
1785{
1786 switch (event) {
1787 case IPA_RM_RESOURCE_GRANTED:
1788 IPAWANDBG("%s: Q6_PROD GRANTED CB\n", __func__);
1789 break;
1790 case IPA_RM_RESOURCE_RELEASED:
1791 IPAWANDBG("%s: Q6_PROD RELEASED CB\n", __func__);
1792 break;
1793 default:
1794 return;
1795 }
1796}
1797static int q6_initialize_rm(void)
1798{
1799 struct ipa_rm_create_params create_params;
1800 struct ipa_rm_perf_profile profile;
1801 int result;
1802
1803 /* Initialize IPA_RM workqueue */
1804 ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req");
1805 if (!ipa_rm_q6_workqueue)
1806 return -ENOMEM;
1807
1808 memset(&create_params, 0, sizeof(create_params));
1809 create_params.name = IPA_RM_RESOURCE_Q6_PROD;
1810 create_params.reg_params.notify_cb = &q6_rm_notify_cb;
1811 result = ipa_rm_create_resource(&create_params);
1812 if (result)
1813 goto create_rsrc_err1;
1814 memset(&create_params, 0, sizeof(create_params));
1815 create_params.name = IPA_RM_RESOURCE_Q6_CONS;
1816 create_params.release_resource = &q6_rm_release_resource;
1817 create_params.request_resource = &q6_rm_request_resource;
1818 result = ipa_rm_create_resource(&create_params);
1819 if (result)
1820 goto create_rsrc_err2;
1821 /* add dependency*/
1822 result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
1823 IPA_RM_RESOURCE_APPS_CONS);
1824 if (result)
1825 goto add_dpnd_err;
1826 /* setup Performance profile */
1827 memset(&profile, 0, sizeof(profile));
1828 profile.max_supported_bandwidth_mbps = 100;
1829 result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
1830 &profile);
1831 if (result)
1832 goto set_perf_err;
1833 result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
1834 &profile);
1835 if (result)
1836 goto set_perf_err;
1837 return result;
1838
1839set_perf_err:
1840 ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
1841 IPA_RM_RESOURCE_APPS_CONS);
1842add_dpnd_err:
1843 result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
1844 if (result < 0)
1845 IPAWANERR("Error deleting resource %d, ret=%d\n",
1846 IPA_RM_RESOURCE_Q6_CONS, result);
1847create_rsrc_err2:
1848 result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
1849 if (result < 0)
1850 IPAWANERR("Error deleting resource %d, ret=%d\n",
1851 IPA_RM_RESOURCE_Q6_PROD, result);
1852create_rsrc_err1:
1853 destroy_workqueue(ipa_rm_q6_workqueue);
1854 return result;
1855}
1856
/* Tear down everything q6_initialize_rm() set up: remove the
 * Q6_PROD -> APPS_CONS dependency, delete both Q6 RM resources and
 * destroy the RM workqueue. Errors are logged and teardown continues.
 */
void q6_deinitialize_rm(void)
{
	int ret;

	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
			ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, ret);
	destroy_workqueue(ipa_rm_q6_workqueue);
}
1877
1878static void wake_tx_queue(struct work_struct *work)
1879{
1880 if (ipa_netdevs[0]) {
1881 __netif_tx_lock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
1882 netif_wake_queue(ipa_netdevs[0]);
1883 __netif_tx_unlock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
1884 }
1885}
1886
1887/**
1888 * ipa_rm_resource_granted() - Called upon
1889 * IPA_RM_RESOURCE_GRANTED event. Wakes up queue is was stopped.
1890 *
1891 * @work: work object supplied ny workqueue
1892 *
1893 * Return codes:
1894 * None
1895 */
1896static void ipa_rm_resource_granted(void *dev)
1897{
1898 IPAWANDBG("Resource Granted - starting queue\n");
1899 schedule_work(&ipa_tx_wakequeue_work);
1900}
1901
1902/**
1903 * ipa_rm_notify() - Callback function for RM events. Handles
1904 * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
1905 * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
1906 * workqueue.
1907 *
1908 * @dev: network device
1909 * @event: IPA RM event
1910 * @data: Additional data provided by IPA RM
1911 *
1912 * Return codes:
1913 * None
1914 */
1915static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
1916 unsigned long data)
1917{
1918 struct wwan_private *wwan_ptr = netdev_priv(dev);
1919
1920 pr_debug("%s: event %d\n", __func__, event);
1921 switch (event) {
1922 case IPA_RM_RESOURCE_GRANTED:
1923 if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
1924 complete_all(&wwan_ptr->resource_granted_completion);
1925 break;
1926 }
1927 ipa_rm_resource_granted(dev);
1928 break;
1929 case IPA_RM_RESOURCE_RELEASED:
1930 break;
1931 default:
1932 pr_err("%s: unknown event %d\n", __func__, event);
1933 break;
1934 }
1935}
1936
1937/* IPA_RM related functions end*/
1938
/* Forward declaration: modem SSR (subsystem restart) notifier
 * callback, registered through the notifier block below.
 */
static int ssr_notifier_cb(struct notifier_block *this,
			   unsigned long code,
			   void *data);

static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};
1946
/* Read the rmnet-ipa devicetree properties into @ipa_rmnet_drv_res:
 * SSR support, uC loading, scatter-gather advertisement, NAPI enable,
 * and the WAN RX descriptor FIFO size (kept at the compile-time
 * default when the property is absent). Always returns 0.
 */
static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
		struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
	int result;

	/* default, overridden below if the DT property exists */
	ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
	ipa_rmnet_drv_res->ipa_rmnet_ssr =
			of_property_read_bool(pdev->dev.of_node,
			"qcom,rmnet-ipa-ssr");
	pr_info("IPA SSR support = %s\n",
		ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
	ipa_rmnet_drv_res->ipa_loaduC =
			of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-loaduC");
	pr_info("IPA ipa-loaduC = %s\n",
		ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");

	ipa_rmnet_drv_res->ipa_advertise_sg_support =
		of_property_read_bool(pdev->dev.of_node,
		"qcom,ipa-advertise-sg-support");
	pr_info("IPA SG support = %s\n",
		ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");

	ipa_rmnet_drv_res->ipa_napi_enable =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-napi-enable");
	pr_info("IPA Napi Enable = %s\n",
		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");

	/* Get IPA WAN RX desc fifo size */
	result = of_property_read_u32(pdev->dev.of_node,
			"qcom,wan-rx-desc-size",
			&ipa_rmnet_drv_res->wan_rx_desc_size);
	if (result)
		pr_info("using default for wan-rx-desc-size = %u\n",
				ipa_rmnet_drv_res->wan_rx_desc_size);
	else
		IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
				ipa_rmnet_drv_res->wan_rx_desc_size);

	return 0;
}
1989
/* Driver-wide rmnet context; non-static so other IPA driver files can
 * reference it (e.g. the SSR-support flag is set here at probe time).
 */
struct ipa_rmnet_context ipa_rmnet_ctx;
1991
1992/**
1993 * ipa_wwan_probe() - Initialized the module and registers as a
1994 * network interface to the network stack
1995 *
1996 * Return codes:
1997 * 0: success
1998 * -ENOMEM: No memory available
1999 * -EFAULT: Internal error
2000 * -ENODEV: IPA driver not loaded
2001 */
2002static int ipa_wwan_probe(struct platform_device *pdev)
2003{
2004 int ret, i;
2005 struct net_device *dev;
2006 struct wwan_private *wwan_ptr;
2007 struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */
2008 struct ipa_rm_perf_profile profile; /* IPA_RM */
2009
2010 pr_info("rmnet_ipa started initialization\n");
2011
2012 if (!ipa2_is_ready()) {
2013 IPAWANERR("IPA driver not loaded\n");
2014 return -ENODEV;
2015 }
2016
2017 ret = get_ipa_rmnet_dts_configuration(pdev, &ipa_rmnet_res);
2018 ipa_rmnet_ctx.ipa_rmnet_ssr = ipa_rmnet_res.ipa_rmnet_ssr;
2019
2020 ret = ipa_init_q6_smem();
2021 if (ret) {
2022 IPAWANERR("ipa_init_q6_smem failed!\n");
2023 return ret;
2024 }
2025
2026 /* initialize tx/rx enpoint setup */
2027 memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
2028 memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
2029
2030 /* initialize ex property setup */
2031 num_q6_rule = 0;
2032 old_num_q6_rule = 0;
2033 rmnet_index = 0;
2034 egress_set = false;
2035 a7_ul_flt_set = false;
2036 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
2037 memset(&mux_channel[i], 0, sizeof(struct rmnet_mux_val));
2038
2039 /* start A7 QMI service/client */
2040 if (ipa_rmnet_res.ipa_loaduC)
2041 /* Android platform loads uC */
2042 ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
2043 else
2044 /* LE platform not loads uC */
2045 ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
2046
2047 /* construct default WAN RT tbl for IPACM */
2048 ret = ipa_setup_a7_qmap_hdr();
2049 if (ret)
2050 goto setup_a7_qmap_hdr_err;
2051 ret = ipa_setup_dflt_wan_rt_tables();
2052 if (ret)
2053 goto setup_dflt_wan_rt_tables_err;
2054
2055 if (!atomic_read(&is_ssr)) {
2056 /* Start transport-driver fd ioctl for ipacm for first init */
2057 ret = wan_ioctl_init();
2058 if (ret)
2059 goto wan_ioctl_init_err;
2060 } else {
2061 /* Enable sending QMI messages after SSR */
2062 wan_ioctl_enable_qmi_messages();
2063 }
2064
2065 /* initialize wan-driver netdev */
2066 dev = alloc_netdev(sizeof(struct wwan_private),
2067 IPA_WWAN_DEV_NAME,
2068 NET_NAME_UNKNOWN,
2069 ipa_wwan_setup);
2070 if (!dev) {
2071 IPAWANERR("no memory for netdev\n");
2072 ret = -ENOMEM;
2073 goto alloc_netdev_err;
2074 }
2075 ipa_netdevs[0] = dev;
2076 wwan_ptr = netdev_priv(dev);
2077 memset(wwan_ptr, 0, sizeof(*wwan_ptr));
2078 IPAWANDBG("wwan_ptr (private) = %p", wwan_ptr);
2079 wwan_ptr->net = dev;
2080 wwan_ptr->outstanding_high_ctl = DEFAULT_OUTSTANDING_HIGH_CTL;
2081 wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
2082 wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW;
2083 atomic_set(&wwan_ptr->outstanding_pkts, 0);
2084 spin_lock_init(&wwan_ptr->lock);
2085 init_completion(&wwan_ptr->resource_granted_completion);
2086
2087 if (!atomic_read(&is_ssr)) {
2088 /* IPA_RM configuration starts */
2089 ret = q6_initialize_rm();
2090 if (ret) {
2091 IPAWANERR("%s: q6_initialize_rm failed, ret: %d\n",
2092 __func__, ret);
2093 goto q6_init_err;
2094 }
2095 }
2096
2097 memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
2098 ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
2099 ipa_rm_params.reg_params.user_data = dev;
2100 ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
2101 ret = ipa_rm_create_resource(&ipa_rm_params);
2102 if (ret) {
2103 pr_err("%s: unable to create resourse %d in IPA RM\n",
2104 __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
2105 goto create_rsrc_err;
2106 }
2107 ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
2108 IPA_RM_INACTIVITY_TIMER);
2109 if (ret) {
2110 pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
2111 __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
2112 goto timer_init_err;
2113 }
2114 /* add dependency */
2115 ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2116 IPA_RM_RESOURCE_Q6_CONS);
2117 if (ret)
2118 goto add_dpnd_err;
2119 /* setup Performance profile */
2120 memset(&profile, 0, sizeof(profile));
2121 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
2122 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
2123 &profile);
2124 if (ret)
2125 goto set_perf_err;
2126 /* IPA_RM configuration ends */
2127
2128 /* Enable SG support in netdevice. */
2129 if (ipa_rmnet_res.ipa_advertise_sg_support)
2130 dev->hw_features |= NETIF_F_SG;
2131
2132 /* Enable NAPI support in netdevice. */
2133 if (ipa_rmnet_res.ipa_napi_enable) {
2134 netif_napi_add(dev, &(wwan_ptr->napi),
2135 ipa_rmnet_poll, NAPI_WEIGHT);
2136 }
2137
2138 ret = register_netdev(dev);
2139 if (ret) {
2140 IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
2141 0, ret);
2142 goto set_perf_err;
2143 }
2144
2145 IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n",
2146 ipa_netdevs[0]->name);
2147 if (ret) {
2148 IPAWANERR("default configuration failed rc=%d\n",
2149 ret);
2150 goto config_err;
2151 }
2152 atomic_set(&is_initialized, 1);
2153 if (!atomic_read(&is_ssr)) {
2154 /* offline charging mode */
2155 ipa2_proxy_clk_unvote();
2156 }
2157 atomic_set(&is_ssr, 0);
2158
2159 pr_info("rmnet_ipa completed initialization\n");
2160 return 0;
2161config_err:
2162 if (ipa_rmnet_res.ipa_napi_enable)
2163 netif_napi_del(&(wwan_ptr->napi));
2164 unregister_netdev(ipa_netdevs[0]);
2165set_perf_err:
2166 ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2167 IPA_RM_RESOURCE_Q6_CONS);
2168 if (ret)
2169 IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
2170 IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
2171 ret);
2172add_dpnd_err:
2173 ret = ipa_rm_inactivity_timer_destroy(
2174 IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
2175 if (ret)
2176 IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
2177 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2178timer_init_err:
2179 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2180 if (ret)
2181 IPAWANERR("Error deleting resource %d, ret=%d\n",
2182 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2183create_rsrc_err:
2184 q6_deinitialize_rm();
2185q6_init_err:
2186 free_netdev(ipa_netdevs[0]);
2187 ipa_netdevs[0] = NULL;
2188alloc_netdev_err:
2189 wan_ioctl_deinit();
2190wan_ioctl_init_err:
2191 ipa_del_dflt_wan_rt_tables();
2192setup_dflt_wan_rt_tables_err:
2193 ipa_del_a7_qmap_hdr();
2194setup_a7_qmap_hdr_err:
2195 ipa_qmi_service_exit();
2196 atomic_set(&is_ssr, 0);
2197 return ret;
2198}
2199
/*
 * ipa_wwan_remove() - platform driver remove callback
 *
 * Tears down everything ipa_wwan_probe() set up, in reverse order:
 * IPA->APPS pipe, netdev, IPA RM dependency/timer/resource, pending work,
 * wan_ioctl (kept alive across SSR), routing tables and QMAP headers.
 * Always returns 0.
 */
static int ipa_wwan_remove(struct platform_device *pdev)
{
	int ret;
	struct wwan_private *wwan_ptr;

	wwan_ptr = netdev_priv(ipa_netdevs[0]);

	pr_info("rmnet_ipa started deinitialization\n");
	/* Serialize pipe teardown against concurrent users of the handle. */
	mutex_lock(&ipa_to_apps_pipe_handle_guard);
	ret = ipa2_teardown_sys_pipe(ipa_to_apps_hdl);
	if (ret < 0)
		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
	else
		ipa_to_apps_hdl = -1;	/* mark handle invalid */
	if (ipa_rmnet_res.ipa_napi_enable)
		netif_napi_del(&(wwan_ptr->napi));
	mutex_unlock(&ipa_to_apps_pipe_handle_guard);
	unregister_netdev(ipa_netdevs[0]);
	/* IPA RM teardown: dependency, inactivity timer, then the resource.
	 * Failures are logged but do not abort the remaining cleanup.
	 */
	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
			ret);
	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR(
		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	cancel_work_sync(&ipa_tx_wakequeue_work);
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	free_netdev(ipa_netdevs[0]);
	ipa_netdevs[0] = NULL;
	/* No need to remove wwan_ioctl during SSR */
	if (!atomic_read(&is_ssr))
		wan_ioctl_deinit();
	ipa_del_dflt_wan_rt_tables();
	ipa_del_a7_qmap_hdr();
	ipa_del_mux_qmap_hdrs();
	if (ipa_qmi_ctx && ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false)
		wwan_del_ul_flt_rule_to_ipa();
	ipa_cleanup_deregister_intf();
	atomic_set(&is_initialized, 0);
	pr_info("rmnet_ipa completed deinitialization\n");
	return 0;
}
2250
2251/**
2252* rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
2253* @dev: pointer to device
2254*
2255* This callback will be invoked by the runtime_pm framework when an AP suspend
2256* operation is invoked, usually by pressing a suspend button.
2257*
2258* Returns -EAGAIN to runtime_pm framework in case there are pending packets
2259* in the Tx queue. This will postpone the suspend operation until all the
2260* pending packets will be transmitted.
2261*
2262* In case there are no packets to send, releases the WWAN0_PROD entity.
2263* As an outcome, the number of IPA active clients should be decremented
2264* until IPA clocks can be gated.
2265*/
2266static int rmnet_ipa_ap_suspend(struct device *dev)
2267{
2268 struct net_device *netdev = ipa_netdevs[0];
2269 struct wwan_private *wwan_ptr = netdev_priv(netdev);
2270
2271 IPAWANDBG("Enter...\n");
2272 /* Do not allow A7 to suspend in case there are oustanding packets */
2273 if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
2274 IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
2275 return -EAGAIN;
2276 }
2277
2278 /* Make sure that there is no Tx operation ongoing */
2279 netif_tx_lock_bh(netdev);
2280 ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2281 netif_tx_unlock_bh(netdev);
2282 IPAWANDBG("Exit\n");
2283
2284 return 0;
2285}
2286
2287/**
2288* rmnet_ipa_ap_resume() - resume callback for runtime_pm
2289* @dev: pointer to device
2290*
2291* This callback will be invoked by the runtime_pm framework when an AP resume
2292* operation is invoked.
2293*
2294* Enables the network interface queue and returns success to the
2295* runtime_pm framework.
2296*/
2297static int rmnet_ipa_ap_resume(struct device *dev)
2298{
2299 struct net_device *netdev = ipa_netdevs[0];
2300
2301 IPAWANDBG("Enter...\n");
2302 netif_wake_queue(netdev);
2303 IPAWANDBG("Exit\n");
2304
2305 return 0;
2306}
2307
2308static void ipa_stop_polling_stats(void)
2309{
2310 cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
2311 ipa_rmnet_ctx.polling_interval = 0;
2312}
2313
/* Device-tree match table: binds this driver to "qcom,rmnet-ipa" nodes. */
static const struct of_device_id rmnet_ipa_dt_match[] = {
	{.compatible = "qcom,rmnet-ipa"},
	{},
};
MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);
2319
/* PM callbacks; noirq phase is used so the suspend check on outstanding
 * TX packets runs after normal device suspend activity has quiesced.
 */
static const struct dev_pm_ops rmnet_ipa_pm_ops = {
	.suspend_noirq = rmnet_ipa_ap_suspend,
	.resume_noirq = rmnet_ipa_ap_resume,
};
2324
/* Platform driver; also (un)registered dynamically by ssr_notifier_cb()
 * across modem subsystem restarts.
 */
static struct platform_driver rmnet_ipa_driver = {
	.driver = {
		.name = "rmnet_ipa",
		.owner = THIS_MODULE,
		.pm = &rmnet_ipa_pm_ops,
		.of_match_table = rmnet_ipa_dt_match,
	},
	.probe = ipa_wwan_probe,
	.remove = ipa_wwan_remove,
};
2335
2336static int ssr_notifier_cb(struct notifier_block *this,
2337 unsigned long code,
2338 void *data)
2339{
2340 if (ipa_rmnet_ctx.ipa_rmnet_ssr) {
2341 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2342 pr_info("IPA received MPSS BEFORE_SHUTDOWN\n");
2343 atomic_set(&is_ssr, 1);
2344 ipa_q6_pre_shutdown_cleanup();
2345 if (ipa_netdevs[0])
2346 netif_stop_queue(ipa_netdevs[0]);
2347 ipa_qmi_stop_workqueues();
2348 wan_ioctl_stop_qmi_messages();
2349 ipa_stop_polling_stats();
2350 if (atomic_read(&is_initialized))
2351 platform_driver_unregister(&rmnet_ipa_driver);
2352 pr_info("IPA BEFORE_SHUTDOWN handling is complete\n");
2353 return NOTIFY_DONE;
2354 }
2355 if (code == SUBSYS_AFTER_SHUTDOWN) {
2356 pr_info("IPA received MPSS AFTER_SHUTDOWN\n");
2357 if (atomic_read(&is_ssr))
2358 ipa_q6_post_shutdown_cleanup();
2359 pr_info("IPA AFTER_SHUTDOWN handling is complete\n");
2360 return NOTIFY_DONE;
2361 }
2362 if (code == SUBSYS_AFTER_POWERUP) {
2363 pr_info("IPA received MPSS AFTER_POWERUP\n");
2364 if (!atomic_read(&is_initialized)
2365 && atomic_read(&is_ssr))
2366 platform_driver_register(&rmnet_ipa_driver);
2367 pr_info("IPA AFTER_POWERUP handling is complete\n");
2368 return NOTIFY_DONE;
2369 }
2370 if (code == SUBSYS_BEFORE_POWERUP) {
2371 pr_info("IPA received MPSS BEFORE_POWERUP\n");
2372 if (atomic_read(&is_ssr))
2373 /* clean up cached QMI msg/handlers */
2374 ipa_qmi_service_exit();
2375 ipa2_proxy_clk_vote();
2376 pr_info("IPA BEFORE_POWERUP handling is complete\n");
2377 return NOTIFY_DONE;
2378 }
2379 }
2380 return NOTIFY_DONE;
2381}
2382
2383/**
2384 * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa2_send_msg
2385 * @buff: pointer to buffer containing the message
2386 * @len: message len
2387 * @type: message type
2388 *
2389 * This function is invoked when ipa2_send_msg is complete (Provided as a
2390 * free function pointer along with the message).
2391 */
2392static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
2393{
2394 if (!buff) {
2395 IPAWANERR("Null buffer\n");
2396 return;
2397 }
2398
2399 if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
2400 type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
2401 IPAWANERR("Wrong type given. buff %p type %d\n",
2402 buff, type);
2403 }
2404 kfree(buff);
2405}
2406
2407/**
2408 * rmnet_ipa_get_stats_and_update(bool reset) - Gets pipe stats from Modem
2409 *
2410 * This function queries the IPA Modem driver for the pipe stats
2411 * via QMI, and updates the user space IPA entity.
2412 */
2413static void rmnet_ipa_get_stats_and_update(bool reset)
2414{
2415 struct ipa_get_data_stats_req_msg_v01 req;
2416 struct ipa_get_data_stats_resp_msg_v01 *resp;
2417 struct ipa_msg_meta msg_meta;
2418 int rc;
2419
2420 resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
2421 GFP_KERNEL);
2422 if (!resp) {
2423 IPAWANERR("Can't allocate memory for stats message\n");
2424 return;
2425 }
2426
2427 memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
2428 memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
2429
2430 req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
2431 if (reset == true) {
2432 req.reset_stats_valid = true;
2433 req.reset_stats = true;
2434 IPAWANERR("Get the latest pipe-stats and reset it\n");
2435 }
2436
2437 rc = ipa_qmi_get_data_stats(&req, resp);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002438 if (rc) {
2439 IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc);
2440 kfree(resp);
2441 return;
2442 }
Amir Levy9659e592016-10-27 18:08:27 +03002443
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002444 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2445 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
2446 msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
2447 rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2448 if (rc) {
2449 IPAWANERR("ipa2_send_msg failed: %d\n", rc);
2450 kfree(resp);
2451 return;
Amir Levy9659e592016-10-27 18:08:27 +03002452 }
2453}
2454
2455/**
2456 * tethering_stats_poll_queue() - Stats polling function
2457 * @work - Work entry
2458 *
2459 * This function is scheduled periodically (per the interval) in
2460 * order to poll the IPA Modem driver for the pipe stats.
2461 */
2462static void tethering_stats_poll_queue(struct work_struct *work)
2463{
2464 rmnet_ipa_get_stats_and_update(false);
2465
2466 /* Schedule again only if there's an active polling interval */
2467 if (ipa_rmnet_ctx.polling_interval != 0)
2468 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
2469 msecs_to_jiffies(ipa_rmnet_ctx.polling_interval*1000));
2470}
2471
2472/**
2473 * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
2474 *
2475 * This function retrieves the data usage (used quota) from the IPA Modem driver
2476 * via QMI, and updates IPA user space entity.
2477 */
2478static void rmnet_ipa_get_network_stats_and_update(void)
2479{
2480 struct ipa_get_apn_data_stats_req_msg_v01 req;
2481 struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
2482 struct ipa_msg_meta msg_meta;
2483 int rc;
2484
2485 resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
2486 GFP_KERNEL);
2487 if (!resp) {
2488 IPAWANERR("Can't allocate memory for network stats message\n");
2489 return;
2490 }
2491
2492 memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
2493 memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
2494
2495 req.mux_id_list_valid = true;
2496 req.mux_id_list_len = 1;
2497 req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id;
2498
2499 rc = ipa_qmi_get_network_stats(&req, resp);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002500 if (rc) {
2501 IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc);
2502 kfree(resp);
2503 return;
2504 }
Amir Levy9659e592016-10-27 18:08:27 +03002505
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002506 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2507 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
2508 msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
2509 rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2510 if (rc) {
2511 IPAWANERR("ipa2_send_msg failed: %d\n", rc);
2512 kfree(resp);
2513 return;
Amir Levy9659e592016-10-27 18:08:27 +03002514 }
2515}
2516
2517/**
2518 * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler
2519 * @data - IOCTL data
2520 *
2521 * This function handles WAN_IOC_POLL_TETHERING_STATS.
2522 * In case polling interval received is 0, polling will stop
2523 * (If there's a polling in progress, it will allow it to finish), and then will
2524 * fetch network stats, and update the IPA user space.
2525 *
2526 * Return codes:
2527 * 0: Success
2528 */
2529int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
2530{
2531 ipa_rmnet_ctx.polling_interval = data->polling_interval_secs;
2532
2533 cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
2534
2535 if (ipa_rmnet_ctx.polling_interval == 0) {
2536 ipa_qmi_stop_data_qouta();
2537 rmnet_ipa_get_network_stats_and_update();
2538 rmnet_ipa_get_stats_and_update(true);
2539 return 0;
2540 }
2541
2542 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
2543 return 0;
2544}
2545
2546/**
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302547 * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler
Amir Levy9659e592016-10-27 18:08:27 +03002548 * @data - IOCTL data
2549 *
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302550 * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface.
Amir Levy9659e592016-10-27 18:08:27 +03002551 * It translates the given interface name to the Modem MUX ID and
2552 * sends the request of the quota to the IPA Modem driver via QMI.
2553 *
2554 * Return codes:
2555 * 0: Success
2556 * -EFAULT: Invalid interface name provided
2557 * other: See ipa_qmi_set_data_quota
2558 */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302559static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data)
Amir Levy9659e592016-10-27 18:08:27 +03002560{
2561 u32 mux_id;
2562 int index;
2563 struct ipa_set_data_usage_quota_req_msg_v01 req;
2564
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302565 /* stop quota */
2566 if (!data->set_quota)
2567 ipa_qmi_stop_data_qouta();
2568
Amir Levy9659e592016-10-27 18:08:27 +03002569 index = find_vchannel_name_index(data->interface_name);
2570 IPAWANERR("iface name %s, quota %lu\n",
2571 data->interface_name,
2572 (unsigned long int) data->quota_mbytes);
2573
2574 if (index == MAX_NUM_OF_MUX_CHANNEL) {
2575 IPAWANERR("%s is an invalid iface name\n",
2576 data->interface_name);
2577 return -EFAULT;
2578 }
2579
2580 mux_id = mux_channel[index].mux_id;
2581
2582 ipa_rmnet_ctx.metered_mux_id = mux_id;
2583
2584 memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
2585 req.apn_quota_list_valid = true;
2586 req.apn_quota_list_len = 1;
2587 req.apn_quota_list[0].mux_id = mux_id;
2588 req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
2589
2590 return ipa_qmi_set_data_quota(&req);
2591}
2592
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302593static int rmnet_ipa_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
2594{
2595 struct ipa_set_wifi_quota wifi_quota;
2596 int rc = 0;
2597
2598 memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
2599 wifi_quota.set_quota = data->set_quota;
2600 wifi_quota.quota_bytes = data->quota_mbytes;
2601 IPAWANDBG("iface name %s, quota %lu\n",
2602 data->interface_name,
2603 (unsigned long int) data->quota_mbytes);
2604
2605 rc = ipa2_set_wlan_quota(&wifi_quota);
2606 /* check if wlan-fw takes this quota-set */
2607 if (!wifi_quota.set_valid)
2608 rc = -EFAULT;
2609 return rc;
2610}
2611
2612/**
2613 * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
2614 * @data - IOCTL data
2615 *
2616 * This function handles WAN_IOC_SET_DATA_QUOTA.
2617 * It translates the given interface name to the Modem MUX ID and
2618 * sends the request of the quota to the IPA Modem driver via QMI.
2619 *
2620 * Return codes:
2621 * 0: Success
2622 * -EFAULT: Invalid interface name provided
2623 * other: See ipa_qmi_set_data_quota
2624 */
2625int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
2626{
2627 enum ipa_upstream_type upstream_type;
2628 int rc = 0;
2629
2630 /* get IPA backhaul type */
2631 upstream_type = find_upstream_type(data->interface_name);
2632
2633 if (upstream_type == IPA_UPSTEAM_MAX) {
2634 IPAWANERR("upstream iface %s not supported\n",
2635 data->interface_name);
2636 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2637 rc = rmnet_ipa_set_data_quota_wifi(data);
2638 if (rc) {
2639 IPAWANERR("set quota on wifi failed\n");
2640 return rc;
2641 }
2642 } else {
2643 rc = rmnet_ipa_set_data_quota_modem(data);
2644 if (rc) {
2645 IPAWANERR("set quota on modem failed\n");
2646 return rc;
2647 }
2648 }
2649 return rc;
2650}
2651
Amir Levy9659e592016-10-27 18:08:27 +03002652 /* rmnet_ipa_set_tether_client_pipe() -
2653 * @data - IOCTL data
2654 *
2655 * This function handles WAN_IOC_SET_DATA_QUOTA.
2656 * It translates the given interface name to the Modem MUX ID and
2657 * sends the request of the quota to the IPA Modem driver via QMI.
2658 *
2659 * Return codes:
2660 * 0: Success
Skylar Chang345c8142016-11-30 14:41:24 -08002661 * -EFAULT: Invalid src/dst pipes provided
Amir Levy9659e592016-10-27 18:08:27 +03002662 * other: See ipa_qmi_set_data_quota
2663 */
2664int rmnet_ipa_set_tether_client_pipe(
2665 struct wan_ioctl_set_tether_client_pipe *data)
2666{
2667 int number, i;
2668
Skylar Chang345c8142016-11-30 14:41:24 -08002669 /* error checking if ul_src_pipe_len valid or not*/
2670 if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
2671 data->ul_src_pipe_len < 0) {
2672 IPAWANERR("UL src pipes %d exceeding max %d\n",
2673 data->ul_src_pipe_len,
2674 QMI_IPA_MAX_PIPES_V01);
2675 return -EFAULT;
2676 }
2677 /* error checking if dl_dst_pipe_len valid or not*/
2678 if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
2679 data->dl_dst_pipe_len < 0) {
2680 IPAWANERR("DL dst pipes %d exceeding max %d\n",
2681 data->dl_dst_pipe_len,
2682 QMI_IPA_MAX_PIPES_V01);
2683 return -EFAULT;
2684 }
2685
Amir Levy9659e592016-10-27 18:08:27 +03002686 IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
2687 data->ipa_client,
2688 data->ul_src_pipe_len,
2689 data->dl_dst_pipe_len,
2690 data->reset_client);
2691 number = data->ul_src_pipe_len;
2692 for (i = 0; i < number; i++) {
2693 IPAWANDBG("UL index-%d pipe %d\n", i,
2694 data->ul_src_pipe_list[i]);
2695 if (data->reset_client)
2696 ipa_set_client(data->ul_src_pipe_list[i],
2697 0, false);
2698 else
2699 ipa_set_client(data->ul_src_pipe_list[i],
2700 data->ipa_client, true);
2701 }
2702 number = data->dl_dst_pipe_len;
2703 for (i = 0; i < number; i++) {
2704 IPAWANDBG("DL index-%d pipe %d\n", i,
2705 data->dl_dst_pipe_list[i]);
2706 if (data->reset_client)
2707 ipa_set_client(data->dl_dst_pipe_list[i],
2708 0, false);
2709 else
2710 ipa_set_client(data->dl_dst_pipe_list[i],
2711 data->ipa_client, false);
2712 }
2713 return 0;
2714}
2715
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302716static int rmnet_ipa_query_tethering_stats_wifi(
2717 struct wan_ioctl_query_tether_stats *data, bool reset)
2718{
2719 struct ipa_get_wdi_sap_stats *sap_stats;
2720 int rc;
2721
2722 sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
2723 GFP_KERNEL);
2724 if (!sap_stats)
2725 return -ENOMEM;
2726
2727 sap_stats->reset_stats = reset;
2728 IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);
2729
2730 rc = ipa2_get_wlan_stats(sap_stats);
2731 if (rc) {
2732 kfree(sap_stats);
2733 return rc;
2734 } else if (reset) {
2735 kfree(sap_stats);
2736 return 0;
2737 }
2738
2739 if (sap_stats->stats_valid) {
2740 data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
2741 data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
2742 data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
2743 data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
2744 data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
2745 data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
2746 data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
2747 data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
2748 }
2749
2750 IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
2751 (unsigned long int) data->ipv4_rx_packets,
2752 (unsigned long int) data->ipv6_rx_packets,
2753 (unsigned long int) data->ipv4_rx_bytes,
2754 (unsigned long int) data->ipv6_rx_bytes);
2755 IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
2756 (unsigned long int) data->ipv4_tx_packets,
2757 (unsigned long int) data->ipv6_tx_packets,
2758 (unsigned long int) data->ipv4_tx_bytes,
2759 (unsigned long int) data->ipv6_tx_bytes);
2760
2761 kfree(sap_stats);
2762 return rc;
2763}
2764
/*
 * rmnet_ipa_query_tethering_stats_modem() - query (or reset) modem pipe stats
 * @data: output; RX/TX IPv4/IPv6 counters are ACCUMULATED (+=) into it for
 *        every pipe whose tethering client matches data->ipa_client.
 *        May be NULL only when @reset is true (callers pass NULL, true).
 * @reset: when true, ask the modem to clear its counters and return
 *         without touching @data.
 *
 * Fetches per-pipe stats from the modem via QMI. Downlink (dl_dst) pipes
 * contribute to the RX counters, uplink (ul_src) pipes to the TX counters;
 * pipe direction is cross-checked with ipa_get_client_uplink().
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * ipa_qmi_get_data_stats() error code.
 */
int rmnet_ipa_query_tethering_stats_modem(
	struct wan_ioctl_query_tether_stats *data,
	bool reset
)
{
	struct ipa_get_data_stats_req_msg_v01 *req;
	struct ipa_get_data_stats_resp_msg_v01 *resp;
	int pipe_len, rc;

	req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
			GFP_KERNEL);
	if (!req) {
		IPAWANERR("failed to allocate memory for stats message\n");
		return -ENOMEM;
	}
	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
			GFP_KERNEL);
	if (!resp) {
		IPAWANERR("failed to allocate memory for stats message\n");
		kfree(req);
		return -ENOMEM;
	}
	memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));

	req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
	if (reset) {
		req->reset_stats_valid = true;
		req->reset_stats = true;
		IPAWANERR("reset the pipe stats\n");
	} else {
		/* print tethered-client enum */
		IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
	}

	rc = ipa_qmi_get_data_stats(req, resp);
	if (rc) {
		IPAWANERR("can't get ipa_qmi_get_data_stats\n");
		kfree(req);
		kfree(resp);
		return rc;
	} else if (reset) {
		/* reset-only request: nothing to accumulate */
		kfree(req);
		kfree(resp);
		return 0;
	}

	/* Downlink destination pipes -> RX counters */
	if (resp->dl_dst_pipe_stats_list_valid) {
		for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
			pipe_len++) {
			IPAWANDBG("Check entry(%d) dl_dst_pipe(%d)\n",
				pipe_len, resp->dl_dst_pipe_stats_list
				[pipe_len].pipe_index);
			IPAWANDBG("dl_p_v4(%lu)v6(%lu) dl_b_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv4_packets,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv6_packets,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv4_bytes,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv6_bytes);
			/* only count true downlink pipes for this client */
			if (ipa_get_client_uplink(resp->
				dl_dst_pipe_stats_list[pipe_len].
				pipe_index) == false) {
				if (data->ipa_client == ipa_get_client(resp->
					dl_dst_pipe_stats_list[pipe_len].
					pipe_index)) {
					/* update the DL stats */
					data->ipv4_rx_packets += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv4_packets;
					data->ipv6_rx_packets += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv6_packets;
					data->ipv4_rx_bytes += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv4_bytes;
					data->ipv6_rx_bytes += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv6_bytes;
				}
			}
		}
	}
	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
		(unsigned long int) data->ipv4_rx_packets,
		(unsigned long int) data->ipv6_rx_packets,
		(unsigned long int) data->ipv4_rx_bytes,
		(unsigned long int) data->ipv6_rx_bytes);

	/* Uplink source pipes -> TX counters */
	if (resp->ul_src_pipe_stats_list_valid) {
		for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
			pipe_len++) {
			IPAWANDBG("Check entry(%d) ul_dst_pipe(%d)\n",
				pipe_len,
				resp->ul_src_pipe_stats_list[pipe_len].
				pipe_index);
			IPAWANDBG("ul_p_v4(%lu)v6(%lu)ul_b_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv4_packets,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv6_packets,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv4_bytes,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv6_bytes);
			/* only count true uplink pipes for this client */
			if (ipa_get_client_uplink(resp->
				ul_src_pipe_stats_list[pipe_len].
				pipe_index) == true) {
				if (data->ipa_client == ipa_get_client(resp->
					ul_src_pipe_stats_list[pipe_len].
					pipe_index)) {
					/* update the DL stats */
					data->ipv4_tx_packets += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv4_packets;
					data->ipv6_tx_packets += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv6_packets;
					data->ipv4_tx_bytes += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv4_bytes;
					data->ipv6_tx_bytes += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv6_bytes;
				}
			}
		}
	}
	IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
		(unsigned long int) data->ipv4_tx_packets,
		(unsigned long int) data->ipv6_tx_packets,
		(unsigned long int) data->ipv4_tx_bytes,
		(unsigned long int) data->ipv6_tx_bytes);
	kfree(req);
	kfree(resp);
	return 0;
}
2911}
2912
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302913int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
2914 bool reset)
2915{
2916 enum ipa_upstream_type upstream_type;
2917 int rc = 0;
2918
2919 /* get IPA backhaul type */
2920 upstream_type = find_upstream_type(data->upstreamIface);
2921
2922 if (upstream_type == IPA_UPSTEAM_MAX) {
2923 IPAWANERR("upstreamIface %s not supported\n",
2924 data->upstreamIface);
2925 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2926 IPAWANDBG_LOW(" query wifi-backhaul stats\n");
2927 rc = rmnet_ipa_query_tethering_stats_wifi(
2928 data, false);
2929 if (rc) {
2930 IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
2931 return rc;
2932 }
2933 } else {
2934 IPAWANDBG_LOW(" query modem-backhaul stats\n");
2935 rc = rmnet_ipa_query_tethering_stats_modem(
2936 data, false);
2937 if (rc) {
2938 IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
2939 return rc;
2940 }
2941 }
2942 return rc;
2943}
2944
2945int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
2946{
2947 enum ipa_upstream_type upstream_type;
2948 int rc = 0;
2949
2950 /* get IPA backhaul type */
2951 upstream_type = find_upstream_type(data->upstreamIface);
2952
2953 if (upstream_type == IPA_UPSTEAM_MAX) {
2954 IPAWANERR("upstream iface %s not supported\n",
2955 data->upstreamIface);
2956 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2957 IPAWANDBG(" reset wifi-backhaul stats\n");
2958 rc = rmnet_ipa_query_tethering_stats_wifi(
2959 NULL, true);
2960 if (rc) {
2961 IPAWANERR("reset WLAN stats failed\n");
2962 return rc;
2963 }
2964 } else {
2965 IPAWANDBG(" reset modem-backhaul stats\n");
2966 rc = rmnet_ipa_query_tethering_stats_modem(
2967 NULL, true);
2968 if (rc) {
2969 IPAWANERR("reset MODEM stats failed\n");
2970 return rc;
2971 }
2972 }
2973 return rc;
2974}
2975
2976
Amir Levy9659e592016-10-27 18:08:27 +03002977/**
2978 * ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
2979 * @mux_id - The MUX ID on which the quota has been reached
2980 *
2981 * This function broadcasts a Netlink event using the kobject of the
2982 * rmnet_ipa interface in order to alert the user space that the quota
2983 * on the specific interface which matches the mux_id has been reached.
2984 *
2985 */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302986void ipa_broadcast_quota_reach_ind(u32 mux_id,
2987 enum ipa_upstream_type upstream_type)
Amir Levy9659e592016-10-27 18:08:27 +03002988{
2989 char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
2990 char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
2991 char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
2992 char *envp[IPA_UEVENT_NUM_EVNP] = {
2993 alert_msg, iface_name_l, iface_name_m, NULL };
2994 int res;
2995 int index;
2996
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302997 /* check upstream_type*/
2998 if (upstream_type == IPA_UPSTEAM_MAX) {
2999 IPAWANERR("upstreamIface type %d not supported\n",
3000 upstream_type);
Amir Levy9659e592016-10-27 18:08:27 +03003001 return;
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303002 } else if (upstream_type == IPA_UPSTEAM_MODEM) {
3003 index = find_mux_channel_index(mux_id);
3004 if (index == MAX_NUM_OF_MUX_CHANNEL) {
3005 IPAWANERR("%u is an mux ID\n", mux_id);
3006 return;
3007 }
Amir Levy9659e592016-10-27 18:08:27 +03003008 }
3009
3010 res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
3011 "ALERT_NAME=%s", "quotaReachedAlert");
3012 if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
3013 IPAWANERR("message too long (%d)", res);
3014 return;
3015 }
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303016
Amir Levy9659e592016-10-27 18:08:27 +03003017 /* posting msg for L-release for CNE */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303018 if (upstream_type == IPA_UPSTEAM_MODEM) {
Amir Levy9659e592016-10-27 18:08:27 +03003019 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303020 "UPSTREAM=%s", mux_channel[index].vchannel_name);
3021 } else {
3022 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
3023 "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
3024 }
Amir Levy9659e592016-10-27 18:08:27 +03003025 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
3026 IPAWANERR("message too long (%d)", res);
3027 return;
3028 }
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303029
Amir Levy9659e592016-10-27 18:08:27 +03003030 /* posting msg for M-release for CNE */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303031 if (upstream_type == IPA_UPSTEAM_MODEM) {
Amir Levy9659e592016-10-27 18:08:27 +03003032 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303033 "INTERFACE=%s", mux_channel[index].vchannel_name);
3034 } else {
3035 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
3036 "INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
3037 }
Amir Levy9659e592016-10-27 18:08:27 +03003038 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
3039 IPAWANERR("message too long (%d)", res);
3040 return;
3041 }
3042
3043 IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
3044 alert_msg, iface_name_l, iface_name_m);
3045 kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp);
3046}
3047
3048/**
3049 * ipa_q6_handshake_complete() - Perform operations once Q6 is up
3050 * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
3051 *
3052 * This function is invoked once the handshake between the IPA AP driver
3053 * and IPA Q6 driver is complete. At this point, it is possible to perform
3054 * operations which can't be performed until IPA Q6 driver is up.
3055 *
3056 */
3057void ipa_q6_handshake_complete(bool ssr_bootup)
3058{
3059 /* It is required to recover the network stats after SSR recovery */
3060 if (ssr_bootup) {
3061 /*
3062 * In case the uC is required to be loaded by the Modem,
3063 * the proxy vote will be removed only when uC loading is
3064 * complete and indication is received by the AP. After SSR,
3065 * uC is already loaded. Therefore, proxy vote can be removed
3066 * once Modem init is complete.
3067 */
3068 ipa2_proxy_clk_unvote();
3069
3070 /*
3071 * It is required to recover the network stats after
3072 * SSR recovery
3073 */
3074 rmnet_ipa_get_network_stats_and_update();
3075
3076 /* Enable holb monitoring on Q6 pipes. */
3077 ipa_q6_monitor_holb_mitigation(true);
3078 }
3079}
3080
3081static int __init ipa_wwan_init(void)
3082{
3083 atomic_set(&is_initialized, 0);
3084 atomic_set(&is_ssr, 0);
3085
3086 mutex_init(&ipa_to_apps_pipe_handle_guard);
3087 ipa_to_apps_hdl = -1;
3088
3089 ipa_qmi_init();
3090
3091 /* Register for Modem SSR */
3092 subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM,
3093 &ssr_notifier);
3094 if (!IS_ERR(subsys_notify_handle))
3095 return platform_driver_register(&rmnet_ipa_driver);
3096 else
3097 return (int)PTR_ERR(subsys_notify_handle);
3098}
3099
3100static void __exit ipa_wwan_cleanup(void)
3101{
3102 int ret;
3103
3104 ipa_qmi_cleanup();
3105 mutex_destroy(&ipa_to_apps_pipe_handle_guard);
3106 ret = subsys_notif_unregister_notifier(subsys_notify_handle,
3107 &ssr_notifier);
3108 if (ret)
3109 IPAWANERR(
3110 "Error subsys_notif_unregister_notifier system %s, ret=%d\n",
3111 SUBSYS_MODEM, ret);
3112 platform_driver_unregister(&rmnet_ipa_driver);
3113}
3114
3115static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type)
3116{
3117 if (!buff)
3118 IPAWANERR("Null buffer.\n");
3119 kfree(buff);
3120}
3121
3122static void ipa_rmnet_rx_cb(void *priv)
3123{
3124 struct net_device *dev = priv;
3125 struct wwan_private *wwan_ptr;
3126
3127 IPAWANDBG("\n");
3128
3129 if (dev != ipa_netdevs[0]) {
3130 IPAWANERR("Not matching with netdev\n");
3131 return;
3132 }
3133
3134 wwan_ptr = netdev_priv(dev);
3135 napi_schedule(&(wwan_ptr->napi));
3136}
3137
3138static int ipa_rmnet_poll(struct napi_struct *napi, int budget)
3139{
3140 int rcvd_pkts = 0;
3141
3142 rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT);
3143 IPAWANDBG("rcvd packets: %d\n", rcvd_pkts);
3144 return rcvd_pkts;
3145}
3146
/* Defer init with late_initcall so the SSR/subsystem framework is up first. */
late_initcall(ipa_wwan_init);
module_exit(ipa_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
MODULE_LICENSE("GPL v2");