blob: 11eeb2f452fa76d585bed729ff918c22ac14a7a9 [file] [log] [blame]
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05301/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * WWAN Transport Network Driver.
15 */
16
17#include <linux/completion.h>
18#include <linux/errno.h>
19#include <linux/if_arp.h>
20#include <linux/interrupt.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
25#include <linux/of_device.h>
26#include <linux/string.h>
27#include <linux/skbuff.h>
28#include <linux/version.h>
29#include <linux/workqueue.h>
30#include <net/pkt_sched.h>
31#include <soc/qcom/subsystem_restart.h>
32#include <soc/qcom/subsystem_notif.h>
33#include "ipa_qmi_service.h"
34#include <linux/rmnet_ipa_fd_ioctl.h>
35#include <linux/ipa.h>
36#include <uapi/linux/net_map.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020037#include <uapi/linux/msm_rmnet.h>
38#include <net/rmnet_config.h>
Amir Levy9659e592016-10-27 18:08:27 +030039
40#include "ipa_trace.h"
41
42#define WWAN_METADATA_SHFT 24
43#define WWAN_METADATA_MASK 0xFF000000
44#define WWAN_DATA_LEN 2000
45#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
46#define HEADROOM_FOR_QMAP 8 /* for mux header */
47#define TAILROOM 0 /* for padding by mux layer */
48#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
49#define UL_FILTER_RULE_HANDLE_START 69
50#define DEFAULT_OUTSTANDING_HIGH_CTL 96
51#define DEFAULT_OUTSTANDING_HIGH 64
52#define DEFAULT_OUTSTANDING_LOW 32
53
54#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +053055#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
56
Amir Levy9659e592016-10-27 18:08:27 +030057#define IPA_WWAN_DEVICE_COUNT (1)
58
59#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
60
61#define INVALID_MUX_ID 0xFF
62#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
63#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
64#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
65
66#define NAPI_WEIGHT 60
Sunil Paidimarri226cf032016-10-14 13:33:08 -070067#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024
Amir Levy9659e592016-10-27 18:08:27 +030068
69static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
70static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
71static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl;
72static struct rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
73static int num_q6_rule, old_num_q6_rule;
74static int rmnet_index;
75static bool egress_set, a7_ul_flt_set;
76static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/
77static atomic_t is_initialized;
78static atomic_t is_ssr;
79static void *subsys_notify_handle;
80
81u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */
82static struct mutex ipa_to_apps_pipe_handle_guard;
Skylar Chang8438ba52017-03-15 21:27:35 -070083static struct mutex add_mux_channel_lock;
Amir Levy9659e592016-10-27 18:08:27 +030084static int wwan_add_ul_flt_rule_to_ipa(void);
85static int wwan_del_ul_flt_rule_to_ipa(void);
86static void ipa_wwan_msg_free_cb(void*, u32, u32);
87static void ipa_rmnet_rx_cb(void *priv);
88static int ipa_rmnet_poll(struct napi_struct *napi, int budget);
89
90static void wake_tx_queue(struct work_struct *work);
91static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue);
92
93static void tethering_stats_poll_queue(struct work_struct *work);
94static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
95 tethering_stats_poll_queue);
96
97enum wwan_device_status {
98 WWAN_DEVICE_INACTIVE = 0,
99 WWAN_DEVICE_ACTIVE = 1
100};
101
102struct ipa_rmnet_plat_drv_res {
103 bool ipa_rmnet_ssr;
104 bool ipa_loaduC;
105 bool ipa_advertise_sg_support;
106 bool ipa_napi_enable;
Sunil Paidimarri226cf032016-10-14 13:33:08 -0700107 u32 wan_rx_desc_size;
Amir Levy9659e592016-10-27 18:08:27 +0300108};
109
110static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
111/**
112 * struct wwan_private - WWAN private data
113 * @net: network interface struct implemented by this driver
114 * @stats: iface statistics
115 * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
116 * @outstanding_high: number of outstanding packets allowed
117 * @outstanding_low: number of outstanding packets which shall cause
118 * @ch_id: channel id
119 * @lock: spinlock for mutual exclusion
120 * @device_status: holds device status
121 *
122 * WWAN private - holds all relevant info about WWAN driver
123 */
124struct wwan_private {
125 struct net_device *net;
126 struct net_device_stats stats;
127 atomic_t outstanding_pkts;
128 int outstanding_high_ctl;
129 int outstanding_high;
130 int outstanding_low;
131 uint32_t ch_id;
132 spinlock_t lock;
133 struct completion resource_granted_completion;
134 enum wwan_device_status device_status;
135 struct napi_struct napi;
136};
137
138/**
139* ipa_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
140*
141* Return codes:
142* 0: success
143* -ENOMEM: failed to allocate memory
144* -EPERM: failed to add the tables
145*/
146static int ipa_setup_a7_qmap_hdr(void)
147{
148 struct ipa_ioc_add_hdr *hdr;
149 struct ipa_hdr_add *hdr_entry;
150 u32 pyld_sz;
151 int ret;
152
153 /* install the basic exception header */
154 pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
155 sizeof(struct ipa_hdr_add);
156 hdr = kzalloc(pyld_sz, GFP_KERNEL);
157 if (!hdr) {
158 IPAWANERR("fail to alloc exception hdr\n");
159 return -ENOMEM;
160 }
161 hdr->num_hdrs = 1;
162 hdr->commit = 1;
163 hdr_entry = &hdr->hdr[0];
164
165 strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
166 IPA_RESOURCE_NAME_MAX);
167 hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
168
169 if (ipa2_add_hdr(hdr)) {
170 IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
171 ret = -EPERM;
172 goto bail;
173 }
174
175 if (hdr_entry->status) {
176 IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
177 ret = -EPERM;
178 goto bail;
179 }
180 qmap_hdr_hdl = hdr_entry->hdr_hdl;
181
182 ret = 0;
183bail:
184 kfree(hdr);
185 return ret;
186}
187
188static void ipa_del_a7_qmap_hdr(void)
189{
190 struct ipa_ioc_del_hdr *del_hdr;
191 struct ipa_hdr_del *hdl_entry;
192 u32 pyld_sz;
193 int ret;
194
195 pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
196 sizeof(struct ipa_hdr_del);
197 del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
198 if (!del_hdr) {
199 IPAWANERR("fail to alloc exception hdr_del\n");
200 return;
201 }
202
203 del_hdr->commit = 1;
204 del_hdr->num_hdls = 1;
205 hdl_entry = &del_hdr->hdl[0];
206 hdl_entry->hdl = qmap_hdr_hdl;
207
208 ret = ipa2_del_hdr(del_hdr);
209 if (ret || hdl_entry->status)
210 IPAWANERR("ipa2_del_hdr failed\n");
211 else
212 IPAWANDBG("hdrs deletion done\n");
213
214 qmap_hdr_hdl = 0;
215 kfree(del_hdr);
216}
217
/*
 * ipa_del_qmap_hdr() - delete one QMAP header by handle
 * @hdr_hdl: IPA header handle to delete; 0 is rejected as invalid
 *
 * Used to tear down per-mux-channel QMAP headers. Failures are only
 * logged; the function returns void.
 */
static void ipa_del_qmap_hdr(uint32_t hdr_hdl)
{
	struct ipa_ioc_del_hdr *del_hdr;
	struct ipa_hdr_del *hdl_entry;
	u32 pyld_sz;
	int ret;

	if (hdr_hdl == 0) {
		IPAWANERR("Invalid hdr_hdl provided\n");
		return;
	}

	/* single-entry deletion payload */
	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
		sizeof(struct ipa_hdr_del);
	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!del_hdr) {
		IPAWANERR("fail to alloc exception hdr_del\n");
		return;
	}

	del_hdr->commit = 1;
	del_hdr->num_hdls = 1;
	hdl_entry = &del_hdr->hdl[0];
	hdl_entry->hdl = hdr_hdl;

	ret = ipa2_del_hdr(del_hdr);
	if (ret || hdl_entry->status)
		IPAWANERR("ipa2_del_hdr failed\n");
	else
		IPAWANDBG("header deletion done\n");

	/* NOTE(review): this clears the file-scope handle of the default A7
	 * QMAP header even though the header deleted here is the
	 * caller-supplied per-mux one; looks like a copy-paste remnant from
	 * ipa_del_a7_qmap_hdr() — confirm intent before changing.
	 */
	qmap_hdr_hdl = 0;
	kfree(del_hdr);
}
252
253static void ipa_del_mux_qmap_hdrs(void)
254{
255 int index;
256
257 for (index = 0; index < rmnet_index; index++) {
258 ipa_del_qmap_hdr(mux_channel[index].hdr_hdl);
259 mux_channel[index].hdr_hdl = 0;
260 }
261}
262
263static int ipa_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
264{
265 struct ipa_ioc_add_hdr *hdr;
266 struct ipa_hdr_add *hdr_entry;
267 char hdr_name[IPA_RESOURCE_NAME_MAX];
268 u32 pyld_sz;
269 int ret;
270
271 pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
272 sizeof(struct ipa_hdr_add);
273 hdr = kzalloc(pyld_sz, GFP_KERNEL);
274 if (!hdr) {
275 IPAWANERR("fail to alloc exception hdr\n");
276 return -ENOMEM;
277 }
278 hdr->num_hdrs = 1;
279 hdr->commit = 1;
280 hdr_entry = &hdr->hdr[0];
281
282 snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
283 A2_MUX_HDR_NAME_V4_PREF,
284 mux_id);
285 strlcpy(hdr_entry->name, hdr_name,
286 IPA_RESOURCE_NAME_MAX);
287
288 hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
289 hdr_entry->hdr[1] = (uint8_t) mux_id;
290 IPAWANDBG("header (%s) with mux-id: (%d)\n",
291 hdr_name,
292 hdr_entry->hdr[1]);
293 if (ipa2_add_hdr(hdr)) {
294 IPAWANERR("fail to add IPA_QMAP hdr\n");
295 ret = -EPERM;
296 goto bail;
297 }
298
299 if (hdr_entry->status) {
300 IPAWANERR("fail to add IPA_QMAP hdr\n");
301 ret = -EPERM;
302 goto bail;
303 }
304
305 ret = 0;
306 *hdr_hdl = hdr_entry->hdr_hdl;
307bail:
308 kfree(hdr);
309 return ret;
310}
311
312/**
313* ipa_setup_dflt_wan_rt_tables() - Setup default wan routing tables
314*
315* Return codes:
316* 0: success
317* -ENOMEM: failed to allocate memory
318* -EPERM: failed to add the tables
319*/
320static int ipa_setup_dflt_wan_rt_tables(void)
321{
322 struct ipa_ioc_add_rt_rule *rt_rule;
323 struct ipa_rt_rule_add *rt_rule_entry;
324
325 rt_rule =
326 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
327 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
328 if (!rt_rule) {
329 IPAWANERR("fail to alloc mem\n");
330 return -ENOMEM;
331 }
332 /* setup a default v4 route to point to Apps */
333 rt_rule->num_rules = 1;
334 rt_rule->commit = 1;
335 rt_rule->ip = IPA_IP_v4;
336 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
337 IPA_RESOURCE_NAME_MAX);
338
339 rt_rule_entry = &rt_rule->rules[0];
340 rt_rule_entry->at_rear = 1;
341 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
342 rt_rule_entry->rule.hdr_hdl = qmap_hdr_hdl;
343
344 if (ipa2_add_rt_rule(rt_rule)) {
345 IPAWANERR("fail to add dflt_wan v4 rule\n");
346 kfree(rt_rule);
347 return -EPERM;
348 }
349
350 IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
351 dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
352
353 /* setup a default v6 route to point to A5 */
354 rt_rule->ip = IPA_IP_v6;
355 if (ipa2_add_rt_rule(rt_rule)) {
356 IPAWANERR("fail to add dflt_wan v6 rule\n");
357 kfree(rt_rule);
358 return -EPERM;
359 }
360 IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
361 dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
362
363 kfree(rt_rule);
364 return 0;
365}
366
367static void ipa_del_dflt_wan_rt_tables(void)
368{
369 struct ipa_ioc_del_rt_rule *rt_rule;
370 struct ipa_rt_rule_del *rt_rule_entry;
371 int len;
372
373 len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
374 sizeof(struct ipa_rt_rule_del);
375 rt_rule = kzalloc(len, GFP_KERNEL);
376 if (!rt_rule) {
377 IPAWANERR("unable to allocate memory for del route rule\n");
378 return;
379 }
380
381 memset(rt_rule, 0, len);
382 rt_rule->commit = 1;
383 rt_rule->num_hdls = 1;
384 rt_rule->ip = IPA_IP_v4;
385
386 rt_rule_entry = &rt_rule->hdl[0];
387 rt_rule_entry->status = -1;
388 rt_rule_entry->hdl = dflt_v4_wan_rt_hdl;
389
390 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
391 rt_rule_entry->hdl, IPA_IP_v4);
392 if (ipa2_del_rt_rule(rt_rule) ||
393 (rt_rule_entry->status)) {
394 IPAWANERR("Routing rule deletion failed!\n");
395 }
396
397 rt_rule->ip = IPA_IP_v6;
398 rt_rule_entry->hdl = dflt_v6_wan_rt_hdl;
399 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
400 rt_rule_entry->hdl, IPA_IP_v6);
401 if (ipa2_del_rt_rule(rt_rule) ||
402 (rt_rule_entry->status)) {
403 IPAWANERR("Routing rule deletion failed!\n");
404 }
405
406 kfree(rt_rule);
407}
408
/*
 * copy_ul_filter_rule_to_ipa() - cache modem UL filter rules locally
 * @rule_req: QMI install-filter-rule request received from the modem
 * @rule_hdl: out array; receives one synthetic handle per cached rule
 *            (assumes the caller provides at least MAX_NUM_Q6_RULE slots
 *            — TODO confirm against callers)
 *
 * Copies every filter spec from the QMI message into the file-scope
 * ipa_qmi_ctx->q6_ul_filter_rule[] cache, field by field, and marks
 * xlat rules when the xlat index list is present. On any validation
 * failure the whole cache and num_q6_rule are wiped.
 *
 * NOTE(review): ipa_qmi_ctx and num_q6_rule are accessed without taking
 * ipa_qmi_lock here, unlike wwan_add_ul_flt_rule_to_ipa() — presumably
 * the caller serializes; verify.
 *
 * Return: 0 on success, -EINVAL on missing/invalid rule data.
 */
int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
		*rule_req, uint32_t *rule_hdl)
{
	int i, j;

	if (rule_req->filter_spec_list_valid == true) {
		num_q6_rule = rule_req->filter_spec_list_len;
		IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
	} else {
		num_q6_rule = 0;
		IPAWANERR("got no UL rules from modem\n");
		return -EINVAL;
	}

	/* copy UL filter rules from Modem*/
	for (i = 0; i < num_q6_rule; i++) {
		/* check if rules overside the cache*/
		if (i == MAX_NUM_Q6_RULE) {
			IPAWANERR("Reaching (%d) max cache ",
				MAX_NUM_Q6_RULE);
			IPAWANERR(" however total (%d)\n",
				num_q6_rule);
			goto failure;
		}
		/* construct UL_filter_rule handler QMI use-cas */
		/* handles are synthesized sequentially from a fixed base */
		ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl =
			UL_FILTER_RULE_HANDLE_START + i;
		rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
		ipa_qmi_ctx->q6_ul_filter_rule[i].ip =
			rule_req->filter_spec_list[i].ip_type;
		ipa_qmi_ctx->q6_ul_filter_rule[i].action =
			rule_req->filter_spec_list[i].filter_action;
		/* optional QMI fields are only copied when flagged valid */
		if (rule_req->filter_spec_list[i].is_routing_table_index_valid
			== true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
			rule_req->filter_spec_list[i].route_table_index;
		if (rule_req->filter_spec_list[i].is_mux_id_valid == true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].mux_id =
			rule_req->filter_spec_list[i].mux_id;
		/* translate the QMI equation attributes one field at a time */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
			rule_req->filter_spec_list[i].filter_rule.
			rule_eq_bitmap;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			tos_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
			rule_req->filter_spec_list[i].filter_rule.tos_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			protocol_eq_present = rule_req->filter_spec_list[i].
			filter_rule.protocol_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
			rule_req->filter_spec_list[i].filter_rule.
			protocol_eq;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_range_16;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_low = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].range_low;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_high = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].range_high;
		}
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].offset = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].mask = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].value = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].value;
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
			rule_req->filter_spec_list[i].filter_rule.tc_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
			rule_req->filter_spec_list[i].filter_rule.tc_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			flow_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
			rule_req->filter_spec_list[i].filter_rule.flow_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_meq_32 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].
			eq_attrib.num_ihl_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].mask = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].value = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].value;
		}
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_128;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_128; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].offset;
			/* 128-bit mask/value are copied as raw 16-byte blobs */
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].mask,
				rule_req->filter_spec_list[i].
				filter_rule.offset_meq_128[j].mask, 16);
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].value, rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].value, 16);
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32_present = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.offset = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.mask = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.mask;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
			value = rule_req->filter_spec_list[i].filter_rule.
			metadata_meq32.value;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ipv4_frag_eq_present = rule_req->filter_spec_list[i].
			filter_rule.ipv4_frag_eq_present;
	}

	/* optional xlat index list: every index must refer to a cached rule */
	if (rule_req->xlat_filter_indices_list_valid) {
		if (rule_req->xlat_filter_indices_list_len > num_q6_rule) {
			IPAWANERR("Number of xlat indices is not valid: %d\n",
					rule_req->xlat_filter_indices_list_len);
			goto failure;
		}
		IPAWANDBG("Receive %d XLAT indices: ",
				rule_req->xlat_filter_indices_list_len);
		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
			IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
		IPAWANDBG("\n");

		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
			if (rule_req->xlat_filter_indices_list[i]
				>= num_q6_rule) {
				IPAWANERR("Xlat rule idx is wrong: %d\n",
					rule_req->xlat_filter_indices_list[i]);
				goto failure;
			} else {
				ipa_qmi_ctx->q6_ul_filter_rule
					[rule_req->xlat_filter_indices_list[i]]
					.is_xlat_rule = 1;
				IPAWANDBG("Rule %d is xlat rule\n",
					rule_req->xlat_filter_indices_list[i]);
			}
		}
	}
	goto success;

failure:
	/* invalidate the whole cache on any error */
	num_q6_rule = 0;
	memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
		sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
	return -EINVAL;

success:
	return 0;
}
618
/*
 * wwan_add_ul_flt_rule_to_ipa() - install cached Q6 UL filter rules in IPA
 *
 * Installs each rule from ipa_qmi_ctx->q6_ul_filter_rule[] (populated by
 * copy_ul_filter_rule_to_ipa()) on the APPS_LAN_WAN_PROD pipe, one
 * commit per rule, then sends a fltr-installed notification back to Q6
 * with per-IP-family filter indices. ipa_qmi_lock guards accesses to the
 * shared rule cache.
 *
 * Return: 0 on success, -EFAULT on any install/notify failure (best
 * effort: remaining rules are still attempted), -ENOMEM on allocation
 * failure.
 */
static int wwan_add_ul_flt_rule_to_ipa(void)
{
	u32 pyld_sz;
	int i, retval = 0;
	int num_v4_rule = 0, num_v6_rule = 0;
	struct ipa_ioc_add_flt_rule *param;
	struct ipa_flt_rule_add flt_rule_entry;
	struct ipa_fltr_installed_notif_req_msg_v01 *req;

	if (ipa_qmi_ctx == NULL) {
		IPAWANERR("ipa_qmi_ctx is NULL!\n");
		return -EFAULT;
	}

	/* one-rule request buffer, reused for every rule in the loop */
	pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
	   sizeof(struct ipa_flt_rule_add);
	param = kzalloc(pyld_sz, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	req = (struct ipa_fltr_installed_notif_req_msg_v01 *)
		kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
			GFP_KERNEL);
	if (!req) {
		kfree(param);
		return -ENOMEM;
	}

	param->commit = 1;
	param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
	param->global = false;
	param->num_rules = (uint8_t)1;

	mutex_lock(&ipa_qmi_lock);
	for (i = 0; i < num_q6_rule; i++) {
		param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
		flt_rule_entry.at_rear = true;
		flt_rule_entry.rule.action =
			ipa_qmi_ctx->q6_ul_filter_rule[i].action;
		flt_rule_entry.rule.rt_tbl_idx
			= ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
		flt_rule_entry.rule.retain_hdr = true;

		/* debug rt-hdl*/
		IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
			i, flt_rule_entry.rule.rt_tbl_idx);
		flt_rule_entry.rule.eq_attrib_type = true;
		memcpy(&(flt_rule_entry.rule.eq_attrib),
			&ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
			sizeof(struct ipa_ipfltri_rule_eq));
		memcpy(&(param->rules[0]), &flt_rule_entry,
			sizeof(struct ipa_flt_rule_add));
		if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
			retval = -EFAULT;
			IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
		} else {
			/* store the rule handler */
			ipa_qmi_ctx->q6_ul_filter_rule_hdl[i] =
				param->rules[0].flt_rule_hdl;
		}
	}
	mutex_unlock(&ipa_qmi_lock);

	/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
	req->source_pipe_index =
		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
	req->install_status = QMI_RESULT_SUCCESS_V01;
	req->filter_index_list_len = num_q6_rule;
	mutex_lock(&ipa_qmi_lock);
	for (i = 0; i < num_q6_rule; i++) {
		/* indices are numbered separately per IP family */
		if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) {
			req->filter_index_list[i].filter_index = num_v4_rule;
			num_v4_rule++;
		} else {
			req->filter_index_list[i].filter_index = num_v6_rule;
			num_v6_rule++;
		}
		req->filter_index_list[i].filter_handle =
			ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
	}
	mutex_unlock(&ipa_qmi_lock);
	if (qmi_filter_notify_send(req)) {
		IPAWANDBG("add filter rule index on A7-RX failed\n");
		retval = -EFAULT;
	}
	/* remember how many rules are installed so they can be deleted */
	old_num_q6_rule = num_q6_rule;
	IPAWANDBG("add (%d) filter rule index on A7-RX\n",
		old_num_q6_rule);
	kfree(param);
	kfree(req);
	return retval;
}
712
/*
 * wwan_del_ul_flt_rule_to_ipa() - remove previously installed UL rules
 *
 * Deletes the old_num_q6_rule filter rules whose handles were stored in
 * ipa_qmi_ctx->q6_ul_filter_rule_hdl[] by wwan_add_ul_flt_rule_to_ipa(),
 * one commit per rule, then clears the a7_ul_flt_set indication.
 *
 * NOTE(review): unlike the add path, this walks the shared rule cache
 * without taking ipa_qmi_lock — presumably the caller serializes; verify.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EFAULT on the
 * first deletion failure (remaining rules are left installed).
 */
static int wwan_del_ul_flt_rule_to_ipa(void)
{
	u32 pyld_sz;
	int i, retval = 0;
	struct ipa_ioc_del_flt_rule *param;
	struct ipa_flt_rule_del flt_rule_entry;

	/* one-handle request buffer, reused for every rule */
	pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
	   sizeof(struct ipa_flt_rule_del);
	param = kzalloc(pyld_sz, GFP_KERNEL);
	if (!param) {
		IPAWANERR("kzalloc failed\n");
		return -ENOMEM;
	}

	param->commit = 1;
	param->num_hdls = (uint8_t) 1;

	for (i = 0; i < old_num_q6_rule; i++) {
		param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
		flt_rule_entry.hdl = ipa_qmi_ctx->q6_ul_filter_rule_hdl[i];
		/* debug rt-hdl*/
		IPAWANDBG("delete-IPA rule index(%d)\n", i);
		memcpy(&(param->hdl[0]), &flt_rule_entry,
			sizeof(struct ipa_flt_rule_del));
		if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
			IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
			kfree(param);
			return -EFAULT;
		}
	}

	/* set UL filter-rule add-indication */
	a7_ul_flt_set = false;
	old_num_q6_rule = 0;

	kfree(param);
	return retval;
}
753
754static int find_mux_channel_index(uint32_t mux_id)
755{
756 int i;
757
758 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
759 if (mux_id == mux_channel[i].mux_id)
760 return i;
761 }
762 return MAX_NUM_OF_MUX_CHANNEL;
763}
764
765static int find_vchannel_name_index(const char *vchannel_name)
766{
767 int i;
768
769 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
770 if (strcmp(mux_channel[i].vchannel_name, vchannel_name == 0))
771 return i;
772 }
773 return MAX_NUM_OF_MUX_CHANNEL;
774}
775
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530776static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
777{
778 int i;
779
780 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
781 if (strcmp(mux_channel[i].vchannel_name,
782 upstreamIface) == 0)
783 return IPA_UPSTEAM_MODEM;
784 }
785
786 if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
787 return IPA_UPSTEAM_WLAN;
788 else
789 return IPA_UPSTEAM_MAX;
790}
791
/*
 * wwan_register_to_ipa() - register one mux channel's interface with IPA
 * @index: slot in mux_channel[] to register
 *
 * Lazily adds the channel's QMAP header, then registers the interface
 * with IPA carrying: v4/v6 TX properties (WAN_CONS + per-mux header),
 * v4/v6 RX properties (metadata match on the mux id), and one extended
 * property per cached Q6 UL filter rule.
 *
 * Return: 0 on success, negative errno on header-add, allocation or
 * registration failure.
 */
static int wwan_register_to_ipa(int index)
{
	struct ipa_tx_intf tx_properties = {0};
	struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
	struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
	struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
	struct ipa_rx_intf rx_properties = {0};
	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
	struct ipa_ext_intf ext_properties = {0};
	struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
	u32 pyld_sz;
	int ret = 0, i;

	IPAWANDBG("index(%d) device[%s]:\n", index,
		mux_channel[index].vchannel_name);
	/* add the per-mux QMAP header once per channel */
	if (!mux_channel[index].mux_hdr_set) {
		ret = ipa_add_qmap_hdr(mux_channel[index].mux_id,
		      &mux_channel[index].hdr_hdl);
		if (ret) {
			IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
			return ret;
		}
		mux_channel[index].mux_hdr_set = true;
	}
	/* TX: both v4 and v6 go to WAN_CONS with the same mux header */
	tx_properties.prop = tx_ioc_properties;
	tx_ipv4_property = &tx_properties.prop[0];
	tx_ipv4_property->ip = IPA_IP_v4;
	tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
	snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		A2_MUX_HDR_NAME_V4_PREF,
		mux_channel[index].mux_id);
	tx_ipv6_property = &tx_properties.prop[1];
	tx_ipv6_property->ip = IPA_IP_v6;
	tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
	/* no need use A2_MUX_HDR_NAME_V6_PREF, same header */
	snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		A2_MUX_HDR_NAME_V4_PREF,
		mux_channel[index].mux_id);
	tx_properties.num_props = 2;

	/* RX: match packets by mux id placed in the metadata field */
	rx_properties.prop = rx_ioc_properties;
	rx_ipv4_property = &rx_properties.prop[0];
	rx_ipv4_property->ip = IPA_IP_v4;
	rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
	rx_ipv4_property->attrib.meta_data =
		mux_channel[index].mux_id << WWAN_METADATA_SHFT;
	rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
	rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
	rx_ipv6_property = &rx_properties.prop[1];
	rx_ipv6_property->ip = IPA_IP_v6;
	rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
	rx_ipv6_property->attrib.meta_data =
		mux_channel[index].mux_id << WWAN_METADATA_SHFT;
	rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
	rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
	rx_properties.num_props = 2;

	/* EXT: one property per cached Q6 UL filter rule */
	pyld_sz = num_q6_rule *
	   sizeof(struct ipa_ioc_ext_intf_prop);
	ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
	if (!ext_ioc_properties) {
		IPAWANERR("Error allocate memory\n");
		return -ENOMEM;
	}

	ext_properties.prop = ext_ioc_properties;
	ext_properties.excp_pipe_valid = true;
	ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
	ext_properties.num_props = num_q6_rule;
	for (i = 0; i < num_q6_rule; i++) {
		memcpy(&(ext_properties.prop[i]),
			&(ipa_qmi_ctx->q6_ul_filter_rule[i]),
			sizeof(struct ipa_ioc_ext_intf_prop));
		ext_properties.prop[i].mux_id = mux_channel[index].mux_id;
		IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
			ext_properties.prop[i].ip,
			ext_properties.prop[i].rt_tbl_idx);
		IPAWANDBG("action: %d mux:%d\n",
			ext_properties.prop[i].action,
			ext_properties.prop[i].mux_id);
	}
	ret = ipa2_register_intf_ext(mux_channel[index].
		vchannel_name, &tx_properties,
		&rx_properties, &ext_properties);
	if (ret) {
		IPAWANERR("[%s]:ipa2_register_intf failed %d\n",
			mux_channel[index].vchannel_name, ret);
		goto fail;
	}
	mux_channel[index].ul_flt_reg = true;
fail:
	kfree(ext_ioc_properties);
	return ret;
}
888
889static void ipa_cleanup_deregister_intf(void)
890{
891 int i;
892 int ret;
893
894 for (i = 0; i < rmnet_index; i++) {
895 if (mux_channel[i].ul_flt_reg) {
896 ret = ipa2_deregister_intf(
897 mux_channel[i].vchannel_name);
898 if (ret < 0) {
899 IPAWANERR("de-register device %s(%d) failed\n",
900 mux_channel[i].vchannel_name,
901 i);
902 return;
903 }
904 IPAWANDBG("de-register device %s(%d) success\n",
905 mux_channel[i].vchannel_name,
906 i);
907 }
908 mux_channel[i].ul_flt_reg = false;
909 }
910}
911
/*
 * wwan_update_mux_channel_prop() - refresh UL rules and interface props
 *
 * Re-installs the A7 UL filter rules (deleting any previously installed
 * set first) when egress is configured and the modem does not filter on
 * its embedded pipe, then deregisters and re-registers every known mux
 * channel so IPA picks up the current Tx/Rx/Ext properties.
 *
 * Return: 0 on success (or when nothing is registered), -EINVAL when old
 * rules could not be deleted, -ENODEV when re-registration fails.
 */
int wwan_update_mux_channel_prop(void)
{
	int ret = 0, i;
	/* install UL filter rules */
	if (egress_set) {
		if (ipa_qmi_ctx &&
			ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
			IPAWANDBG("setup UL filter rules\n");
			if (a7_ul_flt_set) {
				IPAWANDBG("del previous UL filter rules\n");
				/* delete rule hdlers */
				ret = wwan_del_ul_flt_rule_to_ipa();
				if (ret) {
					IPAWANERR("failed to del old rules\n");
					return -EINVAL;
				}
				IPAWANDBG("deleted old UL rules\n");
			}
			ret = wwan_add_ul_flt_rule_to_ipa();
		}
		/* ret is still 0 when the modem filters its own pipe */
		if (ret)
			IPAWANERR("failed to install UL rules\n");
		else
			a7_ul_flt_set = true;
	}
	/* update Tx/Rx/Ext property */
	IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
	if (rmnet_index == 0) {
		IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
		return ret;
	}

	ipa_cleanup_deregister_intf();

	for (i = 0; i < rmnet_index; i++) {
		ret = wwan_register_to_ipa(i);
		if (ret < 0) {
			IPAWANERR("failed to re-regist %s, mux %d, index %d\n",
				mux_channel[i].vchannel_name,
				mux_channel[i].mux_id,
				i);
			return -ENODEV;
		}
		IPAWANERR("dev(%s) has registered to IPA\n",
			mux_channel[i].vchannel_name);
		mux_channel[i].ul_flt_reg = true;
	}
	return ret;
}
961
962#ifdef INIT_COMPLETION
963#define reinit_completion(x) INIT_COMPLETION(*(x))
964#endif /* INIT_COMPLETION */
965
966static int __ipa_wwan_open(struct net_device *dev)
967{
968 struct wwan_private *wwan_ptr = netdev_priv(dev);
969
970 IPAWANDBG("[%s] __wwan_open()\n", dev->name);
971 if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
972 reinit_completion(&wwan_ptr->resource_granted_completion);
973 wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
974
975 if (ipa_rmnet_res.ipa_napi_enable)
976 napi_enable(&(wwan_ptr->napi));
977 return 0;
978}
979
980/**
981 * wwan_open() - Opens the wwan network interface. Opens logical
982 * channel on A2 MUX driver and starts the network stack queue
983 *
984 * @dev: network device
985 *
986 * Return codes:
987 * 0: success
988 * -ENODEV: Error while opening logical channel on A2 MUX driver
989 */
990static int ipa_wwan_open(struct net_device *dev)
991{
992 int rc = 0;
993
994 IPAWANDBG("[%s] wwan_open()\n", dev->name);
995 rc = __ipa_wwan_open(dev);
996 if (rc == 0)
997 netif_start_queue(dev);
998 return rc;
999}
1000
1001static int __ipa_wwan_close(struct net_device *dev)
1002{
1003 struct wwan_private *wwan_ptr = netdev_priv(dev);
1004 int rc = 0;
1005
1006 if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
1007 wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
1008 /* do not close wwan port once up, this causes
1009 * remote side to hang if tried to open again
1010 */
1011 reinit_completion(&wwan_ptr->resource_granted_completion);
1012 if (ipa_rmnet_res.ipa_napi_enable)
1013 napi_disable(&(wwan_ptr->napi));
1014 rc = ipa2_deregister_intf(dev->name);
1015 if (rc) {
1016 IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n",
1017 dev->name, rc);
1018 return rc;
1019 }
1020 return rc;
1021 } else {
1022 return -EBADF;
1023 }
1024}
1025
1026/**
1027 * ipa_wwan_stop() - Stops the wwan network interface. Closes
1028 * logical channel on A2 MUX driver and stops the network stack
1029 * queue
1030 *
1031 * @dev: network device
1032 *
1033 * Return codes:
1034 * 0: success
1035 * -ENODEV: Error while opening logical channel on A2 MUX driver
1036 */
1037static int ipa_wwan_stop(struct net_device *dev)
1038{
1039 IPAWANDBG("[%s] ipa_wwan_stop()\n", dev->name);
1040 __ipa_wwan_close(dev);
1041 netif_stop_queue(dev);
1042 return 0;
1043}
1044
1045static int ipa_wwan_change_mtu(struct net_device *dev, int new_mtu)
1046{
1047 if (0 > new_mtu || WWAN_DATA_LEN < new_mtu)
1048 return -EINVAL;
1049 IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
1050 dev->name, dev->mtu, new_mtu);
1051 dev->mtu = new_mtu;
1052 return 0;
1053}
1054
1055/**
1056 * ipa_wwan_xmit() - Transmits an skb.
1057 *
1058 * @skb: skb to be transmitted
1059 * @dev: network device
1060 *
1061 * Return codes:
1062 * 0: success
1063 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
1064 * later
1065 * -EFAULT: Error while transmitting the skb
1066 */
1067static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
1068{
1069 int ret = 0;
1070 bool qmap_check;
1071 struct wwan_private *wwan_ptr = netdev_priv(dev);
1072 struct ipa_tx_meta meta;
1073
1074 if (skb->protocol != htons(ETH_P_MAP)) {
1075 IPAWANDBG
1076 ("SW filtering out none QMAP packet received from %s",
1077 current->comm);
Sunil Paidimarri6c818e82016-10-17 18:33:13 -07001078 dev_kfree_skb_any(skb);
1079 dev->stats.tx_dropped++;
Amir Levy9659e592016-10-27 18:08:27 +03001080 return NETDEV_TX_OK;
1081 }
1082
1083 qmap_check = RMNET_MAP_GET_CD_BIT(skb);
1084 if (netif_queue_stopped(dev)) {
1085 if (qmap_check &&
1086 atomic_read(&wwan_ptr->outstanding_pkts) <
1087 wwan_ptr->outstanding_high_ctl) {
1088 pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
1089 goto send;
1090 } else {
1091 pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
1092 return NETDEV_TX_BUSY;
1093 }
1094 }
1095
1096 /* checking High WM hit */
1097 if (atomic_read(&wwan_ptr->outstanding_pkts) >=
1098 wwan_ptr->outstanding_high) {
1099 if (!qmap_check) {
1100 IPAWANDBG("pending(%d)/(%d)- stop(%d), qmap_chk(%d)\n",
1101 atomic_read(&wwan_ptr->outstanding_pkts),
1102 wwan_ptr->outstanding_high,
1103 netif_queue_stopped(dev),
1104 qmap_check);
1105 netif_stop_queue(dev);
1106 return NETDEV_TX_BUSY;
1107 }
1108 }
1109
1110send:
1111 /* IPA_RM checking start */
1112 ret = ipa_rm_inactivity_timer_request_resource(
1113 IPA_RM_RESOURCE_WWAN_0_PROD);
1114 if (ret == -EINPROGRESS) {
1115 netif_stop_queue(dev);
1116 return NETDEV_TX_BUSY;
1117 }
1118 if (ret) {
1119 pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
1120 dev->name, ret);
Sunil Paidimarri6c818e82016-10-17 18:33:13 -07001121 dev_kfree_skb_any(skb);
1122 dev->stats.tx_dropped++;
Amir Levy9659e592016-10-27 18:08:27 +03001123 return -EFAULT;
1124 }
1125 /* IPA_RM checking end */
1126
1127 if (qmap_check) {
1128 memset(&meta, 0, sizeof(meta));
1129 meta.pkt_init_dst_ep_valid = true;
1130 meta.pkt_init_dst_ep_remote = true;
1131 ret = ipa2_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
1132 } else {
1133 ret = ipa2_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
1134 }
1135
1136 if (ret) {
1137 ret = NETDEV_TX_BUSY;
Amir Levy9659e592016-10-27 18:08:27 +03001138 goto out;
1139 }
1140
1141 atomic_inc(&wwan_ptr->outstanding_pkts);
1142 dev->stats.tx_packets++;
1143 dev->stats.tx_bytes += skb->len;
1144 ret = NETDEV_TX_OK;
1145out:
1146 ipa_rm_inactivity_timer_release_resource(
1147 IPA_RM_RESOURCE_WWAN_0_PROD);
1148 return ret;
1149}
1150
/* ndo_tx_timeout hook: logs the uplink data stall; no recovery action
 * is taken here.
 */
static void ipa_wwan_tx_timeout(struct net_device *dev)
{
	IPAWANERR("[%s] ipa_wwan_tx_timeout(), data stall in UL\n", dev->name);
}
1155
1156/**
1157 * apps_ipa_tx_complete_notify() - Rx notify
1158 *
1159 * @priv: driver context
1160 * @evt: event type
1161 * @data: data provided with event
1162 *
1163 * Check that the packet is the one we sent and release it
1164 * This function will be called in defered context in IPA wq.
1165 */
1166static void apps_ipa_tx_complete_notify(void *priv,
1167 enum ipa_dp_evt_type evt,
1168 unsigned long data)
1169{
1170 struct sk_buff *skb = (struct sk_buff *)data;
1171 struct net_device *dev = (struct net_device *)priv;
1172 struct wwan_private *wwan_ptr;
1173
1174 if (dev != ipa_netdevs[0]) {
1175 IPAWANDBG("Received pre-SSR packet completion\n");
1176 dev_kfree_skb_any(skb);
1177 return;
1178 }
1179
1180 if (evt != IPA_WRITE_DONE) {
1181 IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
1182 dev_kfree_skb_any(skb);
1183 dev->stats.tx_dropped++;
1184 return;
1185 }
1186
1187 wwan_ptr = netdev_priv(dev);
1188 atomic_dec(&wwan_ptr->outstanding_pkts);
1189 __netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
1190 if (!atomic_read(&is_ssr) &&
1191 netif_queue_stopped(wwan_ptr->net) &&
1192 atomic_read(&wwan_ptr->outstanding_pkts) <
1193 (wwan_ptr->outstanding_low)) {
1194 IPAWANDBG("Outstanding low (%d) - wake up queue\n",
1195 wwan_ptr->outstanding_low);
1196 netif_wake_queue(wwan_ptr->net);
1197 }
1198 __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
1199 dev_kfree_skb_any(skb);
1200 ipa_rm_inactivity_timer_release_resource(
1201 IPA_RM_RESOURCE_WWAN_0_PROD);
1202}
1203
1204/**
1205 * apps_ipa_packet_receive_notify() - Rx notify
1206 *
1207 * @priv: driver context
1208 * @evt: event type
1209 * @data: data provided with event
1210 *
1211 * IPA will pass a packet to the Linux network stack with skb->data
1212 */
1213static void apps_ipa_packet_receive_notify(void *priv,
1214 enum ipa_dp_evt_type evt,
1215 unsigned long data)
1216{
1217 struct net_device *dev = (struct net_device *)priv;
1218
1219 if (evt == IPA_RECEIVE) {
1220 struct sk_buff *skb = (struct sk_buff *)data;
1221 int result;
1222 unsigned int packet_len = skb->len;
1223
1224 IPAWANDBG("Rx packet was received");
1225 skb->dev = ipa_netdevs[0];
1226 skb->protocol = htons(ETH_P_MAP);
1227
1228 if (ipa_rmnet_res.ipa_napi_enable) {
1229 trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets);
1230 result = netif_receive_skb(skb);
1231 } else {
1232 if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
1233 == 0) {
1234 trace_rmnet_ipa_netifni(dev->stats.rx_packets);
1235 result = netif_rx_ni(skb);
1236 } else {
1237 trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
1238 result = netif_rx(skb);
1239 }
1240 }
1241
1242 if (result) {
1243 pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
1244 __func__, __LINE__);
1245 dev->stats.rx_dropped++;
1246 }
1247 dev->stats.rx_packets++;
1248 dev->stats.rx_bytes += packet_len;
1249 } else if (evt == IPA_CLIENT_START_POLL)
1250 ipa_rmnet_rx_cb(priv);
1251 else if (evt == IPA_CLIENT_COMP_NAPI) {
1252 struct wwan_private *wwan_ptr = netdev_priv(dev);
1253
1254 if (ipa_rmnet_res.ipa_napi_enable)
1255 napi_complete(&(wwan_ptr->napi));
1256 } else
1257 IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
1258
1259}
1260
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001261static int handle_ingress_format(struct net_device *dev,
1262 struct rmnet_ioctl_extended_s *in)
1263{
1264 int ret = 0;
1265 struct rmnet_phys_ep_conf_s *ep_cfg;
1266
1267 IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
1268 if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
1269 ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en =
1270 IPA_ENABLE_CS_OFFLOAD_DL;
1271
1272 if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
1273 IPAWANERR("get AGG size %d count %d\n",
1274 in->u.ingress_format.agg_size,
1275 in->u.ingress_format.agg_count);
1276
1277 ret = ipa_disable_apps_wan_cons_deaggr(
1278 in->u.ingress_format.agg_size,
1279 in->u.ingress_format.agg_count);
1280
1281 if (!ret) {
1282 ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit =
1283 in->u.ingress_format.agg_size;
1284 ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
1285 in->u.ingress_format.agg_count;
1286
1287 if (ipa_rmnet_res.ipa_napi_enable) {
1288 ipa_to_apps_ep_cfg.recycle_enabled = true;
1289 ep_cfg = (struct rmnet_phys_ep_conf_s *)
1290 rcu_dereference(dev->rx_handler_data);
1291 ep_cfg->recycle = ipa_recycle_wan_skb;
1292 pr_info("Wan Recycle Enabled\n");
1293 }
1294 }
1295 }
1296
1297 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
1298 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
1299 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
1300 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
1301 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
1302
1303 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
1304 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
1305 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding =
1306 true;
1307 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
1308 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
1309 ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
1310
1311 ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
1312 ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify;
1313 ipa_to_apps_ep_cfg.priv = dev;
1314
1315 ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
Sunil Paidimarri226cf032016-10-14 13:33:08 -07001316 ipa_to_apps_ep_cfg.desc_fifo_sz =
1317 ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001318
1319 mutex_lock(&ipa_to_apps_pipe_handle_guard);
1320 if (atomic_read(&is_ssr)) {
1321 IPAWANDBG("In SSR sequence/recovery\n");
1322 mutex_unlock(&ipa_to_apps_pipe_handle_guard);
1323 return -EFAULT;
1324 }
1325 ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
1326 mutex_unlock(&ipa_to_apps_pipe_handle_guard);
1327
1328 if (ret)
1329 IPAWANERR("failed to configure ingress\n");
1330
1331 return ret;
1332}
1333
Amir Levy9659e592016-10-27 18:08:27 +03001334/**
1335 * ipa_wwan_ioctl() - I/O control for wwan network driver.
1336 *
1337 * @dev: network device
1338 * @ifr: ignored
1339 * @cmd: cmd to be excecuded. can be one of the following:
1340 * IPA_WWAN_IOCTL_OPEN - Open the network interface
1341 * IPA_WWAN_IOCTL_CLOSE - Close the network interface
1342 *
1343 * Return codes:
1344 * 0: success
1345 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
1346 * later
1347 * -EFAULT: Error while transmitting the skb
1348 */
1349static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1350{
1351 int rc = 0;
1352 int mru = 1000, epid = 1, mux_index, len;
1353 struct ipa_msg_meta msg_meta;
1354 struct ipa_wan_msg *wan_msg = NULL;
1355 struct rmnet_ioctl_extended_s extend_ioctl_data;
1356 struct rmnet_ioctl_data_s ioctl_data;
1357
1358 IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
1359 switch (cmd) {
1360 /* Set Ethernet protocol */
1361 case RMNET_IOCTL_SET_LLP_ETHERNET:
1362 break;
1363 /* Set RAWIP protocol */
1364 case RMNET_IOCTL_SET_LLP_IP:
1365 break;
1366 /* Get link protocol */
1367 case RMNET_IOCTL_GET_LLP:
1368 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
1369 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1370 sizeof(struct rmnet_ioctl_data_s)))
1371 rc = -EFAULT;
1372 break;
1373 /* Set QoS header enabled */
1374 case RMNET_IOCTL_SET_QOS_ENABLE:
1375 return -EINVAL;
1376 /* Set QoS header disabled */
1377 case RMNET_IOCTL_SET_QOS_DISABLE:
1378 break;
1379 /* Get QoS header state */
1380 case RMNET_IOCTL_GET_QOS:
1381 ioctl_data.u.operation_mode = RMNET_MODE_NONE;
1382 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1383 sizeof(struct rmnet_ioctl_data_s)))
1384 rc = -EFAULT;
1385 break;
1386 /* Get operation mode */
1387 case RMNET_IOCTL_GET_OPMODE:
1388 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
1389 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1390 sizeof(struct rmnet_ioctl_data_s)))
1391 rc = -EFAULT;
1392 break;
1393 /* Open transport port */
1394 case RMNET_IOCTL_OPEN:
1395 break;
1396 /* Close transport port */
1397 case RMNET_IOCTL_CLOSE:
1398 break;
1399 /* Flow enable */
1400 case RMNET_IOCTL_FLOW_ENABLE:
1401 IPAWANDBG("Received flow enable\n");
1402 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
1403 sizeof(struct rmnet_ioctl_data_s))) {
1404 rc = -EFAULT;
1405 break;
1406 }
1407 ipa_flow_control(IPA_CLIENT_USB_PROD, true,
1408 ioctl_data.u.tcm_handle);
1409 break;
1410 /* Flow disable */
1411 case RMNET_IOCTL_FLOW_DISABLE:
1412 IPAWANDBG("Received flow disable\n");
1413 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
1414 sizeof(struct rmnet_ioctl_data_s))) {
1415 rc = -EFAULT;
1416 break;
1417 }
1418 ipa_flow_control(IPA_CLIENT_USB_PROD, false,
1419 ioctl_data.u.tcm_handle);
1420 break;
1421 /* Set flow handle */
1422 case RMNET_IOCTL_FLOW_SET_HNDL:
1423 break;
1424
1425 /* Extended IOCTLs */
1426 case RMNET_IOCTL_EXTENDED:
1427 IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
1428 if (copy_from_user(&extend_ioctl_data,
1429 (u8 *)ifr->ifr_ifru.ifru_data,
1430 sizeof(struct rmnet_ioctl_extended_s))) {
1431 IPAWANERR("failed to copy extended ioctl data\n");
1432 rc = -EFAULT;
1433 break;
1434 }
1435 switch (extend_ioctl_data.extended_ioctl) {
1436 /* Get features */
1437 case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
1438 IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
1439 extend_ioctl_data.u.data =
1440 (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
1441 RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
1442 RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
1443 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1444 &extend_ioctl_data,
1445 sizeof(struct rmnet_ioctl_extended_s)))
1446 rc = -EFAULT;
1447 break;
1448 /* Set MRU */
1449 case RMNET_IOCTL_SET_MRU:
1450 mru = extend_ioctl_data.u.data;
1451 IPAWANDBG("get MRU size %d\n",
1452 extend_ioctl_data.u.data);
1453 break;
1454 /* Get MRU */
1455 case RMNET_IOCTL_GET_MRU:
1456 extend_ioctl_data.u.data = mru;
1457 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1458 &extend_ioctl_data,
1459 sizeof(struct rmnet_ioctl_extended_s)))
1460 rc = -EFAULT;
1461 break;
1462 /* GET SG support */
1463 case RMNET_IOCTL_GET_SG_SUPPORT:
1464 extend_ioctl_data.u.data =
1465 ipa_rmnet_res.ipa_advertise_sg_support;
1466 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1467 &extend_ioctl_data,
1468 sizeof(struct rmnet_ioctl_extended_s)))
1469 rc = -EFAULT;
1470 break;
1471 /* Get endpoint ID */
1472 case RMNET_IOCTL_GET_EPID:
1473 IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
1474 extend_ioctl_data.u.data = epid;
1475 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1476 &extend_ioctl_data,
1477 sizeof(struct rmnet_ioctl_extended_s)))
1478 rc = -EFAULT;
1479 if (copy_from_user(&extend_ioctl_data,
1480 (u8 *)ifr->ifr_ifru.ifru_data,
1481 sizeof(struct rmnet_ioctl_extended_s))) {
1482 IPAWANERR("copy extended ioctl data failed\n");
1483 rc = -EFAULT;
1484 break;
1485 }
1486 IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
1487 extend_ioctl_data.u.data);
1488 break;
1489 /* Endpoint pair */
1490 case RMNET_IOCTL_GET_EP_PAIR:
1491 IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
1492 extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
1493 ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
1494 extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
1495 ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
1496 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1497 &extend_ioctl_data,
1498 sizeof(struct rmnet_ioctl_extended_s)))
1499 rc = -EFAULT;
1500 if (copy_from_user(&extend_ioctl_data,
1501 (u8 *)ifr->ifr_ifru.ifru_data,
1502 sizeof(struct rmnet_ioctl_extended_s))) {
1503 IPAWANERR("copy extended ioctl data failed\n");
1504 rc = -EFAULT;
1505 break;
1506 }
1507 IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
1508 extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
1509 extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
1510 break;
1511 /* Get driver name */
1512 case RMNET_IOCTL_GET_DRIVER_NAME:
1513 memcpy(&extend_ioctl_data.u.if_name,
1514 ipa_netdevs[0]->name,
1515 sizeof(IFNAMSIZ));
1516 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1517 &extend_ioctl_data,
1518 sizeof(struct rmnet_ioctl_extended_s)))
1519 rc = -EFAULT;
1520 break;
1521 /* Add MUX ID */
1522 case RMNET_IOCTL_ADD_MUX_CHANNEL:
1523 mux_index = find_mux_channel_index(
1524 extend_ioctl_data.u.rmnet_mux_val.mux_id);
1525 if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
1526 IPAWANDBG("already setup mux(%d)\n",
1527 extend_ioctl_data.u.
1528 rmnet_mux_val.mux_id);
1529 return rc;
1530 }
Skylar Chang8438ba52017-03-15 21:27:35 -07001531 mutex_lock(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03001532 if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
1533 IPAWANERR("Exceed mux_channel limit(%d)\n",
1534 rmnet_index);
Skylar Chang8438ba52017-03-15 21:27:35 -07001535 mutex_unlock(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03001536 return -EFAULT;
1537 }
1538 IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
1539 extend_ioctl_data.u.rmnet_mux_val.mux_id,
1540 extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
1541 /* cache the mux name and id */
1542 mux_channel[rmnet_index].mux_id =
1543 extend_ioctl_data.u.rmnet_mux_val.mux_id;
1544 memcpy(mux_channel[rmnet_index].vchannel_name,
1545 extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
1546 sizeof(mux_channel[rmnet_index].vchannel_name));
Skylar Changba7c5112017-04-14 19:23:05 -07001547 mux_channel[rmnet_index].vchannel_name[
1548 IFNAMSIZ - 1] = '\0';
1549
Amir Levy9659e592016-10-27 18:08:27 +03001550 IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
1551 mux_channel[rmnet_index].vchannel_name,
1552 mux_channel[rmnet_index].mux_id,
1553 rmnet_index);
1554 /* check if UL filter rules coming*/
1555 if (num_q6_rule != 0) {
1556 IPAWANERR("dev(%s) register to IPA\n",
1557 extend_ioctl_data.u.rmnet_mux_val.
1558 vchannel_name);
1559 rc = wwan_register_to_ipa(rmnet_index);
1560 if (rc < 0) {
1561 IPAWANERR("device %s reg IPA failed\n",
1562 extend_ioctl_data.u.
1563 rmnet_mux_val.vchannel_name);
Skylar Chang8438ba52017-03-15 21:27:35 -07001564 mutex_unlock(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03001565 return -ENODEV;
1566 }
1567 mux_channel[rmnet_index].mux_channel_set = true;
1568 mux_channel[rmnet_index].ul_flt_reg = true;
1569 } else {
1570 IPAWANDBG("dev(%s) haven't registered to IPA\n",
1571 extend_ioctl_data.u.
1572 rmnet_mux_val.vchannel_name);
1573 mux_channel[rmnet_index].mux_channel_set = true;
1574 mux_channel[rmnet_index].ul_flt_reg = false;
1575 }
1576 rmnet_index++;
Skylar Chang8438ba52017-03-15 21:27:35 -07001577 mutex_unlock(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03001578 break;
1579 case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
1580 IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
1581 if ((extend_ioctl_data.u.data) &
1582 RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
1583 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
1584 apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
1585 cs_offload_en =
1586 IPA_ENABLE_CS_OFFLOAD_UL;
1587 apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
1588 cs_metadata_hdr_offset = 1;
1589 } else {
1590 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
1591 }
1592 if ((extend_ioctl_data.u.data) &
1593 RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
1594 apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
1595 IPA_ENABLE_AGGR;
1596 else
1597 apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
1598 IPA_BYPASS_AGGR;
1599 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
1600 hdr_ofst_metadata_valid = 1;
1601 /* modem want offset at 0! */
1602 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
1603 apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst =
1604 IPA_CLIENT_APPS_LAN_WAN_PROD;
1605 apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC;
1606
1607 apps_to_ipa_ep_cfg.client =
1608 IPA_CLIENT_APPS_LAN_WAN_PROD;
1609 apps_to_ipa_ep_cfg.notify =
1610 apps_ipa_tx_complete_notify;
1611 apps_to_ipa_ep_cfg.desc_fifo_sz =
1612 IPA_SYS_TX_DATA_DESC_FIFO_SZ;
1613 apps_to_ipa_ep_cfg.priv = dev;
1614
1615 rc = ipa2_setup_sys_pipe(&apps_to_ipa_ep_cfg,
1616 &apps_to_ipa_hdl);
1617 if (rc)
1618 IPAWANERR("failed to config egress endpoint\n");
1619
1620 if (num_q6_rule != 0) {
1621 /* already got Q6 UL filter rules*/
1622 if (ipa_qmi_ctx &&
1623 ipa_qmi_ctx->modem_cfg_emb_pipe_flt
1624 == false)
1625 rc = wwan_add_ul_flt_rule_to_ipa();
1626 else
1627 rc = 0;
1628 egress_set = true;
1629 if (rc)
1630 IPAWANERR("install UL rules failed\n");
1631 else
1632 a7_ul_flt_set = true;
1633 } else {
1634 /* wait Q6 UL filter rules*/
1635 egress_set = true;
1636 IPAWANDBG("no UL-rules, egress_set(%d)\n",
1637 egress_set);
1638 }
1639 break;
1640 case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001641 rc = handle_ingress_format(dev, &extend_ioctl_data);
Amir Levy9659e592016-10-27 18:08:27 +03001642 break;
1643 case RMNET_IOCTL_SET_XLAT_DEV_INFO:
1644 wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
1645 GFP_KERNEL);
1646 if (!wan_msg) {
1647 IPAWANERR("Failed to allocate memory.\n");
1648 return -ENOMEM;
1649 }
1650 len = sizeof(wan_msg->upstream_ifname) >
1651 sizeof(extend_ioctl_data.u.if_name) ?
1652 sizeof(extend_ioctl_data.u.if_name) :
1653 sizeof(wan_msg->upstream_ifname);
1654 strlcpy(wan_msg->upstream_ifname,
1655 extend_ioctl_data.u.if_name, len);
1656 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
1657 msg_meta.msg_type = WAN_XLAT_CONNECT;
1658 msg_meta.msg_len = sizeof(struct ipa_wan_msg);
1659 rc = ipa2_send_msg(&msg_meta, wan_msg,
1660 ipa_wwan_msg_free_cb);
1661 if (rc) {
1662 IPAWANERR("Failed to send XLAT_CONNECT msg\n");
1663 kfree(wan_msg);
1664 }
1665 break;
1666 /* Get agg count */
1667 case RMNET_IOCTL_GET_AGGREGATION_COUNT:
1668 break;
1669 /* Set agg count */
1670 case RMNET_IOCTL_SET_AGGREGATION_COUNT:
1671 break;
1672 /* Get agg size */
1673 case RMNET_IOCTL_GET_AGGREGATION_SIZE:
1674 break;
1675 /* Set agg size */
1676 case RMNET_IOCTL_SET_AGGREGATION_SIZE:
1677 break;
1678 /* Do flow control */
1679 case RMNET_IOCTL_FLOW_CONTROL:
1680 break;
1681 /* For legacy use */
1682 case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
1683 break;
1684 /* Get HW/SW map */
1685 case RMNET_IOCTL_GET_HWSW_MAP:
1686 break;
1687 /* Set RX Headroom */
1688 case RMNET_IOCTL_SET_RX_HEADROOM:
1689 break;
1690 default:
1691 IPAWANERR("[%s] unsupported extended cmd[%d]",
1692 dev->name,
1693 extend_ioctl_data.extended_ioctl);
1694 rc = -EINVAL;
1695 }
1696 break;
1697 default:
1698 IPAWANERR("[%s] unsupported cmd[%d]",
1699 dev->name, cmd);
1700 rc = -EINVAL;
1701 }
1702 return rc;
1703}
1704
1705static const struct net_device_ops ipa_wwan_ops_ip = {
1706 .ndo_open = ipa_wwan_open,
1707 .ndo_stop = ipa_wwan_stop,
1708 .ndo_start_xmit = ipa_wwan_xmit,
1709 .ndo_tx_timeout = ipa_wwan_tx_timeout,
1710 .ndo_do_ioctl = ipa_wwan_ioctl,
1711 .ndo_change_mtu = ipa_wwan_change_mtu,
1712 .ndo_set_mac_address = 0,
1713 .ndo_validate_addr = 0,
1714};
1715
1716/**
1717 * wwan_setup() - Setups the wwan network driver.
1718 *
1719 * @dev: network device
1720 *
1721 * Return codes:
1722 * None
1723 */
1724
1725static void ipa_wwan_setup(struct net_device *dev)
1726{
1727 dev->netdev_ops = &ipa_wwan_ops_ip;
1728 ether_setup(dev);
1729 /* set this after calling ether_setup */
1730 dev->header_ops = 0; /* No header */
1731 dev->type = ARPHRD_RAWIP;
1732 dev->hard_header_len = 0;
1733 dev->mtu = WWAN_DATA_LEN;
1734 dev->addr_len = 0;
1735 dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
1736 dev->needed_headroom = HEADROOM_FOR_QMAP;
1737 dev->needed_tailroom = TAILROOM;
1738 dev->watchdog_timeo = 1000;
1739}
1740
1741/* IPA_RM related functions start*/
static void q6_prod_rm_request_resource(struct work_struct *work);
/* deferred request of the Q6 producer resource (runs on ipa_rm_q6_workqueue) */
static DECLARE_DELAYED_WORK(q6_con_rm_request, q6_prod_rm_request_resource);
static void q6_prod_rm_release_resource(struct work_struct *work);
/* deferred release of the Q6 producer resource */
static DECLARE_DELAYED_WORK(q6_con_rm_release, q6_prod_rm_release_resource);
1746
1747static void q6_prod_rm_request_resource(struct work_struct *work)
1748{
1749 int ret = 0;
1750
1751 ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
1752 if (ret < 0 && ret != -EINPROGRESS) {
1753 IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
1754 ret);
1755 return;
1756 }
1757}
1758
1759static int q6_rm_request_resource(void)
1760{
1761 queue_delayed_work(ipa_rm_q6_workqueue,
1762 &q6_con_rm_request, 0);
1763 return 0;
1764}
1765
1766static void q6_prod_rm_release_resource(struct work_struct *work)
1767{
1768 int ret = 0;
1769
1770 ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
1771 if (ret < 0 && ret != -EINPROGRESS) {
1772 IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
1773 ret);
1774 return;
1775 }
1776}
1777
1778
1779static int q6_rm_release_resource(void)
1780{
1781 queue_delayed_work(ipa_rm_q6_workqueue,
1782 &q6_con_rm_release, 0);
1783 return 0;
1784}
1785
1786
1787static void q6_rm_notify_cb(void *user_data,
1788 enum ipa_rm_event event,
1789 unsigned long data)
1790{
1791 switch (event) {
1792 case IPA_RM_RESOURCE_GRANTED:
1793 IPAWANDBG("%s: Q6_PROD GRANTED CB\n", __func__);
1794 break;
1795 case IPA_RM_RESOURCE_RELEASED:
1796 IPAWANDBG("%s: Q6_PROD RELEASED CB\n", __func__);
1797 break;
1798 default:
1799 return;
1800 }
1801}
1802static int q6_initialize_rm(void)
1803{
1804 struct ipa_rm_create_params create_params;
1805 struct ipa_rm_perf_profile profile;
1806 int result;
1807
1808 /* Initialize IPA_RM workqueue */
1809 ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req");
1810 if (!ipa_rm_q6_workqueue)
1811 return -ENOMEM;
1812
1813 memset(&create_params, 0, sizeof(create_params));
1814 create_params.name = IPA_RM_RESOURCE_Q6_PROD;
1815 create_params.reg_params.notify_cb = &q6_rm_notify_cb;
1816 result = ipa_rm_create_resource(&create_params);
1817 if (result)
1818 goto create_rsrc_err1;
1819 memset(&create_params, 0, sizeof(create_params));
1820 create_params.name = IPA_RM_RESOURCE_Q6_CONS;
1821 create_params.release_resource = &q6_rm_release_resource;
1822 create_params.request_resource = &q6_rm_request_resource;
1823 result = ipa_rm_create_resource(&create_params);
1824 if (result)
1825 goto create_rsrc_err2;
1826 /* add dependency*/
1827 result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
1828 IPA_RM_RESOURCE_APPS_CONS);
1829 if (result)
1830 goto add_dpnd_err;
1831 /* setup Performance profile */
1832 memset(&profile, 0, sizeof(profile));
1833 profile.max_supported_bandwidth_mbps = 100;
1834 result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
1835 &profile);
1836 if (result)
1837 goto set_perf_err;
1838 result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
1839 &profile);
1840 if (result)
1841 goto set_perf_err;
1842 return result;
1843
1844set_perf_err:
1845 ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
1846 IPA_RM_RESOURCE_APPS_CONS);
1847add_dpnd_err:
1848 result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
1849 if (result < 0)
1850 IPAWANERR("Error deleting resource %d, ret=%d\n",
1851 IPA_RM_RESOURCE_Q6_CONS, result);
1852create_rsrc_err2:
1853 result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
1854 if (result < 0)
1855 IPAWANERR("Error deleting resource %d, ret=%d\n",
1856 IPA_RM_RESOURCE_Q6_PROD, result);
1857create_rsrc_err1:
1858 destroy_workqueue(ipa_rm_q6_workqueue);
1859 return result;
1860}
1861
1862void q6_deinitialize_rm(void)
1863{
1864 int ret;
1865
1866 ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
1867 IPA_RM_RESOURCE_APPS_CONS);
1868 if (ret < 0)
1869 IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
1870 IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
1871 ret);
1872 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
1873 if (ret < 0)
1874 IPAWANERR("Error deleting resource %d, ret=%d\n",
1875 IPA_RM_RESOURCE_Q6_CONS, ret);
1876 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
1877 if (ret < 0)
1878 IPAWANERR("Error deleting resource %d, ret=%d\n",
1879 IPA_RM_RESOURCE_Q6_PROD, ret);
1880 destroy_workqueue(ipa_rm_q6_workqueue);
1881}
1882
1883static void wake_tx_queue(struct work_struct *work)
1884{
1885 if (ipa_netdevs[0]) {
1886 __netif_tx_lock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
1887 netif_wake_queue(ipa_netdevs[0]);
1888 __netif_tx_unlock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
1889 }
1890}
1891
1892/**
1893 * ipa_rm_resource_granted() - Called upon
1894 * IPA_RM_RESOURCE_GRANTED event. Wakes up queue is was stopped.
1895 *
1896 * @work: work object supplied ny workqueue
1897 *
1898 * Return codes:
1899 * None
1900 */
1901static void ipa_rm_resource_granted(void *dev)
1902{
1903 IPAWANDBG("Resource Granted - starting queue\n");
1904 schedule_work(&ipa_tx_wakequeue_work);
1905}
1906
1907/**
1908 * ipa_rm_notify() - Callback function for RM events. Handles
1909 * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
1910 * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
1911 * workqueue.
1912 *
1913 * @dev: network device
1914 * @event: IPA RM event
1915 * @data: Additional data provided by IPA RM
1916 *
1917 * Return codes:
1918 * None
1919 */
1920static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
1921 unsigned long data)
1922{
1923 struct wwan_private *wwan_ptr = netdev_priv(dev);
1924
1925 pr_debug("%s: event %d\n", __func__, event);
1926 switch (event) {
1927 case IPA_RM_RESOURCE_GRANTED:
1928 if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
1929 complete_all(&wwan_ptr->resource_granted_completion);
1930 break;
1931 }
1932 ipa_rm_resource_granted(dev);
1933 break;
1934 case IPA_RM_RESOURCE_RELEASED:
1935 break;
1936 default:
1937 pr_err("%s: unknown event %d\n", __func__, event);
1938 break;
1939 }
1940}
1941
1942/* IPA_RM related functions end*/
1943
/* subsystem-restart notifier callback, defined later in this file */
static int ssr_notifier_cb(struct notifier_block *this,
			   unsigned long code,
			   void *data);

/* notifier block registered for subsystem-restart events */
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};
1951
1952static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
1953 struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
1954{
Sunil Paidimarri226cf032016-10-14 13:33:08 -07001955 int result;
1956
1957 ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
Amir Levy9659e592016-10-27 18:08:27 +03001958 ipa_rmnet_drv_res->ipa_rmnet_ssr =
1959 of_property_read_bool(pdev->dev.of_node,
1960 "qcom,rmnet-ipa-ssr");
1961 pr_info("IPA SSR support = %s\n",
1962 ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
1963 ipa_rmnet_drv_res->ipa_loaduC =
1964 of_property_read_bool(pdev->dev.of_node,
1965 "qcom,ipa-loaduC");
1966 pr_info("IPA ipa-loaduC = %s\n",
1967 ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");
1968
1969 ipa_rmnet_drv_res->ipa_advertise_sg_support =
1970 of_property_read_bool(pdev->dev.of_node,
1971 "qcom,ipa-advertise-sg-support");
1972 pr_info("IPA SG support = %s\n",
1973 ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
1974
1975 ipa_rmnet_drv_res->ipa_napi_enable =
1976 of_property_read_bool(pdev->dev.of_node,
1977 "qcom,ipa-napi-enable");
1978 pr_info("IPA Napi Enable = %s\n",
1979 ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
Sunil Paidimarri226cf032016-10-14 13:33:08 -07001980
1981 /* Get IPA WAN RX desc fifo size */
1982 result = of_property_read_u32(pdev->dev.of_node,
1983 "qcom,wan-rx-desc-size",
1984 &ipa_rmnet_drv_res->wan_rx_desc_size);
1985 if (result)
1986 pr_info("using default for wan-rx-desc-size = %u\n",
1987 ipa_rmnet_drv_res->wan_rx_desc_size);
1988 else
1989 IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
1990 ipa_rmnet_drv_res->wan_rx_desc_size);
1991
Amir Levy9659e592016-10-27 18:08:27 +03001992 return 0;
1993}
1994
/* Driver-wide runtime state: SSR enablement flag, tether-stats polling
 * interval (seconds; 0 = polling stopped) and the metered MUX id used
 * for APN data-usage queries.
 */
struct ipa_rmnet_context ipa_rmnet_ctx;
1996
1997/**
1998 * ipa_wwan_probe() - Initialized the module and registers as a
1999 * network interface to the network stack
2000 *
2001 * Return codes:
2002 * 0: success
2003 * -ENOMEM: No memory available
2004 * -EFAULT: Internal error
2005 * -ENODEV: IPA driver not loaded
2006 */
2007static int ipa_wwan_probe(struct platform_device *pdev)
2008{
2009 int ret, i;
2010 struct net_device *dev;
2011 struct wwan_private *wwan_ptr;
2012 struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */
2013 struct ipa_rm_perf_profile profile; /* IPA_RM */
2014
2015 pr_info("rmnet_ipa started initialization\n");
2016
2017 if (!ipa2_is_ready()) {
2018 IPAWANERR("IPA driver not loaded\n");
2019 return -ENODEV;
2020 }
2021
2022 ret = get_ipa_rmnet_dts_configuration(pdev, &ipa_rmnet_res);
2023 ipa_rmnet_ctx.ipa_rmnet_ssr = ipa_rmnet_res.ipa_rmnet_ssr;
2024
2025 ret = ipa_init_q6_smem();
2026 if (ret) {
2027 IPAWANERR("ipa_init_q6_smem failed!\n");
2028 return ret;
2029 }
2030
2031 /* initialize tx/rx enpoint setup */
2032 memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
2033 memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
2034
2035 /* initialize ex property setup */
2036 num_q6_rule = 0;
2037 old_num_q6_rule = 0;
2038 rmnet_index = 0;
2039 egress_set = false;
2040 a7_ul_flt_set = false;
2041 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
2042 memset(&mux_channel[i], 0, sizeof(struct rmnet_mux_val));
2043
2044 /* start A7 QMI service/client */
2045 if (ipa_rmnet_res.ipa_loaduC)
2046 /* Android platform loads uC */
2047 ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
2048 else
2049 /* LE platform not loads uC */
2050 ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
2051
2052 /* construct default WAN RT tbl for IPACM */
2053 ret = ipa_setup_a7_qmap_hdr();
2054 if (ret)
2055 goto setup_a7_qmap_hdr_err;
2056 ret = ipa_setup_dflt_wan_rt_tables();
2057 if (ret)
2058 goto setup_dflt_wan_rt_tables_err;
2059
2060 if (!atomic_read(&is_ssr)) {
2061 /* Start transport-driver fd ioctl for ipacm for first init */
2062 ret = wan_ioctl_init();
2063 if (ret)
2064 goto wan_ioctl_init_err;
2065 } else {
2066 /* Enable sending QMI messages after SSR */
2067 wan_ioctl_enable_qmi_messages();
2068 }
2069
2070 /* initialize wan-driver netdev */
2071 dev = alloc_netdev(sizeof(struct wwan_private),
2072 IPA_WWAN_DEV_NAME,
2073 NET_NAME_UNKNOWN,
2074 ipa_wwan_setup);
2075 if (!dev) {
2076 IPAWANERR("no memory for netdev\n");
2077 ret = -ENOMEM;
2078 goto alloc_netdev_err;
2079 }
2080 ipa_netdevs[0] = dev;
2081 wwan_ptr = netdev_priv(dev);
2082 memset(wwan_ptr, 0, sizeof(*wwan_ptr));
2083 IPAWANDBG("wwan_ptr (private) = %p", wwan_ptr);
2084 wwan_ptr->net = dev;
2085 wwan_ptr->outstanding_high_ctl = DEFAULT_OUTSTANDING_HIGH_CTL;
2086 wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
2087 wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW;
2088 atomic_set(&wwan_ptr->outstanding_pkts, 0);
2089 spin_lock_init(&wwan_ptr->lock);
2090 init_completion(&wwan_ptr->resource_granted_completion);
2091
2092 if (!atomic_read(&is_ssr)) {
2093 /* IPA_RM configuration starts */
2094 ret = q6_initialize_rm();
2095 if (ret) {
2096 IPAWANERR("%s: q6_initialize_rm failed, ret: %d\n",
2097 __func__, ret);
2098 goto q6_init_err;
2099 }
2100 }
2101
2102 memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
2103 ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
2104 ipa_rm_params.reg_params.user_data = dev;
2105 ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
2106 ret = ipa_rm_create_resource(&ipa_rm_params);
2107 if (ret) {
2108 pr_err("%s: unable to create resourse %d in IPA RM\n",
2109 __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
2110 goto create_rsrc_err;
2111 }
2112 ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
2113 IPA_RM_INACTIVITY_TIMER);
2114 if (ret) {
2115 pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
2116 __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
2117 goto timer_init_err;
2118 }
2119 /* add dependency */
2120 ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2121 IPA_RM_RESOURCE_Q6_CONS);
2122 if (ret)
2123 goto add_dpnd_err;
2124 /* setup Performance profile */
2125 memset(&profile, 0, sizeof(profile));
2126 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
2127 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
2128 &profile);
2129 if (ret)
2130 goto set_perf_err;
2131 /* IPA_RM configuration ends */
2132
2133 /* Enable SG support in netdevice. */
2134 if (ipa_rmnet_res.ipa_advertise_sg_support)
2135 dev->hw_features |= NETIF_F_SG;
2136
2137 /* Enable NAPI support in netdevice. */
2138 if (ipa_rmnet_res.ipa_napi_enable) {
2139 netif_napi_add(dev, &(wwan_ptr->napi),
2140 ipa_rmnet_poll, NAPI_WEIGHT);
2141 }
2142
2143 ret = register_netdev(dev);
2144 if (ret) {
2145 IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
2146 0, ret);
2147 goto set_perf_err;
2148 }
2149
2150 IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n",
2151 ipa_netdevs[0]->name);
2152 if (ret) {
2153 IPAWANERR("default configuration failed rc=%d\n",
2154 ret);
2155 goto config_err;
2156 }
2157 atomic_set(&is_initialized, 1);
2158 if (!atomic_read(&is_ssr)) {
2159 /* offline charging mode */
2160 ipa2_proxy_clk_unvote();
2161 }
2162 atomic_set(&is_ssr, 0);
2163
2164 pr_info("rmnet_ipa completed initialization\n");
2165 return 0;
2166config_err:
2167 if (ipa_rmnet_res.ipa_napi_enable)
2168 netif_napi_del(&(wwan_ptr->napi));
2169 unregister_netdev(ipa_netdevs[0]);
2170set_perf_err:
2171 ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2172 IPA_RM_RESOURCE_Q6_CONS);
2173 if (ret)
2174 IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
2175 IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
2176 ret);
2177add_dpnd_err:
2178 ret = ipa_rm_inactivity_timer_destroy(
2179 IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
2180 if (ret)
2181 IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
2182 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2183timer_init_err:
2184 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2185 if (ret)
2186 IPAWANERR("Error deleting resource %d, ret=%d\n",
2187 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2188create_rsrc_err:
2189 q6_deinitialize_rm();
2190q6_init_err:
2191 free_netdev(ipa_netdevs[0]);
2192 ipa_netdevs[0] = NULL;
2193alloc_netdev_err:
2194 wan_ioctl_deinit();
2195wan_ioctl_init_err:
2196 ipa_del_dflt_wan_rt_tables();
2197setup_dflt_wan_rt_tables_err:
2198 ipa_del_a7_qmap_hdr();
2199setup_a7_qmap_hdr_err:
2200 ipa_qmi_service_exit();
2201 atomic_set(&is_ssr, 0);
2202 return ret;
2203}
2204
/**
 * ipa_wwan_remove() - tear down the wwan netdev and its IPA resources
 * @pdev: platform device being removed
 *
 * Mirrors ipa_wwan_probe() in reverse: tears down the IPA->APPS pipe,
 * unregisters the netdev, deletes the RM dependency/timer/resource,
 * flushes pending work and removes headers/routing/filtering state.
 * Also runs during modem SSR, triggered by platform_driver_unregister()
 * in ssr_notifier_cb().
 *
 * Always returns 0.
 */
static int ipa_wwan_remove(struct platform_device *pdev)
{
	int ret;
	struct wwan_private *wwan_ptr;

	wwan_ptr = netdev_priv(ipa_netdevs[0]);

	pr_info("rmnet_ipa started deinitialization\n");
	/* Pipe teardown and NAPI removal are serialized with users of
	 * ipa_to_apps_hdl via ipa_to_apps_pipe_handle_guard.
	 */
	mutex_lock(&ipa_to_apps_pipe_handle_guard);
	ret = ipa2_teardown_sys_pipe(ipa_to_apps_hdl);
	if (ret < 0)
		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
	else
		ipa_to_apps_hdl = -1;	/* mark handle invalid on success */
	if (ipa_rmnet_res.ipa_napi_enable)
		netif_napi_del(&(wwan_ptr->napi));
	mutex_unlock(&ipa_to_apps_pipe_handle_guard);
	unregister_netdev(ipa_netdevs[0]);
	/* Unwind the RM chain built in probe: dependency, inactivity
	 * timer, then the WWAN0_PROD resource itself. Failures are
	 * logged but teardown continues.
	 */
	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
			ret);
	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR(
		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	cancel_work_sync(&ipa_tx_wakequeue_work);
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	free_netdev(ipa_netdevs[0]);
	ipa_netdevs[0] = NULL;
	/* No need to remove wwan_ioctl during SSR */
	if (!atomic_read(&is_ssr))
		wan_ioctl_deinit();
	ipa_del_dflt_wan_rt_tables();
	ipa_del_a7_qmap_hdr();
	ipa_del_mux_qmap_hdrs();
	if (ipa_qmi_ctx && ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false)
		wwan_del_ul_flt_rule_to_ipa();
	ipa_cleanup_deregister_intf();
	atomic_set(&is_initialized, 0);
	pr_info("rmnet_ipa completed deinitialization\n");
	return 0;
}
2255
2256/**
2257* rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
2258* @dev: pointer to device
2259*
2260* This callback will be invoked by the runtime_pm framework when an AP suspend
2261* operation is invoked, usually by pressing a suspend button.
2262*
2263* Returns -EAGAIN to runtime_pm framework in case there are pending packets
2264* in the Tx queue. This will postpone the suspend operation until all the
2265* pending packets will be transmitted.
2266*
2267* In case there are no packets to send, releases the WWAN0_PROD entity.
2268* As an outcome, the number of IPA active clients should be decremented
2269* until IPA clocks can be gated.
2270*/
2271static int rmnet_ipa_ap_suspend(struct device *dev)
2272{
2273 struct net_device *netdev = ipa_netdevs[0];
2274 struct wwan_private *wwan_ptr = netdev_priv(netdev);
2275
2276 IPAWANDBG("Enter...\n");
2277 /* Do not allow A7 to suspend in case there are oustanding packets */
2278 if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
2279 IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
2280 return -EAGAIN;
2281 }
2282
2283 /* Make sure that there is no Tx operation ongoing */
2284 netif_tx_lock_bh(netdev);
2285 ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2286 netif_tx_unlock_bh(netdev);
2287 IPAWANDBG("Exit\n");
2288
2289 return 0;
2290}
2291
2292/**
2293* rmnet_ipa_ap_resume() - resume callback for runtime_pm
2294* @dev: pointer to device
2295*
2296* This callback will be invoked by the runtime_pm framework when an AP resume
2297* operation is invoked.
2298*
2299* Enables the network interface queue and returns success to the
2300* runtime_pm framework.
2301*/
2302static int rmnet_ipa_ap_resume(struct device *dev)
2303{
2304 struct net_device *netdev = ipa_netdevs[0];
2305
2306 IPAWANDBG("Enter...\n");
2307 netif_wake_queue(netdev);
2308 IPAWANDBG("Exit\n");
2309
2310 return 0;
2311}
2312
/* Stop periodic tether-stats polling: cancel queued work, then zero the
 * interval so a poll already running does not re-schedule itself (see
 * tethering_stats_poll_queue()). Note: cancel_delayed_work() does not
 * wait for an in-flight work item.
 */
static void ipa_stop_polling_stats(void)
{
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	ipa_rmnet_ctx.polling_interval = 0;
}
2318
/* Device-tree match table: binds this driver to the "qcom,rmnet-ipa" node. */
static const struct of_device_id rmnet_ipa_dt_match[] = {
	{.compatible = "qcom,rmnet-ipa"},
	{},
};
MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);

/* noirq PM callbacks; suspend may veto with -EAGAIN while Tx packets are
 * outstanding (see rmnet_ipa_ap_suspend()).
 */
static const struct dev_pm_ops rmnet_ipa_pm_ops = {
	.suspend_noirq = rmnet_ipa_ap_suspend,
	.resume_noirq = rmnet_ipa_ap_resume,
};

/* Platform driver; unregistered/re-registered across modem SSR by
 * ssr_notifier_cb().
 */
static struct platform_driver rmnet_ipa_driver = {
	.driver = {
		.name = "rmnet_ipa",
		.owner = THIS_MODULE,
		.pm = &rmnet_ipa_pm_ops,
		.of_match_table = rmnet_ipa_dt_match,
	},
	.probe = ipa_wwan_probe,
	.remove = ipa_wwan_remove,
};
2340
Skylar Chang09e0e252017-03-20 14:51:29 -07002341/**
2342 * rmnet_ipa_send_ssr_notification(bool ssr_done) - send SSR notification
2343 *
2344 * This function sends the SSR notification before modem shutdown and
2345 * after_powerup from SSR framework, to user-space module
2346 */
2347static void rmnet_ipa_send_ssr_notification(bool ssr_done)
2348{
2349 struct ipa_msg_meta msg_meta;
2350 int rc;
2351
2352 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2353 if (ssr_done)
2354 msg_meta.msg_type = IPA_SSR_AFTER_POWERUP;
2355 else
2356 msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN;
2357 rc = ipa_send_msg(&msg_meta, NULL, NULL);
2358 if (rc) {
2359 IPAWANERR("ipa_send_msg failed: %d\n", rc);
2360 return;
2361 }
2362}
2363
Amir Levy9659e592016-10-27 18:08:27 +03002364static int ssr_notifier_cb(struct notifier_block *this,
2365 unsigned long code,
2366 void *data)
2367{
2368 if (ipa_rmnet_ctx.ipa_rmnet_ssr) {
2369 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2370 pr_info("IPA received MPSS BEFORE_SHUTDOWN\n");
Skylar Chang09e0e252017-03-20 14:51:29 -07002371 /* send SSR before-shutdown notification to IPACM */
2372 rmnet_ipa_send_ssr_notification(false);
Amir Levy9659e592016-10-27 18:08:27 +03002373 atomic_set(&is_ssr, 1);
2374 ipa_q6_pre_shutdown_cleanup();
2375 if (ipa_netdevs[0])
2376 netif_stop_queue(ipa_netdevs[0]);
2377 ipa_qmi_stop_workqueues();
2378 wan_ioctl_stop_qmi_messages();
2379 ipa_stop_polling_stats();
2380 if (atomic_read(&is_initialized))
2381 platform_driver_unregister(&rmnet_ipa_driver);
2382 pr_info("IPA BEFORE_SHUTDOWN handling is complete\n");
2383 return NOTIFY_DONE;
2384 }
2385 if (code == SUBSYS_AFTER_SHUTDOWN) {
2386 pr_info("IPA received MPSS AFTER_SHUTDOWN\n");
2387 if (atomic_read(&is_ssr))
2388 ipa_q6_post_shutdown_cleanup();
2389 pr_info("IPA AFTER_SHUTDOWN handling is complete\n");
2390 return NOTIFY_DONE;
2391 }
2392 if (code == SUBSYS_AFTER_POWERUP) {
2393 pr_info("IPA received MPSS AFTER_POWERUP\n");
2394 if (!atomic_read(&is_initialized)
2395 && atomic_read(&is_ssr))
2396 platform_driver_register(&rmnet_ipa_driver);
2397 pr_info("IPA AFTER_POWERUP handling is complete\n");
2398 return NOTIFY_DONE;
2399 }
2400 if (code == SUBSYS_BEFORE_POWERUP) {
2401 pr_info("IPA received MPSS BEFORE_POWERUP\n");
2402 if (atomic_read(&is_ssr))
2403 /* clean up cached QMI msg/handlers */
2404 ipa_qmi_service_exit();
2405 ipa2_proxy_clk_vote();
2406 pr_info("IPA BEFORE_POWERUP handling is complete\n");
2407 return NOTIFY_DONE;
2408 }
2409 }
2410 return NOTIFY_DONE;
2411}
2412
2413/**
2414 * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa2_send_msg
2415 * @buff: pointer to buffer containing the message
2416 * @len: message len
2417 * @type: message type
2418 *
2419 * This function is invoked when ipa2_send_msg is complete (Provided as a
2420 * free function pointer along with the message).
2421 */
2422static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
2423{
2424 if (!buff) {
2425 IPAWANERR("Null buffer\n");
2426 return;
2427 }
2428
2429 if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
2430 type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
2431 IPAWANERR("Wrong type given. buff %p type %d\n",
2432 buff, type);
2433 }
2434 kfree(buff);
2435}
2436
2437/**
2438 * rmnet_ipa_get_stats_and_update(bool reset) - Gets pipe stats from Modem
2439 *
2440 * This function queries the IPA Modem driver for the pipe stats
2441 * via QMI, and updates the user space IPA entity.
2442 */
2443static void rmnet_ipa_get_stats_and_update(bool reset)
2444{
2445 struct ipa_get_data_stats_req_msg_v01 req;
2446 struct ipa_get_data_stats_resp_msg_v01 *resp;
2447 struct ipa_msg_meta msg_meta;
2448 int rc;
2449
2450 resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
2451 GFP_KERNEL);
2452 if (!resp) {
2453 IPAWANERR("Can't allocate memory for stats message\n");
2454 return;
2455 }
2456
2457 memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
2458 memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
2459
2460 req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
2461 if (reset == true) {
2462 req.reset_stats_valid = true;
2463 req.reset_stats = true;
2464 IPAWANERR("Get the latest pipe-stats and reset it\n");
2465 }
2466
2467 rc = ipa_qmi_get_data_stats(&req, resp);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002468 if (rc) {
2469 IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc);
2470 kfree(resp);
2471 return;
2472 }
Amir Levy9659e592016-10-27 18:08:27 +03002473
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002474 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2475 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
2476 msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
2477 rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2478 if (rc) {
2479 IPAWANERR("ipa2_send_msg failed: %d\n", rc);
2480 kfree(resp);
2481 return;
Amir Levy9659e592016-10-27 18:08:27 +03002482 }
2483}
2484
2485/**
2486 * tethering_stats_poll_queue() - Stats polling function
2487 * @work - Work entry
2488 *
2489 * This function is scheduled periodically (per the interval) in
2490 * order to poll the IPA Modem driver for the pipe stats.
2491 */
2492static void tethering_stats_poll_queue(struct work_struct *work)
2493{
2494 rmnet_ipa_get_stats_and_update(false);
2495
2496 /* Schedule again only if there's an active polling interval */
2497 if (ipa_rmnet_ctx.polling_interval != 0)
2498 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
2499 msecs_to_jiffies(ipa_rmnet_ctx.polling_interval*1000));
2500}
2501
2502/**
2503 * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
2504 *
2505 * This function retrieves the data usage (used quota) from the IPA Modem driver
2506 * via QMI, and updates IPA user space entity.
2507 */
2508static void rmnet_ipa_get_network_stats_and_update(void)
2509{
2510 struct ipa_get_apn_data_stats_req_msg_v01 req;
2511 struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
2512 struct ipa_msg_meta msg_meta;
2513 int rc;
2514
2515 resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
2516 GFP_KERNEL);
2517 if (!resp) {
2518 IPAWANERR("Can't allocate memory for network stats message\n");
2519 return;
2520 }
2521
2522 memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
2523 memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
2524
2525 req.mux_id_list_valid = true;
2526 req.mux_id_list_len = 1;
2527 req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id;
2528
2529 rc = ipa_qmi_get_network_stats(&req, resp);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002530 if (rc) {
2531 IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc);
2532 kfree(resp);
2533 return;
2534 }
Amir Levy9659e592016-10-27 18:08:27 +03002535
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002536 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2537 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
2538 msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
2539 rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2540 if (rc) {
2541 IPAWANERR("ipa2_send_msg failed: %d\n", rc);
2542 kfree(resp);
2543 return;
Amir Levy9659e592016-10-27 18:08:27 +03002544 }
2545}
2546
2547/**
Skylar Chang09e0e252017-03-20 14:51:29 -07002548 * rmnet_ipa_send_quota_reach_ind() - send quota_reach notification from
2549 * IPA Modem
2550 * This function sends the quota_reach indication from the IPA Modem driver
2551 * via QMI, to user-space module
2552 */
2553static void rmnet_ipa_send_quota_reach_ind(void)
2554{
2555 struct ipa_msg_meta msg_meta;
2556 int rc;
2557
2558 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2559 msg_meta.msg_type = IPA_QUOTA_REACH;
2560 rc = ipa_send_msg(&msg_meta, NULL, NULL);
2561 if (rc) {
2562 IPAWANERR("ipa_send_msg failed: %d\n", rc);
2563 return;
2564 }
2565}
2566
2567/**
Amir Levy9659e592016-10-27 18:08:27 +03002568 * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler
2569 * @data - IOCTL data
2570 *
2571 * This function handles WAN_IOC_POLL_TETHERING_STATS.
2572 * In case polling interval received is 0, polling will stop
2573 * (If there's a polling in progress, it will allow it to finish), and then will
2574 * fetch network stats, and update the IPA user space.
2575 *
2576 * Return codes:
2577 * 0: Success
2578 */
2579int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
2580{
2581 ipa_rmnet_ctx.polling_interval = data->polling_interval_secs;
2582
2583 cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
2584
2585 if (ipa_rmnet_ctx.polling_interval == 0) {
2586 ipa_qmi_stop_data_qouta();
2587 rmnet_ipa_get_network_stats_and_update();
2588 rmnet_ipa_get_stats_and_update(true);
2589 return 0;
2590 }
2591
2592 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
2593 return 0;
2594}
2595
2596/**
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302597 * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler
Amir Levy9659e592016-10-27 18:08:27 +03002598 * @data - IOCTL data
2599 *
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302600 * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface.
Amir Levy9659e592016-10-27 18:08:27 +03002601 * It translates the given interface name to the Modem MUX ID and
2602 * sends the request of the quota to the IPA Modem driver via QMI.
2603 *
2604 * Return codes:
2605 * 0: Success
2606 * -EFAULT: Invalid interface name provided
2607 * other: See ipa_qmi_set_data_quota
2608 */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302609static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data)
Amir Levy9659e592016-10-27 18:08:27 +03002610{
2611 u32 mux_id;
2612 int index;
2613 struct ipa_set_data_usage_quota_req_msg_v01 req;
2614
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302615 /* stop quota */
2616 if (!data->set_quota)
2617 ipa_qmi_stop_data_qouta();
2618
Skylar Changcde17ed2017-06-21 16:51:26 -07002619 /* prevent string buffer overflows */
2620 data->interface_name[IFNAMSIZ-1] = '\0';
2621
Amir Levy9659e592016-10-27 18:08:27 +03002622 index = find_vchannel_name_index(data->interface_name);
2623 IPAWANERR("iface name %s, quota %lu\n",
2624 data->interface_name,
2625 (unsigned long int) data->quota_mbytes);
2626
2627 if (index == MAX_NUM_OF_MUX_CHANNEL) {
2628 IPAWANERR("%s is an invalid iface name\n",
2629 data->interface_name);
2630 return -EFAULT;
2631 }
2632
2633 mux_id = mux_channel[index].mux_id;
2634
2635 ipa_rmnet_ctx.metered_mux_id = mux_id;
2636
2637 memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
2638 req.apn_quota_list_valid = true;
2639 req.apn_quota_list_len = 1;
2640 req.apn_quota_list[0].mux_id = mux_id;
2641 req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
2642
2643 return ipa_qmi_set_data_quota(&req);
2644}
2645
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302646static int rmnet_ipa_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
2647{
2648 struct ipa_set_wifi_quota wifi_quota;
2649 int rc = 0;
2650
2651 memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
2652 wifi_quota.set_quota = data->set_quota;
2653 wifi_quota.quota_bytes = data->quota_mbytes;
2654 IPAWANDBG("iface name %s, quota %lu\n",
2655 data->interface_name,
2656 (unsigned long int) data->quota_mbytes);
2657
2658 rc = ipa2_set_wlan_quota(&wifi_quota);
2659 /* check if wlan-fw takes this quota-set */
2660 if (!wifi_quota.set_valid)
2661 rc = -EFAULT;
2662 return rc;
2663}
2664
2665/**
2666 * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
2667 * @data - IOCTL data
2668 *
2669 * This function handles WAN_IOC_SET_DATA_QUOTA.
2670 * It translates the given interface name to the Modem MUX ID and
2671 * sends the request of the quota to the IPA Modem driver via QMI.
2672 *
2673 * Return codes:
2674 * 0: Success
2675 * -EFAULT: Invalid interface name provided
2676 * other: See ipa_qmi_set_data_quota
2677 */
2678int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
2679{
2680 enum ipa_upstream_type upstream_type;
2681 int rc = 0;
2682
2683 /* get IPA backhaul type */
2684 upstream_type = find_upstream_type(data->interface_name);
2685
2686 if (upstream_type == IPA_UPSTEAM_MAX) {
2687 IPAWANERR("upstream iface %s not supported\n",
2688 data->interface_name);
2689 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2690 rc = rmnet_ipa_set_data_quota_wifi(data);
2691 if (rc) {
2692 IPAWANERR("set quota on wifi failed\n");
2693 return rc;
2694 }
2695 } else {
2696 rc = rmnet_ipa_set_data_quota_modem(data);
2697 if (rc) {
2698 IPAWANERR("set quota on modem failed\n");
2699 return rc;
2700 }
2701 }
2702 return rc;
2703}
2704
 /* rmnet_ipa_set_tether_client_pipe() -
 * @data - IOCTL data
 *
 * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE.
 * After bounds-checking both pipe-list lengths, it records (or clears,
 * when @data->reset_client is set) the tethering client association for
 * each listed UL source pipe and DL destination pipe via ipa_set_client().
 *
 * Return codes:
 * 0: Success
 * -EFAULT: Invalid src/dst pipes provided
 */
int rmnet_ipa_set_tether_client_pipe(
	struct wan_ioctl_set_tether_client_pipe *data)
{
	int number, i;

	/* error checking if ul_src_pipe_len valid or not*/
	if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
		data->ul_src_pipe_len < 0) {
		IPAWANERR("UL src pipes %d exceeding max %d\n",
			data->ul_src_pipe_len,
			QMI_IPA_MAX_PIPES_V01);
		return -EFAULT;
	}
	/* error checking if dl_dst_pipe_len valid or not*/
	if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
		data->dl_dst_pipe_len < 0) {
		IPAWANERR("DL dst pipes %d exceeding max %d\n",
			data->dl_dst_pipe_len,
			QMI_IPA_MAX_PIPES_V01);
		return -EFAULT;
	}

	IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
	data->ipa_client,
	data->ul_src_pipe_len,
	data->dl_dst_pipe_len,
	data->reset_client);
	/* UL pipes: third argument marks them as uplink */
	number = data->ul_src_pipe_len;
	for (i = 0; i < number; i++) {
		IPAWANDBG("UL index-%d pipe %d\n", i,
			data->ul_src_pipe_list[i]);
		if (data->reset_client)
			ipa_set_client(data->ul_src_pipe_list[i],
				0, false);
		else
			ipa_set_client(data->ul_src_pipe_list[i],
				data->ipa_client, true);
	}
	/* DL pipes: third argument false marks them as downlink */
	number = data->dl_dst_pipe_len;
	for (i = 0; i < number; i++) {
		IPAWANDBG("DL index-%d pipe %d\n", i,
			data->dl_dst_pipe_list[i]);
		if (data->reset_client)
			ipa_set_client(data->dl_dst_pipe_list[i],
				0, false);
		else
			ipa_set_client(data->dl_dst_pipe_list[i],
				data->ipa_client, false);
	}
	return 0;
}
2768
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302769static int rmnet_ipa_query_tethering_stats_wifi(
2770 struct wan_ioctl_query_tether_stats *data, bool reset)
2771{
2772 struct ipa_get_wdi_sap_stats *sap_stats;
2773 int rc;
2774
2775 sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
2776 GFP_KERNEL);
2777 if (!sap_stats)
2778 return -ENOMEM;
2779
2780 sap_stats->reset_stats = reset;
2781 IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);
2782
2783 rc = ipa2_get_wlan_stats(sap_stats);
2784 if (rc) {
2785 kfree(sap_stats);
2786 return rc;
2787 } else if (reset) {
2788 kfree(sap_stats);
2789 return 0;
2790 }
2791
2792 if (sap_stats->stats_valid) {
2793 data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
2794 data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
2795 data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
2796 data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
2797 data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
2798 data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
2799 data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
2800 data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
2801 }
2802
2803 IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
2804 (unsigned long int) data->ipv4_rx_packets,
2805 (unsigned long int) data->ipv6_rx_packets,
2806 (unsigned long int) data->ipv4_rx_bytes,
2807 (unsigned long int) data->ipv6_rx_bytes);
2808 IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
2809 (unsigned long int) data->ipv4_tx_packets,
2810 (unsigned long int) data->ipv6_tx_packets,
2811 (unsigned long int) data->ipv4_tx_bytes,
2812 (unsigned long int) data->ipv6_tx_bytes);
2813
2814 kfree(sap_stats);
2815 return rc;
2816}
2817
/**
 * rmnet_ipa_query_tethering_stats_modem() - query modem pipe stats via QMI
 * @data: in/out buffer; DL counters of pipes whose client matches
 *        @data->ipa_client are accumulated (+=) into its rx fields, UL
 *        counters into its tx fields. NOTE(review): fields are assumed
 *        zeroed by the caller — confirm at the ioctl entry point.
 * @reset: when true, ask the modem to clear its counters after reading
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from ipa_qmi_get_data_stats().
 */
int rmnet_ipa_query_tethering_stats_modem(
	struct wan_ioctl_query_tether_stats *data,
	bool reset
)
{
	struct ipa_get_data_stats_req_msg_v01 *req;
	struct ipa_get_data_stats_resp_msg_v01 *resp;
	int pipe_len, rc;

	req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
			GFP_KERNEL);
	if (!req) {
		IPAWANERR("failed to allocate memory for stats message\n");
		return -ENOMEM;
	}
	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
			GFP_KERNEL);
	if (!resp) {
		IPAWANERR("failed to allocate memory for stats message\n");
		kfree(req);
		return -ENOMEM;
	}
	memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));

	req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
	if (reset) {
		req->reset_stats_valid = true;
		req->reset_stats = true;
		IPAWANERR("reset the pipe stats\n");
	} else {
		/* print tethered-client enum */
		IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
	}

	rc = ipa_qmi_get_data_stats(req, resp);
	if (rc) {
		IPAWANERR("can't get ipa_qmi_get_data_stats\n");
		kfree(req);
		kfree(resp);
		return rc;
	}

	/* DL (downlink) pass: walk the destination-pipe list and fold the
	 * counters of non-uplink pipes owned by the requested client into
	 * the rx totals.
	 */
	if (resp->dl_dst_pipe_stats_list_valid) {
		for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
			pipe_len++) {
			IPAWANDBG("Check entry(%d) dl_dst_pipe(%d)\n",
				pipe_len, resp->dl_dst_pipe_stats_list
				[pipe_len].pipe_index);
			IPAWANDBG("dl_p_v4(%lu)v6(%lu) dl_b_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv4_packets,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv6_packets,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv4_bytes,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv6_bytes);
			if (ipa_get_client_uplink(resp->
				dl_dst_pipe_stats_list[pipe_len].
				pipe_index) == false) {
				if (data->ipa_client == ipa_get_client(resp->
					dl_dst_pipe_stats_list[pipe_len].
					pipe_index)) {
					/* update the DL stats */
					data->ipv4_rx_packets += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv4_packets;
					data->ipv6_rx_packets += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv6_packets;
					data->ipv4_rx_bytes += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv4_bytes;
					data->ipv6_rx_bytes += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv6_bytes;
				}
			}
		}
	}
	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
		(unsigned long int) data->ipv4_rx_packets,
		(unsigned long int) data->ipv6_rx_packets,
		(unsigned long int) data->ipv4_rx_bytes,
		(unsigned long int) data->ipv6_rx_bytes);

	/* UL (uplink) pass: walk the source-pipe list and fold the
	 * counters of uplink pipes owned by the requested client into
	 * the tx totals.
	 */
	if (resp->ul_src_pipe_stats_list_valid) {
		for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
			pipe_len++) {
			IPAWANDBG("Check entry(%d) ul_dst_pipe(%d)\n",
				pipe_len,
				resp->ul_src_pipe_stats_list[pipe_len].
				pipe_index);
			IPAWANDBG("ul_p_v4(%lu)v6(%lu)ul_b_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv4_packets,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv6_packets,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv4_bytes,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv6_bytes);
			if (ipa_get_client_uplink(resp->
				ul_src_pipe_stats_list[pipe_len].
				pipe_index) == true) {
				if (data->ipa_client == ipa_get_client(resp->
					ul_src_pipe_stats_list[pipe_len].
					pipe_index)) {
					/* update the UL stats */
					data->ipv4_tx_packets += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv4_packets;
					data->ipv6_tx_packets += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv6_packets;
					data->ipv4_tx_bytes += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv4_bytes;
					data->ipv6_tx_bytes += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv6_bytes;
				}
			}
		}
	}
	IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
		(unsigned long int) data->ipv4_tx_packets,
		(unsigned long int) data->ipv6_tx_packets,
		(unsigned long int) data->ipv4_tx_bytes,
		(unsigned long int) data->ipv6_tx_bytes);
	kfree(req);
	kfree(resp);
	return 0;
}
2961
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302962int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
2963 bool reset)
2964{
2965 enum ipa_upstream_type upstream_type;
2966 int rc = 0;
2967
2968 /* get IPA backhaul type */
2969 upstream_type = find_upstream_type(data->upstreamIface);
2970
2971 if (upstream_type == IPA_UPSTEAM_MAX) {
2972 IPAWANERR("upstreamIface %s not supported\n",
2973 data->upstreamIface);
2974 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2975 IPAWANDBG_LOW(" query wifi-backhaul stats\n");
2976 rc = rmnet_ipa_query_tethering_stats_wifi(
2977 data, false);
2978 if (rc) {
2979 IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
2980 return rc;
2981 }
2982 } else {
2983 IPAWANDBG_LOW(" query modem-backhaul stats\n");
2984 rc = rmnet_ipa_query_tethering_stats_modem(
2985 data, false);
2986 if (rc) {
2987 IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
2988 return rc;
2989 }
2990 }
2991 return rc;
2992}
2993
Skylar Chang09e0e252017-03-20 14:51:29 -07002994int rmnet_ipa_query_tethering_stats_all(
2995 struct wan_ioctl_query_tether_stats_all *data)
2996{
2997 struct wan_ioctl_query_tether_stats tether_stats;
2998 enum ipa_upstream_type upstream_type;
2999 int rc = 0;
3000
3001 memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
3002 /* get IPA backhaul type */
3003 upstream_type = find_upstream_type(data->upstreamIface);
3004
3005 if (upstream_type == IPA_UPSTEAM_MAX) {
3006 IPAWANERR(" Wrong upstreamIface name %s\n",
3007 data->upstreamIface);
3008 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
3009 IPAWANDBG_LOW(" query wifi-backhaul stats\n");
3010 rc = rmnet_ipa_query_tethering_stats_wifi(
3011 &tether_stats, data->reset_stats);
3012 if (rc) {
3013 IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
3014 return rc;
3015 }
3016 data->tx_bytes = tether_stats.ipv4_tx_bytes
3017 + tether_stats.ipv6_tx_bytes;
3018 data->rx_bytes = tether_stats.ipv4_rx_bytes
3019 + tether_stats.ipv6_rx_bytes;
3020 } else {
3021 IPAWANDBG_LOW(" query modem-backhaul stats\n");
3022 tether_stats.ipa_client = data->ipa_client;
3023 rc = rmnet_ipa_query_tethering_stats_modem(
3024 &tether_stats, data->reset_stats);
3025 if (rc) {
3026 IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
3027 return rc;
3028 }
3029 data->tx_bytes = tether_stats.ipv4_tx_bytes
3030 + tether_stats.ipv6_tx_bytes;
3031 data->rx_bytes = tether_stats.ipv4_rx_bytes
3032 + tether_stats.ipv6_rx_bytes;
3033 }
3034 return rc;
3035}
3036
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303037int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
3038{
3039 enum ipa_upstream_type upstream_type;
3040 int rc = 0;
3041
3042 /* get IPA backhaul type */
3043 upstream_type = find_upstream_type(data->upstreamIface);
3044
3045 if (upstream_type == IPA_UPSTEAM_MAX) {
3046 IPAWANERR("upstream iface %s not supported\n",
3047 data->upstreamIface);
3048 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
3049 IPAWANDBG(" reset wifi-backhaul stats\n");
3050 rc = rmnet_ipa_query_tethering_stats_wifi(
3051 NULL, true);
3052 if (rc) {
3053 IPAWANERR("reset WLAN stats failed\n");
3054 return rc;
3055 }
3056 } else {
3057 IPAWANDBG(" reset modem-backhaul stats\n");
3058 rc = rmnet_ipa_query_tethering_stats_modem(
3059 NULL, true);
3060 if (rc) {
3061 IPAWANERR("reset MODEM stats failed\n");
3062 return rc;
3063 }
3064 }
3065 return rc;
3066}
3067
3068
Amir Levy9659e592016-10-27 18:08:27 +03003069/**
3070 * ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
3071 * @mux_id - The MUX ID on which the quota has been reached
3072 *
3073 * This function broadcasts a Netlink event using the kobject of the
3074 * rmnet_ipa interface in order to alert the user space that the quota
3075 * on the specific interface which matches the mux_id has been reached.
3076 *
3077 */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303078void ipa_broadcast_quota_reach_ind(u32 mux_id,
3079 enum ipa_upstream_type upstream_type)
Amir Levy9659e592016-10-27 18:08:27 +03003080{
3081 char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
3082 char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
3083 char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
3084 char *envp[IPA_UEVENT_NUM_EVNP] = {
3085 alert_msg, iface_name_l, iface_name_m, NULL };
3086 int res;
3087 int index;
3088
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303089 /* check upstream_type*/
3090 if (upstream_type == IPA_UPSTEAM_MAX) {
3091 IPAWANERR("upstreamIface type %d not supported\n",
3092 upstream_type);
Amir Levy9659e592016-10-27 18:08:27 +03003093 return;
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303094 } else if (upstream_type == IPA_UPSTEAM_MODEM) {
3095 index = find_mux_channel_index(mux_id);
3096 if (index == MAX_NUM_OF_MUX_CHANNEL) {
3097 IPAWANERR("%u is an mux ID\n", mux_id);
3098 return;
3099 }
Amir Levy9659e592016-10-27 18:08:27 +03003100 }
3101
3102 res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
3103 "ALERT_NAME=%s", "quotaReachedAlert");
3104 if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
3105 IPAWANERR("message too long (%d)", res);
3106 return;
3107 }
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303108
Amir Levy9659e592016-10-27 18:08:27 +03003109 /* posting msg for L-release for CNE */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303110 if (upstream_type == IPA_UPSTEAM_MODEM) {
Amir Levy9659e592016-10-27 18:08:27 +03003111 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303112 "UPSTREAM=%s", mux_channel[index].vchannel_name);
3113 } else {
3114 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
3115 "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
3116 }
Amir Levy9659e592016-10-27 18:08:27 +03003117 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
3118 IPAWANERR("message too long (%d)", res);
3119 return;
3120 }
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303121
Amir Levy9659e592016-10-27 18:08:27 +03003122 /* posting msg for M-release for CNE */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303123 if (upstream_type == IPA_UPSTEAM_MODEM) {
Amir Levy9659e592016-10-27 18:08:27 +03003124 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303125 "INTERFACE=%s", mux_channel[index].vchannel_name);
3126 } else {
3127 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
3128 "INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
3129 }
Amir Levy9659e592016-10-27 18:08:27 +03003130 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
3131 IPAWANERR("message too long (%d)", res);
3132 return;
3133 }
3134
3135 IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
3136 alert_msg, iface_name_l, iface_name_m);
3137 kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp);
Skylar Chang09e0e252017-03-20 14:51:29 -07003138
3139 rmnet_ipa_send_quota_reach_ind();
Amir Levy9659e592016-10-27 18:08:27 +03003140}
3141
3142/**
3143 * ipa_q6_handshake_complete() - Perform operations once Q6 is up
3144 * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
3145 *
3146 * This function is invoked once the handshake between the IPA AP driver
3147 * and IPA Q6 driver is complete. At this point, it is possible to perform
3148 * operations which can't be performed until IPA Q6 driver is up.
3149 *
3150 */
3151void ipa_q6_handshake_complete(bool ssr_bootup)
3152{
3153 /* It is required to recover the network stats after SSR recovery */
3154 if (ssr_bootup) {
3155 /*
3156 * In case the uC is required to be loaded by the Modem,
3157 * the proxy vote will be removed only when uC loading is
3158 * complete and indication is received by the AP. After SSR,
3159 * uC is already loaded. Therefore, proxy vote can be removed
3160 * once Modem init is complete.
3161 */
3162 ipa2_proxy_clk_unvote();
3163
Skylar Chang09e0e252017-03-20 14:51:29 -07003164 /* send SSR power-up notification to IPACM */
3165 rmnet_ipa_send_ssr_notification(true);
3166
Amir Levy9659e592016-10-27 18:08:27 +03003167 /*
3168 * It is required to recover the network stats after
3169 * SSR recovery
3170 */
3171 rmnet_ipa_get_network_stats_and_update();
3172
3173 /* Enable holb monitoring on Q6 pipes. */
3174 ipa_q6_monitor_holb_mitigation(true);
3175 }
3176}
3177
3178static int __init ipa_wwan_init(void)
3179{
3180 atomic_set(&is_initialized, 0);
3181 atomic_set(&is_ssr, 0);
3182
3183 mutex_init(&ipa_to_apps_pipe_handle_guard);
Skylar Chang8438ba52017-03-15 21:27:35 -07003184 mutex_init(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03003185 ipa_to_apps_hdl = -1;
3186
3187 ipa_qmi_init();
3188
3189 /* Register for Modem SSR */
3190 subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM,
3191 &ssr_notifier);
3192 if (!IS_ERR(subsys_notify_handle))
3193 return platform_driver_register(&rmnet_ipa_driver);
3194 else
3195 return (int)PTR_ERR(subsys_notify_handle);
3196}
3197
3198static void __exit ipa_wwan_cleanup(void)
3199{
3200 int ret;
3201
3202 ipa_qmi_cleanup();
3203 mutex_destroy(&ipa_to_apps_pipe_handle_guard);
Skylar Chang8438ba52017-03-15 21:27:35 -07003204 mutex_destroy(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03003205 ret = subsys_notif_unregister_notifier(subsys_notify_handle,
3206 &ssr_notifier);
3207 if (ret)
3208 IPAWANERR(
3209 "Error subsys_notif_unregister_notifier system %s, ret=%d\n",
3210 SUBSYS_MODEM, ret);
3211 platform_driver_unregister(&rmnet_ipa_driver);
3212}
3213
3214static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type)
3215{
3216 if (!buff)
3217 IPAWANERR("Null buffer.\n");
3218 kfree(buff);
3219}
3220
3221static void ipa_rmnet_rx_cb(void *priv)
3222{
3223 struct net_device *dev = priv;
3224 struct wwan_private *wwan_ptr;
3225
3226 IPAWANDBG("\n");
3227
3228 if (dev != ipa_netdevs[0]) {
3229 IPAWANERR("Not matching with netdev\n");
3230 return;
3231 }
3232
3233 wwan_ptr = netdev_priv(dev);
3234 napi_schedule(&(wwan_ptr->napi));
3235}
3236
3237static int ipa_rmnet_poll(struct napi_struct *napi, int budget)
3238{
3239 int rcvd_pkts = 0;
3240
3241 rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT);
3242 IPAWANDBG("rcvd packets: %d\n", rcvd_pkts);
3243 return rcvd_pkts;
3244}
3245
/* Registered via late_initcall so dependent core drivers initialize first
 * (NOTE(review): confirm the exact ordering dependency on the IPA core).
 */
late_initcall(ipa_wwan_init);
module_exit(ipa_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
MODULE_LICENSE("GPL v2");