blob: 2f272d2cb3d793d462f46261938c3eff6cfb2f80 [file] [log] [blame]
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05301/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * WWAN Transport Network Driver.
15 */
16
17#include <linux/completion.h>
18#include <linux/errno.h>
19#include <linux/if_arp.h>
20#include <linux/interrupt.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
25#include <linux/of_device.h>
26#include <linux/string.h>
27#include <linux/skbuff.h>
28#include <linux/version.h>
29#include <linux/workqueue.h>
30#include <net/pkt_sched.h>
31#include <soc/qcom/subsystem_restart.h>
32#include <soc/qcom/subsystem_notif.h>
33#include "ipa_qmi_service.h"
34#include <linux/rmnet_ipa_fd_ioctl.h>
35#include <linux/ipa.h>
36#include <uapi/linux/net_map.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020037#include <uapi/linux/msm_rmnet.h>
38#include <net/rmnet_config.h>
Amir Levy9659e592016-10-27 18:08:27 +030039
40#include "ipa_trace.h"
41
42#define WWAN_METADATA_SHFT 24
43#define WWAN_METADATA_MASK 0xFF000000
44#define WWAN_DATA_LEN 2000
45#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
46#define HEADROOM_FOR_QMAP 8 /* for mux header */
47#define TAILROOM 0 /* for padding by mux layer */
48#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
49#define UL_FILTER_RULE_HANDLE_START 69
50#define DEFAULT_OUTSTANDING_HIGH_CTL 96
51#define DEFAULT_OUTSTANDING_HIGH 64
52#define DEFAULT_OUTSTANDING_LOW 32
53
54#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +053055#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
56
Amir Levy9659e592016-10-27 18:08:27 +030057#define IPA_WWAN_DEVICE_COUNT (1)
58
59#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
60
61#define INVALID_MUX_ID 0xFF
62#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
63#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
64#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
65
66#define NAPI_WEIGHT 60
Sunil Paidimarri226cf032016-10-14 13:33:08 -070067#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024
Amir Levy9659e592016-10-27 18:08:27 +030068
/* One net_device per logical WWAN interface (currently a single device). */
static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
/* Pipe configs for the apps<->IPA system pipes. */
static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
/* IPA handles: default A7 QMAP header and default v4/v6 WAN route rules. */
static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl;
/* Table of configured rmnet mux channels; rmnet_index entries are in use. */
static struct rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
/* Current / previously-installed count of modem (Q6) UL filter rules. */
static int num_q6_rule, old_num_q6_rule;
static int rmnet_index;
/* egress_set: egress config received; a7_ul_flt_set: A7 UL rules installed. */
static bool egress_set, a7_ul_flt_set;
static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/
static atomic_t is_initialized;
static atomic_t is_ssr; /* set while subsystem restart is in progress */
static void *subsys_notify_handle;

u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */
static struct mutex ipa_to_apps_pipe_handle_guard;
/* serializes mux-channel addition and num_q6_rule updates */
static struct mutex add_mux_channel_lock;
static int wwan_add_ul_flt_rule_to_ipa(void);
static int wwan_del_ul_flt_rule_to_ipa(void);
static void ipa_wwan_msg_free_cb(void*, u32, u32);
static void ipa_rmnet_rx_cb(void *priv);
static int ipa_rmnet_poll(struct napi_struct *napi, int budget);

static void wake_tx_queue(struct work_struct *work);
static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue);

static void tethering_stats_poll_queue(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
		tethering_stats_poll_queue);
96
/* Runtime state of the WWAN net device (see wwan_private.device_status). */
enum wwan_device_status {
	WWAN_DEVICE_INACTIVE = 0,
	WWAN_DEVICE_ACTIVE   = 1
};
101
/* Platform (device-tree) configuration for the rmnet_ipa driver. */
struct ipa_rmnet_plat_drv_res {
	bool ipa_rmnet_ssr;		/* subsystem-restart support enabled */
	bool ipa_loaduC;		/* load the IPA uC firmware */
	bool ipa_advertise_sg_support;	/* advertise scatter-gather (NETIF_F_SG) */
	bool ipa_napi_enable;		/* use NAPI polling on the RX path */
	u32 wan_rx_desc_size;		/* WAN consumer descriptor FIFO size */
};

static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
111/**
112 * struct wwan_private - WWAN private data
113 * @net: network interface struct implemented by this driver
114 * @stats: iface statistics
115 * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
116 * @outstanding_high: number of outstanding packets allowed
117 * @outstanding_low: number of outstanding packets which shall cause
118 * @ch_id: channel id
119 * @lock: spinlock for mutual exclusion
120 * @device_status: holds device status
121 *
122 * WWAN private - holds all relevant info about WWAN driver
123 */
124struct wwan_private {
125 struct net_device *net;
126 struct net_device_stats stats;
127 atomic_t outstanding_pkts;
128 int outstanding_high_ctl;
129 int outstanding_high;
130 int outstanding_low;
131 uint32_t ch_id;
132 spinlock_t lock;
133 struct completion resource_granted_completion;
134 enum wwan_device_status device_status;
135 struct napi_struct napi;
136};
137
138/**
139* ipa_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
140*
141* Return codes:
142* 0: success
143* -ENOMEM: failed to allocate memory
144* -EPERM: failed to add the tables
145*/
146static int ipa_setup_a7_qmap_hdr(void)
147{
148 struct ipa_ioc_add_hdr *hdr;
149 struct ipa_hdr_add *hdr_entry;
150 u32 pyld_sz;
151 int ret;
152
153 /* install the basic exception header */
154 pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
155 sizeof(struct ipa_hdr_add);
156 hdr = kzalloc(pyld_sz, GFP_KERNEL);
157 if (!hdr) {
158 IPAWANERR("fail to alloc exception hdr\n");
159 return -ENOMEM;
160 }
161 hdr->num_hdrs = 1;
162 hdr->commit = 1;
163 hdr_entry = &hdr->hdr[0];
164
165 strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
166 IPA_RESOURCE_NAME_MAX);
167 hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
168
169 if (ipa2_add_hdr(hdr)) {
170 IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
171 ret = -EPERM;
172 goto bail;
173 }
174
175 if (hdr_entry->status) {
176 IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
177 ret = -EPERM;
178 goto bail;
179 }
180 qmap_hdr_hdl = hdr_entry->hdr_hdl;
181
182 ret = 0;
183bail:
184 kfree(hdr);
185 return ret;
186}
187
188static void ipa_del_a7_qmap_hdr(void)
189{
190 struct ipa_ioc_del_hdr *del_hdr;
191 struct ipa_hdr_del *hdl_entry;
192 u32 pyld_sz;
193 int ret;
194
195 pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
196 sizeof(struct ipa_hdr_del);
197 del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
198 if (!del_hdr) {
199 IPAWANERR("fail to alloc exception hdr_del\n");
200 return;
201 }
202
203 del_hdr->commit = 1;
204 del_hdr->num_hdls = 1;
205 hdl_entry = &del_hdr->hdl[0];
206 hdl_entry->hdl = qmap_hdr_hdl;
207
208 ret = ipa2_del_hdr(del_hdr);
209 if (ret || hdl_entry->status)
210 IPAWANERR("ipa2_del_hdr failed\n");
211 else
212 IPAWANDBG("hdrs deletion done\n");
213
214 qmap_hdr_hdl = 0;
215 kfree(del_hdr);
216}
217
218static void ipa_del_qmap_hdr(uint32_t hdr_hdl)
219{
220 struct ipa_ioc_del_hdr *del_hdr;
221 struct ipa_hdr_del *hdl_entry;
222 u32 pyld_sz;
223 int ret;
224
225 if (hdr_hdl == 0) {
226 IPAWANERR("Invalid hdr_hdl provided\n");
227 return;
228 }
229
230 pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
231 sizeof(struct ipa_hdr_del);
232 del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
233 if (!del_hdr) {
234 IPAWANERR("fail to alloc exception hdr_del\n");
235 return;
236 }
237
238 del_hdr->commit = 1;
239 del_hdr->num_hdls = 1;
240 hdl_entry = &del_hdr->hdl[0];
241 hdl_entry->hdl = hdr_hdl;
242
243 ret = ipa2_del_hdr(del_hdr);
244 if (ret || hdl_entry->status)
245 IPAWANERR("ipa2_del_hdr failed\n");
246 else
247 IPAWANDBG("header deletion done\n");
248
249 qmap_hdr_hdl = 0;
250 kfree(del_hdr);
251}
252
253static void ipa_del_mux_qmap_hdrs(void)
254{
255 int index;
256
257 for (index = 0; index < rmnet_index; index++) {
258 ipa_del_qmap_hdr(mux_channel[index].hdr_hdl);
259 mux_channel[index].hdr_hdl = 0;
260 }
261}
262
263static int ipa_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
264{
265 struct ipa_ioc_add_hdr *hdr;
266 struct ipa_hdr_add *hdr_entry;
267 char hdr_name[IPA_RESOURCE_NAME_MAX];
268 u32 pyld_sz;
269 int ret;
270
271 pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
272 sizeof(struct ipa_hdr_add);
273 hdr = kzalloc(pyld_sz, GFP_KERNEL);
274 if (!hdr) {
275 IPAWANERR("fail to alloc exception hdr\n");
276 return -ENOMEM;
277 }
278 hdr->num_hdrs = 1;
279 hdr->commit = 1;
280 hdr_entry = &hdr->hdr[0];
281
282 snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
283 A2_MUX_HDR_NAME_V4_PREF,
284 mux_id);
285 strlcpy(hdr_entry->name, hdr_name,
286 IPA_RESOURCE_NAME_MAX);
287
288 hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
289 hdr_entry->hdr[1] = (uint8_t) mux_id;
290 IPAWANDBG("header (%s) with mux-id: (%d)\n",
291 hdr_name,
292 hdr_entry->hdr[1]);
293 if (ipa2_add_hdr(hdr)) {
294 IPAWANERR("fail to add IPA_QMAP hdr\n");
295 ret = -EPERM;
296 goto bail;
297 }
298
299 if (hdr_entry->status) {
300 IPAWANERR("fail to add IPA_QMAP hdr\n");
301 ret = -EPERM;
302 goto bail;
303 }
304
305 ret = 0;
306 *hdr_hdl = hdr_entry->hdr_hdl;
307bail:
308 kfree(hdr);
309 return ret;
310}
311
312/**
313* ipa_setup_dflt_wan_rt_tables() - Setup default wan routing tables
314*
315* Return codes:
316* 0: success
317* -ENOMEM: failed to allocate memory
318* -EPERM: failed to add the tables
319*/
320static int ipa_setup_dflt_wan_rt_tables(void)
321{
322 struct ipa_ioc_add_rt_rule *rt_rule;
323 struct ipa_rt_rule_add *rt_rule_entry;
324
325 rt_rule =
326 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
327 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
328 if (!rt_rule) {
329 IPAWANERR("fail to alloc mem\n");
330 return -ENOMEM;
331 }
332 /* setup a default v4 route to point to Apps */
333 rt_rule->num_rules = 1;
334 rt_rule->commit = 1;
335 rt_rule->ip = IPA_IP_v4;
336 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
337 IPA_RESOURCE_NAME_MAX);
338
339 rt_rule_entry = &rt_rule->rules[0];
340 rt_rule_entry->at_rear = 1;
341 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
342 rt_rule_entry->rule.hdr_hdl = qmap_hdr_hdl;
343
344 if (ipa2_add_rt_rule(rt_rule)) {
345 IPAWANERR("fail to add dflt_wan v4 rule\n");
346 kfree(rt_rule);
347 return -EPERM;
348 }
349
350 IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
351 dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
352
353 /* setup a default v6 route to point to A5 */
354 rt_rule->ip = IPA_IP_v6;
355 if (ipa2_add_rt_rule(rt_rule)) {
356 IPAWANERR("fail to add dflt_wan v6 rule\n");
357 kfree(rt_rule);
358 return -EPERM;
359 }
360 IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
361 dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
362
363 kfree(rt_rule);
364 return 0;
365}
366
367static void ipa_del_dflt_wan_rt_tables(void)
368{
369 struct ipa_ioc_del_rt_rule *rt_rule;
370 struct ipa_rt_rule_del *rt_rule_entry;
371 int len;
372
373 len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
374 sizeof(struct ipa_rt_rule_del);
375 rt_rule = kzalloc(len, GFP_KERNEL);
376 if (!rt_rule) {
377 IPAWANERR("unable to allocate memory for del route rule\n");
378 return;
379 }
380
381 memset(rt_rule, 0, len);
382 rt_rule->commit = 1;
383 rt_rule->num_hdls = 1;
384 rt_rule->ip = IPA_IP_v4;
385
386 rt_rule_entry = &rt_rule->hdl[0];
387 rt_rule_entry->status = -1;
388 rt_rule_entry->hdl = dflt_v4_wan_rt_hdl;
389
390 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
391 rt_rule_entry->hdl, IPA_IP_v4);
392 if (ipa2_del_rt_rule(rt_rule) ||
393 (rt_rule_entry->status)) {
394 IPAWANERR("Routing rule deletion failed!\n");
395 }
396
397 rt_rule->ip = IPA_IP_v6;
398 rt_rule_entry->hdl = dflt_v6_wan_rt_hdl;
399 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
400 rt_rule_entry->hdl, IPA_IP_v6);
401 if (ipa2_del_rt_rule(rt_rule) ||
402 (rt_rule_entry->status)) {
403 IPAWANERR("Routing rule deletion failed!\n");
404 }
405
406 kfree(rt_rule);
407}
408
/**
 * copy_ul_filter_rule_to_ipa() - cache modem (Q6) UL filter rules locally.
 * @rule_req: QMI install-filter-rule request received from the modem
 * @rule_hdl: out array; receives one locally-generated handle per rule
 *
 * Copies every filter spec from the QMI message into
 * ipa_qmi_ctx->q6_ul_filter_rule[], field by field, assigns sequential
 * handles starting at UL_FILTER_RULE_HANDLE_START, and marks any rules
 * the modem flags as XLAT rules.
 *
 * Returns 0 on success; -EINVAL if no rules were supplied, the rule count
 * exceeds the cache, or an XLAT index is out of range (the cache and
 * num_q6_rule are reset on failure).
 */
int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
		*rule_req, uint32_t *rule_hdl)
{
	int i, j;

	/* prevent multi-threads accessing num_q6_rule */
	mutex_lock(&add_mux_channel_lock);
	if (rule_req->filter_spec_list_valid == true) {
		num_q6_rule = rule_req->filter_spec_list_len;
		IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
	} else {
		num_q6_rule = 0;
		IPAWANERR("got no UL rules from modem\n");
		mutex_unlock(&add_mux_channel_lock);
		return -EINVAL;
	}

	/* copy UL filter rules from Modem*/
	for (i = 0; i < num_q6_rule; i++) {
		/* check if rules overflow the cache */
		if (i == MAX_NUM_Q6_RULE) {
			IPAWANERR("Reaching (%d) max cache ",
				MAX_NUM_Q6_RULE);
			IPAWANERR(" however total (%d)\n",
				num_q6_rule);
			goto failure;
		}
		/* construct UL_filter_rule handler QMI use-case */
		ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl =
			UL_FILTER_RULE_HANDLE_START + i;
		rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
		ipa_qmi_ctx->q6_ul_filter_rule[i].ip =
			rule_req->filter_spec_list[i].ip_type;
		ipa_qmi_ctx->q6_ul_filter_rule[i].action =
			rule_req->filter_spec_list[i].filter_action;
		/* optional QMI TLVs: only copy when the modem marked them valid */
		if (rule_req->filter_spec_list[i].is_routing_table_index_valid
			== true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
			rule_req->filter_spec_list[i].route_table_index;
		if (rule_req->filter_spec_list[i].is_mux_id_valid == true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].mux_id =
			rule_req->filter_spec_list[i].mux_id;
		/* equation attributes: scalar equations first */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
			rule_req->filter_spec_list[i].filter_rule.
			rule_eq_bitmap;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			tos_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
			rule_req->filter_spec_list[i].filter_rule.tos_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			protocol_eq_present = rule_req->filter_spec_list[i].
			filter_rule.protocol_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
			rule_req->filter_spec_list[i].filter_rule.
			protocol_eq;

		/* variable-length equation arrays, bounded by their counts */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_range_16;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_low = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].range_low;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_high = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].range_high;
		}
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].offset = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].mask = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].value = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].value;
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
			rule_req->filter_spec_list[i].filter_rule.tc_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
			rule_req->filter_spec_list[i].filter_rule.tc_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			flow_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
			rule_req->filter_spec_list[i].filter_rule.flow_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_meq_32 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].
			eq_attrib.num_ihl_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].mask = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].value = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].value;
		}
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_128;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_128; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].offset;
			/* 128-bit mask/value are copied as raw 16-byte blobs */
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].mask,
				rule_req->filter_spec_list[i].
				filter_rule.offset_meq_128[j].mask, 16);
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].value, rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].value, 16);
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32_present = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.offset = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.mask = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.mask;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
			value = rule_req->filter_spec_list[i].filter_rule.
			metadata_meq32.value;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ipv4_frag_eq_present = rule_req->filter_spec_list[i].
			filter_rule.ipv4_frag_eq_present;
	}

	/* mark XLAT rules, validating each index against num_q6_rule */
	if (rule_req->xlat_filter_indices_list_valid) {
		if (rule_req->xlat_filter_indices_list_len > num_q6_rule) {
			IPAWANERR("Number of xlat indices is not valid: %d\n",
					rule_req->xlat_filter_indices_list_len);
			goto failure;
		}
		IPAWANDBG("Receive %d XLAT indices: ",
				rule_req->xlat_filter_indices_list_len);
		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
			IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
		IPAWANDBG("\n");

		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
			if (rule_req->xlat_filter_indices_list[i]
				>= num_q6_rule) {
				IPAWANERR("Xlat rule idx is wrong: %d\n",
					rule_req->xlat_filter_indices_list[i]);
				goto failure;
			} else {
				ipa_qmi_ctx->q6_ul_filter_rule
					[rule_req->xlat_filter_indices_list[i]]
					.is_xlat_rule = 1;
				IPAWANDBG("Rule %d is xlat rule\n",
					rule_req->xlat_filter_indices_list[i]);
			}
		}
	}
	goto success;

failure:
	/* wipe the partially-filled cache so stale rules are never installed */
	num_q6_rule = 0;
	memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
		sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
	mutex_unlock(&add_mux_channel_lock);
	return -EINVAL;

success:
	mutex_unlock(&add_mux_channel_lock);
	return 0;
}
623
/*
 * wwan_add_ul_flt_rule_to_ipa() - install the cached Q6 UL filter rules
 * into IPA on the A7 side, then notify Q6 which rules were installed.
 *
 * Rules are committed one at a time (param holds a single entry that is
 * rewritten each iteration). After installation a
 * ipa_fltr_installed_notif_req_msg_v01 is sent to the modem carrying a
 * per-rule filter index (numbered separately for v4 and v6) and the
 * locally-assigned handle.
 *
 * Returns 0 on success, -EFAULT if any rule add or the Q6 notification
 * fails, -ENOMEM on allocation failure.
 */
static int wwan_add_ul_flt_rule_to_ipa(void)
{
	u32 pyld_sz;
	int i, retval = 0;
	int num_v4_rule = 0, num_v6_rule = 0;
	struct ipa_ioc_add_flt_rule *param;
	struct ipa_flt_rule_add flt_rule_entry;
	struct ipa_fltr_installed_notif_req_msg_v01 *req;

	if (ipa_qmi_ctx == NULL) {
		IPAWANERR("ipa_qmi_ctx is NULL!\n");
		return -EFAULT;
	}

	pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
	   sizeof(struct ipa_flt_rule_add);
	param = kzalloc(pyld_sz, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	req = (struct ipa_fltr_installed_notif_req_msg_v01 *)
		kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
			GFP_KERNEL);
	if (!req) {
		kfree(param);
		return -ENOMEM;
	}

	param->commit = 1;
	param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
	param->global = false;
	param->num_rules = (uint8_t)1;

	/* ipa_qmi_lock guards the shared q6_ul_filter_rule cache */
	mutex_lock(&ipa_qmi_lock);
	for (i = 0; i < num_q6_rule; i++) {
		param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
		flt_rule_entry.at_rear = true;
		flt_rule_entry.rule.action =
			ipa_qmi_ctx->q6_ul_filter_rule[i].action;
		flt_rule_entry.rule.rt_tbl_idx
			= ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
		flt_rule_entry.rule.retain_hdr = true;

		/* debug rt-hdl*/
		IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
			i, flt_rule_entry.rule.rt_tbl_idx);
		flt_rule_entry.rule.eq_attrib_type = true;
		memcpy(&(flt_rule_entry.rule.eq_attrib),
			&ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
			sizeof(struct ipa_ipfltri_rule_eq));
		memcpy(&(param->rules[0]), &flt_rule_entry,
			sizeof(struct ipa_flt_rule_add));
		/* a single failed rule marks the whole call failed but the
		 * loop continues so remaining rules are still attempted
		 */
		if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
			retval = -EFAULT;
			IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
		} else {
			/* store the rule handler */
			ipa_qmi_ctx->q6_ul_filter_rule_hdl[i] =
				param->rules[0].flt_rule_hdl;
		}
	}
	mutex_unlock(&ipa_qmi_lock);

	/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
	req->source_pipe_index =
		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
	req->install_status = QMI_RESULT_SUCCESS_V01;
	req->filter_index_list_len = num_q6_rule;
	mutex_lock(&ipa_qmi_lock);
	for (i = 0; i < num_q6_rule; i++) {
		/* v4 and v6 rules are indexed independently */
		if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) {
			req->filter_index_list[i].filter_index = num_v4_rule;
			num_v4_rule++;
		} else {
			req->filter_index_list[i].filter_index = num_v6_rule;
			num_v6_rule++;
		}
		req->filter_index_list[i].filter_handle =
			ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
	}
	mutex_unlock(&ipa_qmi_lock);
	if (qmi_filter_notify_send(req)) {
		IPAWANDBG("add filter rule index on A7-RX failed\n");
		retval = -EFAULT;
	}
	old_num_q6_rule = num_q6_rule;
	IPAWANDBG("add (%d) filter rule index on A7-RX\n",
		old_num_q6_rule);
	kfree(param);
	kfree(req);
	return retval;
}
717
718static int wwan_del_ul_flt_rule_to_ipa(void)
719{
720 u32 pyld_sz;
721 int i, retval = 0;
722 struct ipa_ioc_del_flt_rule *param;
723 struct ipa_flt_rule_del flt_rule_entry;
724
725 pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
726 sizeof(struct ipa_flt_rule_del);
727 param = kzalloc(pyld_sz, GFP_KERNEL);
728 if (!param) {
729 IPAWANERR("kzalloc failed\n");
730 return -ENOMEM;
731 }
732
733 param->commit = 1;
734 param->num_hdls = (uint8_t) 1;
735
736 for (i = 0; i < old_num_q6_rule; i++) {
737 param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
738 memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
739 flt_rule_entry.hdl = ipa_qmi_ctx->q6_ul_filter_rule_hdl[i];
740 /* debug rt-hdl*/
741 IPAWANDBG("delete-IPA rule index(%d)\n", i);
742 memcpy(&(param->hdl[0]), &flt_rule_entry,
743 sizeof(struct ipa_flt_rule_del));
744 if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
745 IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
746 kfree(param);
747 return -EFAULT;
748 }
749 }
750
751 /* set UL filter-rule add-indication */
752 a7_ul_flt_set = false;
753 old_num_q6_rule = 0;
754
755 kfree(param);
756 return retval;
757}
758
759static int find_mux_channel_index(uint32_t mux_id)
760{
761 int i;
762
763 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
764 if (mux_id == mux_channel[i].mux_id)
765 return i;
766 }
767 return MAX_NUM_OF_MUX_CHANNEL;
768}
769
770static int find_vchannel_name_index(const char *vchannel_name)
771{
772 int i;
773
774 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
775 if (strcmp(mux_channel[i].vchannel_name, vchannel_name == 0))
776 return i;
777 }
778 return MAX_NUM_OF_MUX_CHANNEL;
779}
780
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530781static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
782{
783 int i;
784
785 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
786 if (strcmp(mux_channel[i].vchannel_name,
787 upstreamIface) == 0)
788 return IPA_UPSTEAM_MODEM;
789 }
790
791 if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
792 return IPA_UPSTEAM_WLAN;
793 else
794 return IPA_UPSTEAM_MAX;
795}
796
Amir Levy9659e592016-10-27 18:08:27 +0300797static int wwan_register_to_ipa(int index)
798{
799 struct ipa_tx_intf tx_properties = {0};
800 struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
801 struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
802 struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
803 struct ipa_rx_intf rx_properties = {0};
804 struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
805 struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
806 struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
807 struct ipa_ext_intf ext_properties = {0};
808 struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
809 u32 pyld_sz;
810 int ret = 0, i;
811
812 IPAWANDBG("index(%d) device[%s]:\n", index,
813 mux_channel[index].vchannel_name);
814 if (!mux_channel[index].mux_hdr_set) {
815 ret = ipa_add_qmap_hdr(mux_channel[index].mux_id,
816 &mux_channel[index].hdr_hdl);
817 if (ret) {
818 IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
819 return ret;
820 }
821 mux_channel[index].mux_hdr_set = true;
822 }
823 tx_properties.prop = tx_ioc_properties;
824 tx_ipv4_property = &tx_properties.prop[0];
825 tx_ipv4_property->ip = IPA_IP_v4;
826 tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
827 snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
828 A2_MUX_HDR_NAME_V4_PREF,
829 mux_channel[index].mux_id);
830 tx_ipv6_property = &tx_properties.prop[1];
831 tx_ipv6_property->ip = IPA_IP_v6;
832 tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
833 /* no need use A2_MUX_HDR_NAME_V6_PREF, same header */
834 snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
835 A2_MUX_HDR_NAME_V4_PREF,
836 mux_channel[index].mux_id);
837 tx_properties.num_props = 2;
838
839 rx_properties.prop = rx_ioc_properties;
840 rx_ipv4_property = &rx_properties.prop[0];
841 rx_ipv4_property->ip = IPA_IP_v4;
842 rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
843 rx_ipv4_property->attrib.meta_data =
844 mux_channel[index].mux_id << WWAN_METADATA_SHFT;
845 rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
846 rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
847 rx_ipv6_property = &rx_properties.prop[1];
848 rx_ipv6_property->ip = IPA_IP_v6;
849 rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
850 rx_ipv6_property->attrib.meta_data =
851 mux_channel[index].mux_id << WWAN_METADATA_SHFT;
852 rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
853 rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
854 rx_properties.num_props = 2;
855
856 pyld_sz = num_q6_rule *
857 sizeof(struct ipa_ioc_ext_intf_prop);
858 ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
859 if (!ext_ioc_properties) {
860 IPAWANERR("Error allocate memory\n");
861 return -ENOMEM;
862 }
863
864 ext_properties.prop = ext_ioc_properties;
865 ext_properties.excp_pipe_valid = true;
866 ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
867 ext_properties.num_props = num_q6_rule;
868 for (i = 0; i < num_q6_rule; i++) {
869 memcpy(&(ext_properties.prop[i]),
870 &(ipa_qmi_ctx->q6_ul_filter_rule[i]),
871 sizeof(struct ipa_ioc_ext_intf_prop));
872 ext_properties.prop[i].mux_id = mux_channel[index].mux_id;
873 IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
874 ext_properties.prop[i].ip,
875 ext_properties.prop[i].rt_tbl_idx);
876 IPAWANDBG("action: %d mux:%d\n",
877 ext_properties.prop[i].action,
878 ext_properties.prop[i].mux_id);
879 }
880 ret = ipa2_register_intf_ext(mux_channel[index].
881 vchannel_name, &tx_properties,
882 &rx_properties, &ext_properties);
883 if (ret) {
884 IPAWANERR("[%s]:ipa2_register_intf failed %d\n",
885 mux_channel[index].vchannel_name, ret);
886 goto fail;
887 }
888 mux_channel[index].ul_flt_reg = true;
889fail:
890 kfree(ext_ioc_properties);
891 return ret;
892}
893
894static void ipa_cleanup_deregister_intf(void)
895{
896 int i;
897 int ret;
898
899 for (i = 0; i < rmnet_index; i++) {
900 if (mux_channel[i].ul_flt_reg) {
901 ret = ipa2_deregister_intf(
902 mux_channel[i].vchannel_name);
903 if (ret < 0) {
904 IPAWANERR("de-register device %s(%d) failed\n",
905 mux_channel[i].vchannel_name,
906 i);
907 return;
908 }
909 IPAWANDBG("de-register device %s(%d) success\n",
910 mux_channel[i].vchannel_name,
911 i);
912 }
913 mux_channel[i].ul_flt_reg = false;
914 }
915}
916
/*
 * wwan_update_mux_channel_prop() - refresh UL filter rules and re-register
 * every mux channel's Tx/Rx/Ext properties with IPA.
 *
 * If egress is configured and the modem does not filter on the embedded
 * pipe itself, any previously-installed A7 UL rules are deleted and the
 * current cache is installed. All registered interfaces are then
 * deregistered and registered again so they pick up the new rules.
 *
 * Returns 0 on success, -EINVAL if old rules cannot be removed, -ENODEV
 * if re-registration fails, or the last rule-install status.
 */
int wwan_update_mux_channel_prop(void)
{
	int ret = 0, i;
	/* install UL filter rules */
	if (egress_set) {
		if (ipa_qmi_ctx &&
			ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
			IPAWANDBG("setup UL filter rules\n");
			if (a7_ul_flt_set) {
				IPAWANDBG("del previous UL filter rules\n");
				/* delete rule hdlers */
				ret = wwan_del_ul_flt_rule_to_ipa();
				if (ret) {
					IPAWANERR("failed to del old rules\n");
					return -EINVAL;
				}
				IPAWANDBG("deleted old UL rules\n");
			}
			ret = wwan_add_ul_flt_rule_to_ipa();
		}
		if (ret)
			IPAWANERR("failed to install UL rules\n");
		else
			a7_ul_flt_set = true;
	}
	/* update Tx/Rx/Ext property */
	IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
	if (rmnet_index == 0) {
		IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
		return ret;
	}

	ipa_cleanup_deregister_intf();

	for (i = 0; i < rmnet_index; i++) {
		ret = wwan_register_to_ipa(i);
		if (ret < 0) {
			IPAWANERR("failed to re-regist %s, mux %d, index %d\n",
				mux_channel[i].vchannel_name,
				mux_channel[i].mux_id,
				i);
			return -ENODEV;
		}
		IPAWANERR("dev(%s) has registered to IPA\n",
			mux_channel[i].vchannel_name);
		mux_channel[i].ul_flt_reg = true;
	}
	return ret;
}
966
967#ifdef INIT_COMPLETION
968#define reinit_completion(x) INIT_COMPLETION(*(x))
969#endif /* INIT_COMPLETION */
970
971static int __ipa_wwan_open(struct net_device *dev)
972{
973 struct wwan_private *wwan_ptr = netdev_priv(dev);
974
975 IPAWANDBG("[%s] __wwan_open()\n", dev->name);
976 if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
977 reinit_completion(&wwan_ptr->resource_granted_completion);
978 wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
979
980 if (ipa_rmnet_res.ipa_napi_enable)
981 napi_enable(&(wwan_ptr->napi));
982 return 0;
983}
984
985/**
986 * wwan_open() - Opens the wwan network interface. Opens logical
987 * channel on A2 MUX driver and starts the network stack queue
988 *
989 * @dev: network device
990 *
991 * Return codes:
992 * 0: success
993 * -ENODEV: Error while opening logical channel on A2 MUX driver
994 */
995static int ipa_wwan_open(struct net_device *dev)
996{
997 int rc = 0;
998
999 IPAWANDBG("[%s] wwan_open()\n", dev->name);
1000 rc = __ipa_wwan_open(dev);
1001 if (rc == 0)
1002 netif_start_queue(dev);
1003 return rc;
1004}
1005
1006static int __ipa_wwan_close(struct net_device *dev)
1007{
1008 struct wwan_private *wwan_ptr = netdev_priv(dev);
1009 int rc = 0;
1010
1011 if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
1012 wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
1013 /* do not close wwan port once up, this causes
1014 * remote side to hang if tried to open again
1015 */
1016 reinit_completion(&wwan_ptr->resource_granted_completion);
1017 if (ipa_rmnet_res.ipa_napi_enable)
1018 napi_disable(&(wwan_ptr->napi));
1019 rc = ipa2_deregister_intf(dev->name);
1020 if (rc) {
1021 IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n",
1022 dev->name, rc);
1023 return rc;
1024 }
1025 return rc;
1026 } else {
1027 return -EBADF;
1028 }
1029}
1030
1031/**
1032 * ipa_wwan_stop() - Stops the wwan network interface. Closes
1033 * logical channel on A2 MUX driver and stops the network stack
1034 * queue
1035 *
1036 * @dev: network device
1037 *
1038 * Return codes:
1039 * 0: success
1040 * -ENODEV: Error while opening logical channel on A2 MUX driver
1041 */
static int ipa_wwan_stop(struct net_device *dev)
{
	IPAWANDBG("[%s] ipa_wwan_stop()\n", dev->name);
	/* close failures are deliberately ignored: the queue must be
	 * stopped regardless, and ndo_stop callers expect success
	 */
	__ipa_wwan_close(dev);
	netif_stop_queue(dev);
	return 0;
}
1049
1050static int ipa_wwan_change_mtu(struct net_device *dev, int new_mtu)
1051{
1052 if (0 > new_mtu || WWAN_DATA_LEN < new_mtu)
1053 return -EINVAL;
1054 IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
1055 dev->name, dev->mtu, new_mtu);
1056 dev->mtu = new_mtu;
1057 return 0;
1058}
1059
1060/**
1061 * ipa_wwan_xmit() - Transmits an skb.
1062 *
1063 * @skb: skb to be transmitted
1064 * @dev: network device
1065 *
1066 * Return codes:
1067 * 0: success
1068 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
1069 * later
1070 * -EFAULT: Error while transmitting the skb
1071 */
static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int ret = 0;
	bool qmap_check;
	struct wwan_private *wwan_ptr = netdev_priv(dev);
	struct ipa_tx_meta meta;

	/* only QMAP-framed packets are accepted; anything else is dropped */
	if (skb->protocol != htons(ETH_P_MAP)) {
		IPAWANDBG
		("SW filtering out none QMAP packet received from %s",
			current->comm);
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* CD bit set means QMAP control/command packet */
	qmap_check = RMNET_MAP_GET_CD_BIT(skb);
	if (netif_queue_stopped(dev)) {
		/* let control packets through up to the higher watermark */
		if (qmap_check &&
			atomic_read(&wwan_ptr->outstanding_pkts) <
					wwan_ptr->outstanding_high_ctl) {
			pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
			goto send;
		} else {
			pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
			return NETDEV_TX_BUSY;
		}
	}

	/* checking High WM hit */
	if (atomic_read(&wwan_ptr->outstanding_pkts) >=
					wwan_ptr->outstanding_high) {
		if (!qmap_check) {
			IPAWANDBG("pending(%d)/(%d)- stop(%d), qmap_chk(%d)\n",
				atomic_read(&wwan_ptr->outstanding_pkts),
				wwan_ptr->outstanding_high,
				netif_queue_stopped(dev),
				qmap_check);
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;
		}
	}

send:
	/* IPA_RM checking start */
	ret = ipa_rm_inactivity_timer_request_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret == -EINPROGRESS) {
		/* grant will arrive asynchronously; retry the skb later */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	if (ret) {
		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
		       dev->name, ret);
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return -EFAULT;
	}
	/* IPA_RM checking end */

	if (qmap_check) {
		/* control packets go straight to the Q6 LAN consumer */
		memset(&meta, 0, sizeof(meta));
		meta.pkt_init_dst_ep_valid = true;
		meta.pkt_init_dst_ep_remote = true;
		ret = ipa2_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
	} else {
		ret = ipa2_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
	}

	if (ret) {
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	atomic_inc(&wwan_ptr->outstanding_pkts);
	dev->stats.tx_packets++;
	/* NOTE(review): skb->len is read after the skb was handed to
	 * ipa2_tx_dp(); confirm IPA does not complete/free it before this
	 * point — otherwise the length should be cached before the call.
	 */
	dev->stats.tx_bytes += skb->len;
	ret = NETDEV_TX_OK;
out:
	ipa_rm_inactivity_timer_release_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
	return ret;
}
1155
/* ndo_tx_timeout handler: only logs; recovery is left to SSR/modem path */
static void ipa_wwan_tx_timeout(struct net_device *dev)
{
	IPAWANERR("[%s] ipa_wwan_tx_timeout(), data stall in UL\n", dev->name);
}
1160
1161/**
1162 * apps_ipa_tx_complete_notify() - Rx notify
1163 *
1164 * @priv: driver context
1165 * @evt: event type
1166 * @data: data provided with event
1167 *
1168 * Check that the packet is the one we sent and release it
 * This function will be called in deferred context in IPA wq.
1170 */
static void apps_ipa_tx_complete_notify(void *priv,
		enum ipa_dp_evt_type evt,
		unsigned long data)
{
	struct sk_buff *skb = (struct sk_buff *)data;
	struct net_device *dev = (struct net_device *)priv;
	struct wwan_private *wwan_ptr;

	/* stale completion from before SSR: just free the skb */
	if (dev != ipa_netdevs[0]) {
		IPAWANDBG("Received pre-SSR packet completion\n");
		dev_kfree_skb_any(skb);
		return;
	}

	if (evt != IPA_WRITE_DONE) {
		IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return;
	}

	wwan_ptr = netdev_priv(dev);
	atomic_dec(&wwan_ptr->outstanding_pkts);
	/* wake decision is made under the tx queue lock so it cannot race
	 * with ipa_wwan_xmit() stopping the queue
	 */
	__netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
	if (!atomic_read(&is_ssr) &&
		netif_queue_stopped(wwan_ptr->net) &&
	   atomic_read(&wwan_ptr->outstanding_pkts) <
					(wwan_ptr->outstanding_low)) {
		IPAWANDBG("Outstanding low (%d) - wake up queue\n",
				wwan_ptr->outstanding_low);
		netif_wake_queue(wwan_ptr->net);
	}
	__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
	dev_kfree_skb_any(skb);
	/* balances the request taken in ipa_wwan_xmit() */
	ipa_rm_inactivity_timer_release_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
}
1208
1209/**
1210 * apps_ipa_packet_receive_notify() - Rx notify
1211 *
1212 * @priv: driver context
1213 * @evt: event type
1214 * @data: data provided with event
1215 *
1216 * IPA will pass a packet to the Linux network stack with skb->data
1217 */
static void apps_ipa_packet_receive_notify(void *priv,
		enum ipa_dp_evt_type evt,
		unsigned long data)
{
	struct net_device *dev = (struct net_device *)priv;

	if (evt == IPA_RECEIVE) {
		struct sk_buff *skb = (struct sk_buff *)data;
		int result;
		/* cache the length: the skb may be consumed by the stack */
		unsigned int packet_len = skb->len;

		IPAWANDBG("Rx packet was received");
		skb->dev = ipa_netdevs[0];
		skb->protocol = htons(ETH_P_MAP);

		if (ipa_rmnet_res.ipa_napi_enable) {
			trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets);
			result = netif_receive_skb(skb);
		} else {
			/* periodically use netif_rx_ni() to yield to softirqs
			 * and avoid starving the rest of the system
			 */
			if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
					== 0) {
				trace_rmnet_ipa_netifni(dev->stats.rx_packets);
				result = netif_rx_ni(skb);
			} else {
				trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
				result = netif_rx(skb);
			}
		}

		if (result)	{
			pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
							   __func__, __LINE__);
			dev->stats.rx_dropped++;
		}
		/* NOTE(review): rx_packets/rx_bytes are bumped even when the
		 * packet was counted as dropped above — confirm intentional.
		 */
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += packet_len;
	} else if (evt == IPA_CLIENT_START_POLL)
		ipa_rmnet_rx_cb(priv);
	else if (evt == IPA_CLIENT_COMP_NAPI) {
		struct wwan_private *wwan_ptr = netdev_priv(dev);

		if (ipa_rmnet_res.ipa_napi_enable)
			napi_complete(&(wwan_ptr->napi));
	} else
		IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);

}
1265
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001266static int handle_ingress_format(struct net_device *dev,
1267 struct rmnet_ioctl_extended_s *in)
1268{
1269 int ret = 0;
1270 struct rmnet_phys_ep_conf_s *ep_cfg;
1271
1272 IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
1273 if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
1274 ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en =
1275 IPA_ENABLE_CS_OFFLOAD_DL;
1276
1277 if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
1278 IPAWANERR("get AGG size %d count %d\n",
1279 in->u.ingress_format.agg_size,
1280 in->u.ingress_format.agg_count);
1281
1282 ret = ipa_disable_apps_wan_cons_deaggr(
1283 in->u.ingress_format.agg_size,
1284 in->u.ingress_format.agg_count);
1285
1286 if (!ret) {
1287 ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit =
1288 in->u.ingress_format.agg_size;
1289 ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
1290 in->u.ingress_format.agg_count;
1291
1292 if (ipa_rmnet_res.ipa_napi_enable) {
1293 ipa_to_apps_ep_cfg.recycle_enabled = true;
1294 ep_cfg = (struct rmnet_phys_ep_conf_s *)
1295 rcu_dereference(dev->rx_handler_data);
1296 ep_cfg->recycle = ipa_recycle_wan_skb;
1297 pr_info("Wan Recycle Enabled\n");
1298 }
1299 }
1300 }
1301
1302 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
1303 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
1304 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
1305 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
1306 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
1307
1308 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
1309 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
1310 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding =
1311 true;
1312 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
1313 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
1314 ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
1315
1316 ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
1317 ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify;
1318 ipa_to_apps_ep_cfg.priv = dev;
1319
1320 ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
Sunil Paidimarri226cf032016-10-14 13:33:08 -07001321 ipa_to_apps_ep_cfg.desc_fifo_sz =
1322 ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001323
1324 mutex_lock(&ipa_to_apps_pipe_handle_guard);
1325 if (atomic_read(&is_ssr)) {
1326 IPAWANDBG("In SSR sequence/recovery\n");
1327 mutex_unlock(&ipa_to_apps_pipe_handle_guard);
1328 return -EFAULT;
1329 }
1330 ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
1331 mutex_unlock(&ipa_to_apps_pipe_handle_guard);
1332
1333 if (ret)
1334 IPAWANERR("failed to configure ingress\n");
1335
1336 return ret;
1337}
1338
Amir Levy9659e592016-10-27 18:08:27 +03001339/**
1340 * ipa_wwan_ioctl() - I/O control for wwan network driver.
1341 *
1342 * @dev: network device
1343 * @ifr: ignored
 * @cmd: cmd to be executed; can be one of the following:
1345 * IPA_WWAN_IOCTL_OPEN - Open the network interface
1346 * IPA_WWAN_IOCTL_CLOSE - Close the network interface
1347 *
1348 * Return codes:
1349 * 0: success
1350 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
1351 * later
1352 * -EFAULT: Error while transmitting the skb
1353 */
1354static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1355{
1356 int rc = 0;
1357 int mru = 1000, epid = 1, mux_index, len;
1358 struct ipa_msg_meta msg_meta;
1359 struct ipa_wan_msg *wan_msg = NULL;
1360 struct rmnet_ioctl_extended_s extend_ioctl_data;
1361 struct rmnet_ioctl_data_s ioctl_data;
1362
1363 IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
1364 switch (cmd) {
1365 /* Set Ethernet protocol */
1366 case RMNET_IOCTL_SET_LLP_ETHERNET:
1367 break;
1368 /* Set RAWIP protocol */
1369 case RMNET_IOCTL_SET_LLP_IP:
1370 break;
1371 /* Get link protocol */
1372 case RMNET_IOCTL_GET_LLP:
1373 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
1374 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1375 sizeof(struct rmnet_ioctl_data_s)))
1376 rc = -EFAULT;
1377 break;
1378 /* Set QoS header enabled */
1379 case RMNET_IOCTL_SET_QOS_ENABLE:
1380 return -EINVAL;
1381 /* Set QoS header disabled */
1382 case RMNET_IOCTL_SET_QOS_DISABLE:
1383 break;
1384 /* Get QoS header state */
1385 case RMNET_IOCTL_GET_QOS:
1386 ioctl_data.u.operation_mode = RMNET_MODE_NONE;
1387 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1388 sizeof(struct rmnet_ioctl_data_s)))
1389 rc = -EFAULT;
1390 break;
1391 /* Get operation mode */
1392 case RMNET_IOCTL_GET_OPMODE:
1393 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
1394 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1395 sizeof(struct rmnet_ioctl_data_s)))
1396 rc = -EFAULT;
1397 break;
1398 /* Open transport port */
1399 case RMNET_IOCTL_OPEN:
1400 break;
1401 /* Close transport port */
1402 case RMNET_IOCTL_CLOSE:
1403 break;
1404 /* Flow enable */
1405 case RMNET_IOCTL_FLOW_ENABLE:
1406 IPAWANDBG("Received flow enable\n");
1407 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
1408 sizeof(struct rmnet_ioctl_data_s))) {
1409 rc = -EFAULT;
1410 break;
1411 }
1412 ipa_flow_control(IPA_CLIENT_USB_PROD, true,
1413 ioctl_data.u.tcm_handle);
1414 break;
1415 /* Flow disable */
1416 case RMNET_IOCTL_FLOW_DISABLE:
1417 IPAWANDBG("Received flow disable\n");
1418 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
1419 sizeof(struct rmnet_ioctl_data_s))) {
1420 rc = -EFAULT;
1421 break;
1422 }
1423 ipa_flow_control(IPA_CLIENT_USB_PROD, false,
1424 ioctl_data.u.tcm_handle);
1425 break;
1426 /* Set flow handle */
1427 case RMNET_IOCTL_FLOW_SET_HNDL:
1428 break;
1429
1430 /* Extended IOCTLs */
1431 case RMNET_IOCTL_EXTENDED:
1432 IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
1433 if (copy_from_user(&extend_ioctl_data,
1434 (u8 *)ifr->ifr_ifru.ifru_data,
1435 sizeof(struct rmnet_ioctl_extended_s))) {
1436 IPAWANERR("failed to copy extended ioctl data\n");
1437 rc = -EFAULT;
1438 break;
1439 }
1440 switch (extend_ioctl_data.extended_ioctl) {
1441 /* Get features */
1442 case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
1443 IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
1444 extend_ioctl_data.u.data =
1445 (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
1446 RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
1447 RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
1448 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1449 &extend_ioctl_data,
1450 sizeof(struct rmnet_ioctl_extended_s)))
1451 rc = -EFAULT;
1452 break;
1453 /* Set MRU */
1454 case RMNET_IOCTL_SET_MRU:
1455 mru = extend_ioctl_data.u.data;
1456 IPAWANDBG("get MRU size %d\n",
1457 extend_ioctl_data.u.data);
1458 break;
1459 /* Get MRU */
1460 case RMNET_IOCTL_GET_MRU:
1461 extend_ioctl_data.u.data = mru;
1462 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1463 &extend_ioctl_data,
1464 sizeof(struct rmnet_ioctl_extended_s)))
1465 rc = -EFAULT;
1466 break;
1467 /* GET SG support */
1468 case RMNET_IOCTL_GET_SG_SUPPORT:
1469 extend_ioctl_data.u.data =
1470 ipa_rmnet_res.ipa_advertise_sg_support;
1471 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1472 &extend_ioctl_data,
1473 sizeof(struct rmnet_ioctl_extended_s)))
1474 rc = -EFAULT;
1475 break;
1476 /* Get endpoint ID */
1477 case RMNET_IOCTL_GET_EPID:
1478 IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
1479 extend_ioctl_data.u.data = epid;
1480 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1481 &extend_ioctl_data,
1482 sizeof(struct rmnet_ioctl_extended_s)))
1483 rc = -EFAULT;
1484 if (copy_from_user(&extend_ioctl_data,
1485 (u8 *)ifr->ifr_ifru.ifru_data,
1486 sizeof(struct rmnet_ioctl_extended_s))) {
1487 IPAWANERR("copy extended ioctl data failed\n");
1488 rc = -EFAULT;
1489 break;
1490 }
1491 IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
1492 extend_ioctl_data.u.data);
1493 break;
1494 /* Endpoint pair */
1495 case RMNET_IOCTL_GET_EP_PAIR:
1496 IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
1497 extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
1498 ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
1499 extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
1500 ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
1501 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1502 &extend_ioctl_data,
1503 sizeof(struct rmnet_ioctl_extended_s)))
1504 rc = -EFAULT;
1505 if (copy_from_user(&extend_ioctl_data,
1506 (u8 *)ifr->ifr_ifru.ifru_data,
1507 sizeof(struct rmnet_ioctl_extended_s))) {
1508 IPAWANERR("copy extended ioctl data failed\n");
1509 rc = -EFAULT;
1510 break;
1511 }
1512 IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
1513 extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
1514 extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
1515 break;
1516 /* Get driver name */
1517 case RMNET_IOCTL_GET_DRIVER_NAME:
1518 memcpy(&extend_ioctl_data.u.if_name,
1519 ipa_netdevs[0]->name,
1520 sizeof(IFNAMSIZ));
1521 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1522 &extend_ioctl_data,
1523 sizeof(struct rmnet_ioctl_extended_s)))
1524 rc = -EFAULT;
1525 break;
1526 /* Add MUX ID */
1527 case RMNET_IOCTL_ADD_MUX_CHANNEL:
1528 mux_index = find_mux_channel_index(
1529 extend_ioctl_data.u.rmnet_mux_val.mux_id);
1530 if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
1531 IPAWANDBG("already setup mux(%d)\n",
1532 extend_ioctl_data.u.
1533 rmnet_mux_val.mux_id);
1534 return rc;
1535 }
Skylar Chang8438ba52017-03-15 21:27:35 -07001536 mutex_lock(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03001537 if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
1538 IPAWANERR("Exceed mux_channel limit(%d)\n",
1539 rmnet_index);
Skylar Chang8438ba52017-03-15 21:27:35 -07001540 mutex_unlock(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03001541 return -EFAULT;
1542 }
1543 IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
1544 extend_ioctl_data.u.rmnet_mux_val.mux_id,
1545 extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
1546 /* cache the mux name and id */
1547 mux_channel[rmnet_index].mux_id =
1548 extend_ioctl_data.u.rmnet_mux_val.mux_id;
1549 memcpy(mux_channel[rmnet_index].vchannel_name,
1550 extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
1551 sizeof(mux_channel[rmnet_index].vchannel_name));
Skylar Changba7c5112017-04-14 19:23:05 -07001552 mux_channel[rmnet_index].vchannel_name[
1553 IFNAMSIZ - 1] = '\0';
1554
Amir Levy9659e592016-10-27 18:08:27 +03001555 IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
1556 mux_channel[rmnet_index].vchannel_name,
1557 mux_channel[rmnet_index].mux_id,
1558 rmnet_index);
1559 /* check if UL filter rules coming*/
1560 if (num_q6_rule != 0) {
1561 IPAWANERR("dev(%s) register to IPA\n",
1562 extend_ioctl_data.u.rmnet_mux_val.
1563 vchannel_name);
1564 rc = wwan_register_to_ipa(rmnet_index);
1565 if (rc < 0) {
1566 IPAWANERR("device %s reg IPA failed\n",
1567 extend_ioctl_data.u.
1568 rmnet_mux_val.vchannel_name);
Skylar Chang8438ba52017-03-15 21:27:35 -07001569 mutex_unlock(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03001570 return -ENODEV;
1571 }
1572 mux_channel[rmnet_index].mux_channel_set = true;
1573 mux_channel[rmnet_index].ul_flt_reg = true;
1574 } else {
1575 IPAWANDBG("dev(%s) haven't registered to IPA\n",
1576 extend_ioctl_data.u.
1577 rmnet_mux_val.vchannel_name);
1578 mux_channel[rmnet_index].mux_channel_set = true;
1579 mux_channel[rmnet_index].ul_flt_reg = false;
1580 }
1581 rmnet_index++;
Skylar Chang8438ba52017-03-15 21:27:35 -07001582 mutex_unlock(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03001583 break;
1584 case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
1585 IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
1586 if ((extend_ioctl_data.u.data) &
1587 RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
1588 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
1589 apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
1590 cs_offload_en =
1591 IPA_ENABLE_CS_OFFLOAD_UL;
1592 apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
1593 cs_metadata_hdr_offset = 1;
1594 } else {
1595 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
1596 }
1597 if ((extend_ioctl_data.u.data) &
1598 RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
1599 apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
1600 IPA_ENABLE_AGGR;
1601 else
1602 apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
1603 IPA_BYPASS_AGGR;
1604 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
1605 hdr_ofst_metadata_valid = 1;
1606 /* modem want offset at 0! */
1607 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
1608 apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst =
1609 IPA_CLIENT_APPS_LAN_WAN_PROD;
1610 apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC;
1611
1612 apps_to_ipa_ep_cfg.client =
1613 IPA_CLIENT_APPS_LAN_WAN_PROD;
1614 apps_to_ipa_ep_cfg.notify =
1615 apps_ipa_tx_complete_notify;
1616 apps_to_ipa_ep_cfg.desc_fifo_sz =
1617 IPA_SYS_TX_DATA_DESC_FIFO_SZ;
1618 apps_to_ipa_ep_cfg.priv = dev;
1619
1620 rc = ipa2_setup_sys_pipe(&apps_to_ipa_ep_cfg,
1621 &apps_to_ipa_hdl);
1622 if (rc)
1623 IPAWANERR("failed to config egress endpoint\n");
1624
1625 if (num_q6_rule != 0) {
1626 /* already got Q6 UL filter rules*/
1627 if (ipa_qmi_ctx &&
1628 ipa_qmi_ctx->modem_cfg_emb_pipe_flt
Skylar Chang441cc5e2017-08-11 15:49:21 -07001629 == false) {
1630 /* protect num_q6_rule */
1631 mutex_lock(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03001632 rc = wwan_add_ul_flt_rule_to_ipa();
Skylar Chang441cc5e2017-08-11 15:49:21 -07001633 mutex_unlock(&add_mux_channel_lock);
1634 } else
Amir Levy9659e592016-10-27 18:08:27 +03001635 rc = 0;
1636 egress_set = true;
1637 if (rc)
1638 IPAWANERR("install UL rules failed\n");
1639 else
1640 a7_ul_flt_set = true;
1641 } else {
1642 /* wait Q6 UL filter rules*/
1643 egress_set = true;
1644 IPAWANDBG("no UL-rules, egress_set(%d)\n",
1645 egress_set);
1646 }
1647 break;
1648 case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001649 rc = handle_ingress_format(dev, &extend_ioctl_data);
Amir Levy9659e592016-10-27 18:08:27 +03001650 break;
1651 case RMNET_IOCTL_SET_XLAT_DEV_INFO:
1652 wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
1653 GFP_KERNEL);
1654 if (!wan_msg) {
1655 IPAWANERR("Failed to allocate memory.\n");
1656 return -ENOMEM;
1657 }
1658 len = sizeof(wan_msg->upstream_ifname) >
1659 sizeof(extend_ioctl_data.u.if_name) ?
1660 sizeof(extend_ioctl_data.u.if_name) :
1661 sizeof(wan_msg->upstream_ifname);
1662 strlcpy(wan_msg->upstream_ifname,
1663 extend_ioctl_data.u.if_name, len);
1664 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
1665 msg_meta.msg_type = WAN_XLAT_CONNECT;
1666 msg_meta.msg_len = sizeof(struct ipa_wan_msg);
1667 rc = ipa2_send_msg(&msg_meta, wan_msg,
1668 ipa_wwan_msg_free_cb);
1669 if (rc) {
1670 IPAWANERR("Failed to send XLAT_CONNECT msg\n");
1671 kfree(wan_msg);
1672 }
1673 break;
1674 /* Get agg count */
1675 case RMNET_IOCTL_GET_AGGREGATION_COUNT:
1676 break;
1677 /* Set agg count */
1678 case RMNET_IOCTL_SET_AGGREGATION_COUNT:
1679 break;
1680 /* Get agg size */
1681 case RMNET_IOCTL_GET_AGGREGATION_SIZE:
1682 break;
1683 /* Set agg size */
1684 case RMNET_IOCTL_SET_AGGREGATION_SIZE:
1685 break;
1686 /* Do flow control */
1687 case RMNET_IOCTL_FLOW_CONTROL:
1688 break;
1689 /* For legacy use */
1690 case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
1691 break;
1692 /* Get HW/SW map */
1693 case RMNET_IOCTL_GET_HWSW_MAP:
1694 break;
1695 /* Set RX Headroom */
1696 case RMNET_IOCTL_SET_RX_HEADROOM:
1697 break;
1698 default:
1699 IPAWANERR("[%s] unsupported extended cmd[%d]",
1700 dev->name,
1701 extend_ioctl_data.extended_ioctl);
1702 rc = -EINVAL;
1703 }
1704 break;
1705 default:
1706 IPAWANERR("[%s] unsupported cmd[%d]",
1707 dev->name, cmd);
1708 rc = -EINVAL;
1709 }
1710 return rc;
1711}
1712
1713static const struct net_device_ops ipa_wwan_ops_ip = {
1714 .ndo_open = ipa_wwan_open,
1715 .ndo_stop = ipa_wwan_stop,
1716 .ndo_start_xmit = ipa_wwan_xmit,
1717 .ndo_tx_timeout = ipa_wwan_tx_timeout,
1718 .ndo_do_ioctl = ipa_wwan_ioctl,
1719 .ndo_change_mtu = ipa_wwan_change_mtu,
1720 .ndo_set_mac_address = 0,
1721 .ndo_validate_addr = 0,
1722};
1723
1724/**
1725 * wwan_setup() - Setups the wwan network driver.
1726 *
1727 * @dev: network device
1728 *
1729 * Return codes:
1730 * None
1731 */
1732
1733static void ipa_wwan_setup(struct net_device *dev)
1734{
1735 dev->netdev_ops = &ipa_wwan_ops_ip;
1736 ether_setup(dev);
1737 /* set this after calling ether_setup */
1738 dev->header_ops = 0; /* No header */
1739 dev->type = ARPHRD_RAWIP;
1740 dev->hard_header_len = 0;
1741 dev->mtu = WWAN_DATA_LEN;
1742 dev->addr_len = 0;
1743 dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
1744 dev->needed_headroom = HEADROOM_FOR_QMAP;
1745 dev->needed_tailroom = TAILROOM;
1746 dev->watchdog_timeo = 1000;
1747}
1748
/* IPA_RM related functions start*/
/* Q6 producer resource request/release are deferred to a dedicated
 * workqueue (ipa_rm_q6_workqueue) because the RM callbacks may be
 * invoked from atomic context.
 */
static void q6_prod_rm_request_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(q6_con_rm_request, q6_prod_rm_request_resource);
static void q6_prod_rm_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(q6_con_rm_release, q6_prod_rm_release_resource);
1754
1755static void q6_prod_rm_request_resource(struct work_struct *work)
1756{
1757 int ret = 0;
1758
1759 ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
1760 if (ret < 0 && ret != -EINPROGRESS) {
1761 IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
1762 ret);
1763 return;
1764 }
1765}
1766
/* RM request hook: defer the actual request to the Q6 workqueue. */
static int q6_rm_request_resource(void)
{
	queue_delayed_work(ipa_rm_q6_workqueue,
	   &q6_con_rm_request, 0);
	return 0;
}
1773
1774static void q6_prod_rm_release_resource(struct work_struct *work)
1775{
1776 int ret = 0;
1777
1778 ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
1779 if (ret < 0 && ret != -EINPROGRESS) {
1780 IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
1781 ret);
1782 return;
1783 }
1784}
1785
1786
/* RM release hook: defer the actual release to the Q6 workqueue. */
static int q6_rm_release_resource(void)
{
	queue_delayed_work(ipa_rm_q6_workqueue,
	   &q6_con_rm_release, 0);
	return 0;
}
1793
1794
1795static void q6_rm_notify_cb(void *user_data,
1796 enum ipa_rm_event event,
1797 unsigned long data)
1798{
1799 switch (event) {
1800 case IPA_RM_RESOURCE_GRANTED:
1801 IPAWANDBG("%s: Q6_PROD GRANTED CB\n", __func__);
1802 break;
1803 case IPA_RM_RESOURCE_RELEASED:
1804 IPAWANDBG("%s: Q6_PROD RELEASED CB\n", __func__);
1805 break;
1806 default:
1807 return;
1808 }
1809}
/*
 * q6_initialize_rm() - create the Q6 PROD/CONS IPA RM resources, add the
 * Q6_PROD -> APPS_CONS dependency and set their performance profiles.
 *
 * Uses a goto-ladder to unwind in reverse order on any failure.
 * Returns 0 on success or the first error encountered.
 */
static int q6_initialize_rm(void)
{
	struct ipa_rm_create_params create_params;
	struct ipa_rm_perf_profile profile;
	int result;

	/* Initialize IPA_RM workqueue */
	ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req");
	if (!ipa_rm_q6_workqueue)
		return -ENOMEM;

	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_Q6_PROD;
	create_params.reg_params.notify_cb = &q6_rm_notify_cb;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto create_rsrc_err1;
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_Q6_CONS;
	create_params.release_resource = &q6_rm_release_resource;
	create_params.request_resource = &q6_rm_request_resource;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto create_rsrc_err2;
	/* add dependency*/
	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
	if (result)
		goto add_dpnd_err;
	/* setup Performance profile */
	memset(&profile, 0, sizeof(profile));
	profile.max_supported_bandwidth_mbps = 100;
	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
			&profile);
	if (result)
		goto set_perf_err;
	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
			&profile);
	if (result)
		goto set_perf_err;
	return result;

/* error unwind: undo each successful step in reverse order */
set_perf_err:
	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
add_dpnd_err:
	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (result < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, result);
create_rsrc_err2:
	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (result < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, result);
create_rsrc_err1:
	destroy_workqueue(ipa_rm_q6_workqueue);
	return result;
}
1869
/*
 * q6_deinitialize_rm() - tear down everything q6_initialize_rm() set up:
 * dependency first, then the CONS and PROD resources, then the workqueue.
 * Errors are logged but teardown continues (best effort).
 */
void q6_deinitialize_rm(void)
{
	int ret;

	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
			ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, ret);

	/* the workqueue may not exist if initialization failed early */
	if (ipa_rm_q6_workqueue)
		destroy_workqueue(ipa_rm_q6_workqueue);
}
1892
1893static void wake_tx_queue(struct work_struct *work)
1894{
1895 if (ipa_netdevs[0]) {
1896 __netif_tx_lock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
1897 netif_wake_queue(ipa_netdevs[0]);
1898 __netif_tx_unlock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
1899 }
1900}
1901
1902/**
1903 * ipa_rm_resource_granted() - Called upon
 * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
 *
 * @dev: network device (passed as the IPA RM user data)
1907 *
1908 * Return codes:
1909 * None
1910 */
static void ipa_rm_resource_granted(void *dev)
{
	IPAWANDBG("Resource Granted - starting queue\n");
	/* waking the queue takes the tx lock; defer to process context */
	schedule_work(&ipa_tx_wakequeue_work);
}
1916
1917/**
1918 * ipa_rm_notify() - Callback function for RM events. Handles
1919 * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
1920 * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
1921 * workqueue.
1922 *
1923 * @dev: network device
1924 * @event: IPA RM event
1925 * @data: Additional data provided by IPA RM
1926 *
1927 * Return codes:
1928 * None
1929 */
1930static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
1931 unsigned long data)
1932{
1933 struct wwan_private *wwan_ptr = netdev_priv(dev);
1934
1935 pr_debug("%s: event %d\n", __func__, event);
1936 switch (event) {
1937 case IPA_RM_RESOURCE_GRANTED:
1938 if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
1939 complete_all(&wwan_ptr->resource_granted_completion);
1940 break;
1941 }
1942 ipa_rm_resource_granted(dev);
1943 break;
1944 case IPA_RM_RESOURCE_RELEASED:
1945 break;
1946 default:
1947 pr_err("%s: unknown event %d\n", __func__, event);
1948 break;
1949 }
1950}
1951
1952/* IPA_RM related functions end*/
1953
/* Subsystem-restart (modem SSR) notifier; handler defined further below. */
static int ssr_notifier_cb(struct notifier_block *this,
			   unsigned long code,
			   void *data);

static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};
1961
1962static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
1963 struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
1964{
Sunil Paidimarri226cf032016-10-14 13:33:08 -07001965 int result;
1966
1967 ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
Amir Levy9659e592016-10-27 18:08:27 +03001968 ipa_rmnet_drv_res->ipa_rmnet_ssr =
1969 of_property_read_bool(pdev->dev.of_node,
1970 "qcom,rmnet-ipa-ssr");
1971 pr_info("IPA SSR support = %s\n",
1972 ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
1973 ipa_rmnet_drv_res->ipa_loaduC =
1974 of_property_read_bool(pdev->dev.of_node,
1975 "qcom,ipa-loaduC");
1976 pr_info("IPA ipa-loaduC = %s\n",
1977 ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");
1978
1979 ipa_rmnet_drv_res->ipa_advertise_sg_support =
1980 of_property_read_bool(pdev->dev.of_node,
1981 "qcom,ipa-advertise-sg-support");
1982 pr_info("IPA SG support = %s\n",
1983 ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
1984
1985 ipa_rmnet_drv_res->ipa_napi_enable =
1986 of_property_read_bool(pdev->dev.of_node,
1987 "qcom,ipa-napi-enable");
1988 pr_info("IPA Napi Enable = %s\n",
1989 ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
Sunil Paidimarri226cf032016-10-14 13:33:08 -07001990
1991 /* Get IPA WAN RX desc fifo size */
1992 result = of_property_read_u32(pdev->dev.of_node,
1993 "qcom,wan-rx-desc-size",
1994 &ipa_rmnet_drv_res->wan_rx_desc_size);
1995 if (result)
1996 pr_info("using default for wan-rx-desc-size = %u\n",
1997 ipa_rmnet_drv_res->wan_rx_desc_size);
1998 else
1999 IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
2000 ipa_rmnet_drv_res->wan_rx_desc_size);
2001
Amir Levy9659e592016-10-27 18:08:27 +03002002 return 0;
2003}
2004
/* driver-wide context: SSR enablement, stats polling interval, metered mux */
struct ipa_rmnet_context ipa_rmnet_ctx;
2006
2007/**
2008 * ipa_wwan_probe() - Initialized the module and registers as a
2009 * network interface to the network stack
2010 *
2011 * Return codes:
2012 * 0: success
2013 * -ENOMEM: No memory available
2014 * -EFAULT: Internal error
2015 * -ENODEV: IPA driver not loaded
2016 */
2017static int ipa_wwan_probe(struct platform_device *pdev)
2018{
2019 int ret, i;
2020 struct net_device *dev;
2021 struct wwan_private *wwan_ptr;
2022 struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */
2023 struct ipa_rm_perf_profile profile; /* IPA_RM */
2024
2025 pr_info("rmnet_ipa started initialization\n");
2026
2027 if (!ipa2_is_ready()) {
2028 IPAWANERR("IPA driver not loaded\n");
2029 return -ENODEV;
2030 }
2031
2032 ret = get_ipa_rmnet_dts_configuration(pdev, &ipa_rmnet_res);
2033 ipa_rmnet_ctx.ipa_rmnet_ssr = ipa_rmnet_res.ipa_rmnet_ssr;
2034
2035 ret = ipa_init_q6_smem();
2036 if (ret) {
2037 IPAWANERR("ipa_init_q6_smem failed!\n");
2038 return ret;
2039 }
2040
2041 /* initialize tx/rx enpoint setup */
2042 memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
2043 memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
2044
2045 /* initialize ex property setup */
2046 num_q6_rule = 0;
2047 old_num_q6_rule = 0;
2048 rmnet_index = 0;
2049 egress_set = false;
2050 a7_ul_flt_set = false;
2051 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
2052 memset(&mux_channel[i], 0, sizeof(struct rmnet_mux_val));
2053
2054 /* start A7 QMI service/client */
2055 if (ipa_rmnet_res.ipa_loaduC)
2056 /* Android platform loads uC */
2057 ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
2058 else
2059 /* LE platform not loads uC */
2060 ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
2061
2062 /* construct default WAN RT tbl for IPACM */
2063 ret = ipa_setup_a7_qmap_hdr();
2064 if (ret)
2065 goto setup_a7_qmap_hdr_err;
2066 ret = ipa_setup_dflt_wan_rt_tables();
2067 if (ret)
2068 goto setup_dflt_wan_rt_tables_err;
2069
2070 if (!atomic_read(&is_ssr)) {
2071 /* Start transport-driver fd ioctl for ipacm for first init */
2072 ret = wan_ioctl_init();
2073 if (ret)
2074 goto wan_ioctl_init_err;
2075 } else {
2076 /* Enable sending QMI messages after SSR */
2077 wan_ioctl_enable_qmi_messages();
2078 }
2079
2080 /* initialize wan-driver netdev */
2081 dev = alloc_netdev(sizeof(struct wwan_private),
2082 IPA_WWAN_DEV_NAME,
2083 NET_NAME_UNKNOWN,
2084 ipa_wwan_setup);
2085 if (!dev) {
2086 IPAWANERR("no memory for netdev\n");
2087 ret = -ENOMEM;
2088 goto alloc_netdev_err;
2089 }
2090 ipa_netdevs[0] = dev;
2091 wwan_ptr = netdev_priv(dev);
2092 memset(wwan_ptr, 0, sizeof(*wwan_ptr));
2093 IPAWANDBG("wwan_ptr (private) = %p", wwan_ptr);
2094 wwan_ptr->net = dev;
2095 wwan_ptr->outstanding_high_ctl = DEFAULT_OUTSTANDING_HIGH_CTL;
2096 wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
2097 wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW;
2098 atomic_set(&wwan_ptr->outstanding_pkts, 0);
2099 spin_lock_init(&wwan_ptr->lock);
2100 init_completion(&wwan_ptr->resource_granted_completion);
2101
2102 if (!atomic_read(&is_ssr)) {
2103 /* IPA_RM configuration starts */
2104 ret = q6_initialize_rm();
2105 if (ret) {
2106 IPAWANERR("%s: q6_initialize_rm failed, ret: %d\n",
2107 __func__, ret);
2108 goto q6_init_err;
2109 }
2110 }
2111
2112 memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
2113 ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
2114 ipa_rm_params.reg_params.user_data = dev;
2115 ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
2116 ret = ipa_rm_create_resource(&ipa_rm_params);
2117 if (ret) {
2118 pr_err("%s: unable to create resourse %d in IPA RM\n",
2119 __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
2120 goto create_rsrc_err;
2121 }
2122 ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
2123 IPA_RM_INACTIVITY_TIMER);
2124 if (ret) {
2125 pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
2126 __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
2127 goto timer_init_err;
2128 }
2129 /* add dependency */
2130 ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2131 IPA_RM_RESOURCE_Q6_CONS);
2132 if (ret)
2133 goto add_dpnd_err;
2134 /* setup Performance profile */
2135 memset(&profile, 0, sizeof(profile));
2136 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
2137 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
2138 &profile);
2139 if (ret)
2140 goto set_perf_err;
2141 /* IPA_RM configuration ends */
2142
2143 /* Enable SG support in netdevice. */
2144 if (ipa_rmnet_res.ipa_advertise_sg_support)
2145 dev->hw_features |= NETIF_F_SG;
2146
2147 /* Enable NAPI support in netdevice. */
2148 if (ipa_rmnet_res.ipa_napi_enable) {
2149 netif_napi_add(dev, &(wwan_ptr->napi),
2150 ipa_rmnet_poll, NAPI_WEIGHT);
2151 }
2152
2153 ret = register_netdev(dev);
2154 if (ret) {
2155 IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
2156 0, ret);
2157 goto set_perf_err;
2158 }
2159
2160 IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n",
2161 ipa_netdevs[0]->name);
2162 if (ret) {
2163 IPAWANERR("default configuration failed rc=%d\n",
2164 ret);
2165 goto config_err;
2166 }
2167 atomic_set(&is_initialized, 1);
2168 if (!atomic_read(&is_ssr)) {
2169 /* offline charging mode */
2170 ipa2_proxy_clk_unvote();
2171 }
2172 atomic_set(&is_ssr, 0);
2173
2174 pr_info("rmnet_ipa completed initialization\n");
2175 return 0;
2176config_err:
2177 if (ipa_rmnet_res.ipa_napi_enable)
2178 netif_napi_del(&(wwan_ptr->napi));
2179 unregister_netdev(ipa_netdevs[0]);
2180set_perf_err:
2181 ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2182 IPA_RM_RESOURCE_Q6_CONS);
2183 if (ret)
2184 IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
2185 IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
2186 ret);
2187add_dpnd_err:
2188 ret = ipa_rm_inactivity_timer_destroy(
2189 IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
2190 if (ret)
2191 IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
2192 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2193timer_init_err:
2194 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2195 if (ret)
2196 IPAWANERR("Error deleting resource %d, ret=%d\n",
2197 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2198create_rsrc_err:
Mohammed Javid4e3015d2017-07-31 13:27:18 +05302199
2200 if (!atomic_read(&is_ssr))
2201 q6_deinitialize_rm();
2202
Amir Levy9659e592016-10-27 18:08:27 +03002203q6_init_err:
2204 free_netdev(ipa_netdevs[0]);
2205 ipa_netdevs[0] = NULL;
2206alloc_netdev_err:
2207 wan_ioctl_deinit();
2208wan_ioctl_init_err:
2209 ipa_del_dflt_wan_rt_tables();
2210setup_dflt_wan_rt_tables_err:
2211 ipa_del_a7_qmap_hdr();
2212setup_a7_qmap_hdr_err:
2213 ipa_qmi_service_exit();
2214 atomic_set(&is_ssr, 0);
2215 return ret;
2216}
2217
/*
 * ipa_wwan_remove() - platform driver remove callback.
 *
 * Tears down the IPA->APPS pipe, unregisters the netdev, deletes the IPA
 * RM dependency/timer/resource, and removes the headers and routing
 * tables installed by ipa_wwan_probe(). Always returns 0; individual
 * teardown failures are only logged.
 */
static int ipa_wwan_remove(struct platform_device *pdev)
{
	int ret;
	struct wwan_private *wwan_ptr;

	wwan_ptr = netdev_priv(ipa_netdevs[0]);

	pr_info("rmnet_ipa started deinitialization\n");
	/* serialize against other users of ipa_to_apps_hdl */
	mutex_lock(&ipa_to_apps_pipe_handle_guard);
	ret = ipa2_teardown_sys_pipe(ipa_to_apps_hdl);
	if (ret < 0)
		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
	else
		ipa_to_apps_hdl = -1;
	if (ipa_rmnet_res.ipa_napi_enable)
		netif_napi_del(&(wwan_ptr->napi));
	mutex_unlock(&ipa_to_apps_pipe_handle_guard);
	unregister_netdev(ipa_netdevs[0]);
	/* unwind RM objects in reverse order of their creation in probe */
	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
			ret);
	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR(
		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	/* stop deferred work before freeing the netdev it may touch */
	cancel_work_sync(&ipa_tx_wakequeue_work);
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	free_netdev(ipa_netdevs[0]);
	ipa_netdevs[0] = NULL;
	/* No need to remove wwan_ioctl during SSR */
	if (!atomic_read(&is_ssr))
		wan_ioctl_deinit();
	ipa_del_dflt_wan_rt_tables();
	ipa_del_a7_qmap_hdr();
	ipa_del_mux_qmap_hdrs();
	if (ipa_qmi_ctx && ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false)
		wwan_del_ul_flt_rule_to_ipa();
	ipa_cleanup_deregister_intf();
	atomic_set(&is_initialized, 0);
	pr_info("rmnet_ipa completed deinitialization\n");
	return 0;
}
2268
2269/**
2270* rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
2271* @dev: pointer to device
2272*
2273* This callback will be invoked by the runtime_pm framework when an AP suspend
2274* operation is invoked, usually by pressing a suspend button.
2275*
2276* Returns -EAGAIN to runtime_pm framework in case there are pending packets
2277* in the Tx queue. This will postpone the suspend operation until all the
2278* pending packets will be transmitted.
2279*
2280* In case there are no packets to send, releases the WWAN0_PROD entity.
2281* As an outcome, the number of IPA active clients should be decremented
2282* until IPA clocks can be gated.
2283*/
2284static int rmnet_ipa_ap_suspend(struct device *dev)
2285{
2286 struct net_device *netdev = ipa_netdevs[0];
2287 struct wwan_private *wwan_ptr = netdev_priv(netdev);
2288
2289 IPAWANDBG("Enter...\n");
2290 /* Do not allow A7 to suspend in case there are oustanding packets */
2291 if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
2292 IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
2293 return -EAGAIN;
2294 }
2295
2296 /* Make sure that there is no Tx operation ongoing */
2297 netif_tx_lock_bh(netdev);
2298 ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2299 netif_tx_unlock_bh(netdev);
2300 IPAWANDBG("Exit\n");
2301
2302 return 0;
2303}
2304
2305/**
2306* rmnet_ipa_ap_resume() - resume callback for runtime_pm
2307* @dev: pointer to device
2308*
2309* This callback will be invoked by the runtime_pm framework when an AP resume
2310* operation is invoked.
2311*
2312* Enables the network interface queue and returns success to the
2313* runtime_pm framework.
2314*/
2315static int rmnet_ipa_ap_resume(struct device *dev)
2316{
2317 struct net_device *netdev = ipa_netdevs[0];
2318
2319 IPAWANDBG("Enter...\n");
2320 netif_wake_queue(netdev);
2321 IPAWANDBG("Exit\n");
2322
2323 return 0;
2324}
2325
/* stop periodic tethering-stats polling and cancel any queued poll work */
static void ipa_stop_polling_stats(void)
{
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	/* zero interval keeps the poll work from rescheduling itself */
	ipa_rmnet_ctx.polling_interval = 0;
}
2331
/* device-tree match table: binds this driver to "qcom,rmnet-ipa" nodes */
static const struct of_device_id rmnet_ipa_dt_match[] = {
	{.compatible = "qcom,rmnet-ipa"},
	{},
};
MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);

/* noirq-phase PM callbacks: run after/before device interrupts are off */
static const struct dev_pm_ops rmnet_ipa_pm_ops = {
	.suspend_noirq = rmnet_ipa_ap_suspend,
	.resume_noirq = rmnet_ipa_ap_resume,
};

/* platform driver; also (un)registered dynamically by the SSR callback */
static struct platform_driver rmnet_ipa_driver = {
	.driver = {
		.name = "rmnet_ipa",
		.owner = THIS_MODULE,
		.pm = &rmnet_ipa_pm_ops,
		.of_match_table = rmnet_ipa_dt_match,
	},
	.probe = ipa_wwan_probe,
	.remove = ipa_wwan_remove,
};
2353
Skylar Chang09e0e252017-03-20 14:51:29 -07002354/**
2355 * rmnet_ipa_send_ssr_notification(bool ssr_done) - send SSR notification
2356 *
2357 * This function sends the SSR notification before modem shutdown and
2358 * after_powerup from SSR framework, to user-space module
2359 */
2360static void rmnet_ipa_send_ssr_notification(bool ssr_done)
2361{
2362 struct ipa_msg_meta msg_meta;
2363 int rc;
2364
2365 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2366 if (ssr_done)
2367 msg_meta.msg_type = IPA_SSR_AFTER_POWERUP;
2368 else
2369 msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN;
2370 rc = ipa_send_msg(&msg_meta, NULL, NULL);
2371 if (rc) {
2372 IPAWANERR("ipa_send_msg failed: %d\n", rc);
2373 return;
2374 }
2375}
2376
Amir Levy9659e592016-10-27 18:08:27 +03002377static int ssr_notifier_cb(struct notifier_block *this,
2378 unsigned long code,
2379 void *data)
2380{
2381 if (ipa_rmnet_ctx.ipa_rmnet_ssr) {
2382 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2383 pr_info("IPA received MPSS BEFORE_SHUTDOWN\n");
Skylar Chang09e0e252017-03-20 14:51:29 -07002384 /* send SSR before-shutdown notification to IPACM */
2385 rmnet_ipa_send_ssr_notification(false);
Amir Levy9659e592016-10-27 18:08:27 +03002386 atomic_set(&is_ssr, 1);
2387 ipa_q6_pre_shutdown_cleanup();
2388 if (ipa_netdevs[0])
2389 netif_stop_queue(ipa_netdevs[0]);
2390 ipa_qmi_stop_workqueues();
2391 wan_ioctl_stop_qmi_messages();
2392 ipa_stop_polling_stats();
2393 if (atomic_read(&is_initialized))
2394 platform_driver_unregister(&rmnet_ipa_driver);
2395 pr_info("IPA BEFORE_SHUTDOWN handling is complete\n");
2396 return NOTIFY_DONE;
2397 }
2398 if (code == SUBSYS_AFTER_SHUTDOWN) {
2399 pr_info("IPA received MPSS AFTER_SHUTDOWN\n");
2400 if (atomic_read(&is_ssr))
2401 ipa_q6_post_shutdown_cleanup();
2402 pr_info("IPA AFTER_SHUTDOWN handling is complete\n");
2403 return NOTIFY_DONE;
2404 }
2405 if (code == SUBSYS_AFTER_POWERUP) {
2406 pr_info("IPA received MPSS AFTER_POWERUP\n");
2407 if (!atomic_read(&is_initialized)
2408 && atomic_read(&is_ssr))
2409 platform_driver_register(&rmnet_ipa_driver);
2410 pr_info("IPA AFTER_POWERUP handling is complete\n");
2411 return NOTIFY_DONE;
2412 }
2413 if (code == SUBSYS_BEFORE_POWERUP) {
2414 pr_info("IPA received MPSS BEFORE_POWERUP\n");
2415 if (atomic_read(&is_ssr))
2416 /* clean up cached QMI msg/handlers */
2417 ipa_qmi_service_exit();
2418 ipa2_proxy_clk_vote();
2419 pr_info("IPA BEFORE_POWERUP handling is complete\n");
2420 return NOTIFY_DONE;
2421 }
2422 }
2423 return NOTIFY_DONE;
2424}
2425
2426/**
2427 * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa2_send_msg
2428 * @buff: pointer to buffer containing the message
2429 * @len: message len
2430 * @type: message type
2431 *
2432 * This function is invoked when ipa2_send_msg is complete (Provided as a
2433 * free function pointer along with the message).
2434 */
2435static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
2436{
2437 if (!buff) {
2438 IPAWANERR("Null buffer\n");
2439 return;
2440 }
2441
2442 if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
2443 type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
2444 IPAWANERR("Wrong type given. buff %p type %d\n",
2445 buff, type);
2446 }
2447 kfree(buff);
2448}
2449
2450/**
2451 * rmnet_ipa_get_stats_and_update(bool reset) - Gets pipe stats from Modem
2452 *
2453 * This function queries the IPA Modem driver for the pipe stats
2454 * via QMI, and updates the user space IPA entity.
2455 */
2456static void rmnet_ipa_get_stats_and_update(bool reset)
2457{
2458 struct ipa_get_data_stats_req_msg_v01 req;
2459 struct ipa_get_data_stats_resp_msg_v01 *resp;
2460 struct ipa_msg_meta msg_meta;
2461 int rc;
2462
2463 resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
2464 GFP_KERNEL);
2465 if (!resp) {
2466 IPAWANERR("Can't allocate memory for stats message\n");
2467 return;
2468 }
2469
2470 memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
2471 memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
2472
2473 req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
2474 if (reset == true) {
2475 req.reset_stats_valid = true;
2476 req.reset_stats = true;
2477 IPAWANERR("Get the latest pipe-stats and reset it\n");
2478 }
2479
2480 rc = ipa_qmi_get_data_stats(&req, resp);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002481 if (rc) {
2482 IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc);
2483 kfree(resp);
2484 return;
2485 }
Amir Levy9659e592016-10-27 18:08:27 +03002486
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002487 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2488 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
2489 msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
2490 rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2491 if (rc) {
2492 IPAWANERR("ipa2_send_msg failed: %d\n", rc);
2493 kfree(resp);
2494 return;
Amir Levy9659e592016-10-27 18:08:27 +03002495 }
2496}
2497
2498/**
2499 * tethering_stats_poll_queue() - Stats polling function
2500 * @work - Work entry
2501 *
2502 * This function is scheduled periodically (per the interval) in
2503 * order to poll the IPA Modem driver for the pipe stats.
2504 */
2505static void tethering_stats_poll_queue(struct work_struct *work)
2506{
2507 rmnet_ipa_get_stats_and_update(false);
2508
2509 /* Schedule again only if there's an active polling interval */
2510 if (ipa_rmnet_ctx.polling_interval != 0)
2511 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
2512 msecs_to_jiffies(ipa_rmnet_ctx.polling_interval*1000));
2513}
2514
2515/**
2516 * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
2517 *
2518 * This function retrieves the data usage (used quota) from the IPA Modem driver
2519 * via QMI, and updates IPA user space entity.
2520 */
2521static void rmnet_ipa_get_network_stats_and_update(void)
2522{
2523 struct ipa_get_apn_data_stats_req_msg_v01 req;
2524 struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
2525 struct ipa_msg_meta msg_meta;
2526 int rc;
2527
2528 resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
2529 GFP_KERNEL);
2530 if (!resp) {
2531 IPAWANERR("Can't allocate memory for network stats message\n");
2532 return;
2533 }
2534
2535 memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
2536 memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
2537
2538 req.mux_id_list_valid = true;
2539 req.mux_id_list_len = 1;
2540 req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id;
2541
2542 rc = ipa_qmi_get_network_stats(&req, resp);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002543 if (rc) {
2544 IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc);
2545 kfree(resp);
2546 return;
2547 }
Amir Levy9659e592016-10-27 18:08:27 +03002548
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002549 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2550 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
2551 msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
2552 rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2553 if (rc) {
2554 IPAWANERR("ipa2_send_msg failed: %d\n", rc);
2555 kfree(resp);
2556 return;
Amir Levy9659e592016-10-27 18:08:27 +03002557 }
2558}
2559
2560/**
Skylar Chang09e0e252017-03-20 14:51:29 -07002561 * rmnet_ipa_send_quota_reach_ind() - send quota_reach notification from
2562 * IPA Modem
2563 * This function sends the quota_reach indication from the IPA Modem driver
2564 * via QMI, to user-space module
2565 */
2566static void rmnet_ipa_send_quota_reach_ind(void)
2567{
2568 struct ipa_msg_meta msg_meta;
2569 int rc;
2570
2571 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2572 msg_meta.msg_type = IPA_QUOTA_REACH;
2573 rc = ipa_send_msg(&msg_meta, NULL, NULL);
2574 if (rc) {
2575 IPAWANERR("ipa_send_msg failed: %d\n", rc);
2576 return;
2577 }
2578}
2579
2580/**
Amir Levy9659e592016-10-27 18:08:27 +03002581 * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler
2582 * @data - IOCTL data
2583 *
2584 * This function handles WAN_IOC_POLL_TETHERING_STATS.
2585 * In case polling interval received is 0, polling will stop
2586 * (If there's a polling in progress, it will allow it to finish), and then will
2587 * fetch network stats, and update the IPA user space.
2588 *
2589 * Return codes:
2590 * 0: Success
2591 */
2592int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
2593{
2594 ipa_rmnet_ctx.polling_interval = data->polling_interval_secs;
2595
2596 cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
2597
2598 if (ipa_rmnet_ctx.polling_interval == 0) {
2599 ipa_qmi_stop_data_qouta();
2600 rmnet_ipa_get_network_stats_and_update();
2601 rmnet_ipa_get_stats_and_update(true);
2602 return 0;
2603 }
2604
2605 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
2606 return 0;
2607}
2608
2609/**
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302610 * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler
Amir Levy9659e592016-10-27 18:08:27 +03002611 * @data - IOCTL data
2612 *
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302613 * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface.
Amir Levy9659e592016-10-27 18:08:27 +03002614 * It translates the given interface name to the Modem MUX ID and
2615 * sends the request of the quota to the IPA Modem driver via QMI.
2616 *
2617 * Return codes:
2618 * 0: Success
2619 * -EFAULT: Invalid interface name provided
2620 * other: See ipa_qmi_set_data_quota
2621 */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302622static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data)
Amir Levy9659e592016-10-27 18:08:27 +03002623{
2624 u32 mux_id;
2625 int index;
2626 struct ipa_set_data_usage_quota_req_msg_v01 req;
2627
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302628 /* stop quota */
2629 if (!data->set_quota)
2630 ipa_qmi_stop_data_qouta();
2631
Skylar Changcde17ed2017-06-21 16:51:26 -07002632 /* prevent string buffer overflows */
2633 data->interface_name[IFNAMSIZ-1] = '\0';
2634
Amir Levy9659e592016-10-27 18:08:27 +03002635 index = find_vchannel_name_index(data->interface_name);
2636 IPAWANERR("iface name %s, quota %lu\n",
2637 data->interface_name,
2638 (unsigned long int) data->quota_mbytes);
2639
2640 if (index == MAX_NUM_OF_MUX_CHANNEL) {
2641 IPAWANERR("%s is an invalid iface name\n",
2642 data->interface_name);
2643 return -EFAULT;
2644 }
2645
2646 mux_id = mux_channel[index].mux_id;
2647
2648 ipa_rmnet_ctx.metered_mux_id = mux_id;
2649
2650 memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
2651 req.apn_quota_list_valid = true;
2652 req.apn_quota_list_len = 1;
2653 req.apn_quota_list[0].mux_id = mux_id;
2654 req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
2655
2656 return ipa_qmi_set_data_quota(&req);
2657}
2658
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302659static int rmnet_ipa_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
2660{
2661 struct ipa_set_wifi_quota wifi_quota;
2662 int rc = 0;
2663
2664 memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
2665 wifi_quota.set_quota = data->set_quota;
2666 wifi_quota.quota_bytes = data->quota_mbytes;
2667 IPAWANDBG("iface name %s, quota %lu\n",
2668 data->interface_name,
2669 (unsigned long int) data->quota_mbytes);
2670
2671 rc = ipa2_set_wlan_quota(&wifi_quota);
2672 /* check if wlan-fw takes this quota-set */
2673 if (!wifi_quota.set_valid)
2674 rc = -EFAULT;
2675 return rc;
2676}
2677
2678/**
2679 * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
2680 * @data - IOCTL data
2681 *
2682 * This function handles WAN_IOC_SET_DATA_QUOTA.
2683 * It translates the given interface name to the Modem MUX ID and
2684 * sends the request of the quota to the IPA Modem driver via QMI.
2685 *
2686 * Return codes:
2687 * 0: Success
2688 * -EFAULT: Invalid interface name provided
2689 * other: See ipa_qmi_set_data_quota
2690 */
2691int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
2692{
2693 enum ipa_upstream_type upstream_type;
2694 int rc = 0;
2695
2696 /* get IPA backhaul type */
2697 upstream_type = find_upstream_type(data->interface_name);
2698
2699 if (upstream_type == IPA_UPSTEAM_MAX) {
2700 IPAWANERR("upstream iface %s not supported\n",
2701 data->interface_name);
2702 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2703 rc = rmnet_ipa_set_data_quota_wifi(data);
2704 if (rc) {
2705 IPAWANERR("set quota on wifi failed\n");
2706 return rc;
2707 }
2708 } else {
2709 rc = rmnet_ipa_set_data_quota_modem(data);
2710 if (rc) {
2711 IPAWANERR("set quota on modem failed\n");
2712 return rc;
2713 }
2714 }
2715 return rc;
2716}
2717
Amir Levy9659e592016-10-27 18:08:27 +03002718 /* rmnet_ipa_set_tether_client_pipe() -
2719 * @data - IOCTL data
2720 *
2721 * This function handles WAN_IOC_SET_DATA_QUOTA.
2722 * It translates the given interface name to the Modem MUX ID and
2723 * sends the request of the quota to the IPA Modem driver via QMI.
2724 *
2725 * Return codes:
2726 * 0: Success
Skylar Chang345c8142016-11-30 14:41:24 -08002727 * -EFAULT: Invalid src/dst pipes provided
Amir Levy9659e592016-10-27 18:08:27 +03002728 * other: See ipa_qmi_set_data_quota
2729 */
2730int rmnet_ipa_set_tether_client_pipe(
2731 struct wan_ioctl_set_tether_client_pipe *data)
2732{
2733 int number, i;
2734
Skylar Chang345c8142016-11-30 14:41:24 -08002735 /* error checking if ul_src_pipe_len valid or not*/
2736 if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
2737 data->ul_src_pipe_len < 0) {
2738 IPAWANERR("UL src pipes %d exceeding max %d\n",
2739 data->ul_src_pipe_len,
2740 QMI_IPA_MAX_PIPES_V01);
2741 return -EFAULT;
2742 }
2743 /* error checking if dl_dst_pipe_len valid or not*/
2744 if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
2745 data->dl_dst_pipe_len < 0) {
2746 IPAWANERR("DL dst pipes %d exceeding max %d\n",
2747 data->dl_dst_pipe_len,
2748 QMI_IPA_MAX_PIPES_V01);
2749 return -EFAULT;
2750 }
2751
Amir Levy9659e592016-10-27 18:08:27 +03002752 IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
2753 data->ipa_client,
2754 data->ul_src_pipe_len,
2755 data->dl_dst_pipe_len,
2756 data->reset_client);
2757 number = data->ul_src_pipe_len;
2758 for (i = 0; i < number; i++) {
2759 IPAWANDBG("UL index-%d pipe %d\n", i,
2760 data->ul_src_pipe_list[i]);
2761 if (data->reset_client)
2762 ipa_set_client(data->ul_src_pipe_list[i],
2763 0, false);
2764 else
2765 ipa_set_client(data->ul_src_pipe_list[i],
2766 data->ipa_client, true);
2767 }
2768 number = data->dl_dst_pipe_len;
2769 for (i = 0; i < number; i++) {
2770 IPAWANDBG("DL index-%d pipe %d\n", i,
2771 data->dl_dst_pipe_list[i]);
2772 if (data->reset_client)
2773 ipa_set_client(data->dl_dst_pipe_list[i],
2774 0, false);
2775 else
2776 ipa_set_client(data->dl_dst_pipe_list[i],
2777 data->ipa_client, false);
2778 }
2779 return 0;
2780}
2781
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302782static int rmnet_ipa_query_tethering_stats_wifi(
2783 struct wan_ioctl_query_tether_stats *data, bool reset)
2784{
2785 struct ipa_get_wdi_sap_stats *sap_stats;
2786 int rc;
2787
2788 sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
2789 GFP_KERNEL);
2790 if (!sap_stats)
2791 return -ENOMEM;
2792
2793 sap_stats->reset_stats = reset;
2794 IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);
2795
2796 rc = ipa2_get_wlan_stats(sap_stats);
2797 if (rc) {
2798 kfree(sap_stats);
2799 return rc;
2800 } else if (reset) {
2801 kfree(sap_stats);
2802 return 0;
2803 }
2804
2805 if (sap_stats->stats_valid) {
2806 data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
2807 data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
2808 data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
2809 data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
2810 data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
2811 data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
2812 data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
2813 data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
2814 }
2815
2816 IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
2817 (unsigned long int) data->ipv4_rx_packets,
2818 (unsigned long int) data->ipv6_rx_packets,
2819 (unsigned long int) data->ipv4_rx_bytes,
2820 (unsigned long int) data->ipv6_rx_bytes);
2821 IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
2822 (unsigned long int) data->ipv4_tx_packets,
2823 (unsigned long int) data->ipv6_tx_packets,
2824 (unsigned long int) data->ipv4_tx_bytes,
2825 (unsigned long int) data->ipv6_tx_bytes);
2826
2827 kfree(sap_stats);
2828 return rc;
2829}
2830
2831int rmnet_ipa_query_tethering_stats_modem(
2832 struct wan_ioctl_query_tether_stats *data,
2833 bool reset
2834)
Amir Levy9659e592016-10-27 18:08:27 +03002835{
2836 struct ipa_get_data_stats_req_msg_v01 *req;
2837 struct ipa_get_data_stats_resp_msg_v01 *resp;
2838 int pipe_len, rc;
2839
2840 req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
2841 GFP_KERNEL);
2842 if (!req) {
2843 IPAWANERR("failed to allocate memory for stats message\n");
2844 return -ENOMEM;
2845 }
2846 resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
2847 GFP_KERNEL);
2848 if (!resp) {
2849 IPAWANERR("failed to allocate memory for stats message\n");
2850 kfree(req);
2851 return -ENOMEM;
2852 }
2853 memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
2854 memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
2855
2856 req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
2857 if (reset) {
2858 req->reset_stats_valid = true;
2859 req->reset_stats = true;
2860 IPAWANERR("reset the pipe stats\n");
2861 } else {
2862 /* print tethered-client enum */
2863 IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
2864 }
2865
2866 rc = ipa_qmi_get_data_stats(req, resp);
2867 if (rc) {
2868 IPAWANERR("can't get ipa_qmi_get_data_stats\n");
2869 kfree(req);
2870 kfree(resp);
2871 return rc;
Mohammed Javid2cee34a2017-06-14 12:40:34 +05302872 } else if (data == NULL) {
2873 kfree(req);
2874 kfree(resp);
2875 return 0;
Amir Levy9659e592016-10-27 18:08:27 +03002876 }
2877
2878 if (resp->dl_dst_pipe_stats_list_valid) {
2879 for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
2880 pipe_len++) {
2881 IPAWANDBG("Check entry(%d) dl_dst_pipe(%d)\n",
2882 pipe_len, resp->dl_dst_pipe_stats_list
2883 [pipe_len].pipe_index);
2884 IPAWANDBG("dl_p_v4(%lu)v6(%lu) dl_b_v4(%lu)v6(%lu)\n",
2885 (unsigned long int) resp->
2886 dl_dst_pipe_stats_list[pipe_len].
2887 num_ipv4_packets,
2888 (unsigned long int) resp->
2889 dl_dst_pipe_stats_list[pipe_len].
2890 num_ipv6_packets,
2891 (unsigned long int) resp->
2892 dl_dst_pipe_stats_list[pipe_len].
2893 num_ipv4_bytes,
2894 (unsigned long int) resp->
2895 dl_dst_pipe_stats_list[pipe_len].
2896 num_ipv6_bytes);
2897 if (ipa_get_client_uplink(resp->
2898 dl_dst_pipe_stats_list[pipe_len].
2899 pipe_index) == false) {
2900 if (data->ipa_client == ipa_get_client(resp->
2901 dl_dst_pipe_stats_list[pipe_len].
2902 pipe_index)) {
2903 /* update the DL stats */
2904 data->ipv4_rx_packets += resp->
2905 dl_dst_pipe_stats_list[pipe_len].
2906 num_ipv4_packets;
2907 data->ipv6_rx_packets += resp->
2908 dl_dst_pipe_stats_list[pipe_len].
2909 num_ipv6_packets;
2910 data->ipv4_rx_bytes += resp->
2911 dl_dst_pipe_stats_list[pipe_len].
2912 num_ipv4_bytes;
2913 data->ipv6_rx_bytes += resp->
2914 dl_dst_pipe_stats_list[pipe_len].
2915 num_ipv6_bytes;
2916 }
2917 }
2918 }
2919 }
2920 IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
2921 (unsigned long int) data->ipv4_rx_packets,
2922 (unsigned long int) data->ipv6_rx_packets,
2923 (unsigned long int) data->ipv4_rx_bytes,
2924 (unsigned long int) data->ipv6_rx_bytes);
2925
2926 if (resp->ul_src_pipe_stats_list_valid) {
2927 for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
2928 pipe_len++) {
2929 IPAWANDBG("Check entry(%d) ul_dst_pipe(%d)\n",
2930 pipe_len,
2931 resp->ul_src_pipe_stats_list[pipe_len].
2932 pipe_index);
2933 IPAWANDBG("ul_p_v4(%lu)v6(%lu)ul_b_v4(%lu)v6(%lu)\n",
2934 (unsigned long int) resp->
2935 ul_src_pipe_stats_list[pipe_len].
2936 num_ipv4_packets,
2937 (unsigned long int) resp->
2938 ul_src_pipe_stats_list[pipe_len].
2939 num_ipv6_packets,
2940 (unsigned long int) resp->
2941 ul_src_pipe_stats_list[pipe_len].
2942 num_ipv4_bytes,
2943 (unsigned long int) resp->
2944 ul_src_pipe_stats_list[pipe_len].
2945 num_ipv6_bytes);
2946 if (ipa_get_client_uplink(resp->
2947 ul_src_pipe_stats_list[pipe_len].
2948 pipe_index) == true) {
2949 if (data->ipa_client == ipa_get_client(resp->
2950 ul_src_pipe_stats_list[pipe_len].
2951 pipe_index)) {
2952 /* update the DL stats */
2953 data->ipv4_tx_packets += resp->
2954 ul_src_pipe_stats_list[pipe_len].
2955 num_ipv4_packets;
2956 data->ipv6_tx_packets += resp->
2957 ul_src_pipe_stats_list[pipe_len].
2958 num_ipv6_packets;
2959 data->ipv4_tx_bytes += resp->
2960 ul_src_pipe_stats_list[pipe_len].
2961 num_ipv4_bytes;
2962 data->ipv6_tx_bytes += resp->
2963 ul_src_pipe_stats_list[pipe_len].
2964 num_ipv6_bytes;
2965 }
2966 }
2967 }
2968 }
2969 IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
2970 (unsigned long int) data->ipv4_tx_packets,
2971 (unsigned long int) data->ipv6_tx_packets,
2972 (unsigned long int) data->ipv4_tx_bytes,
2973 (unsigned long int) data->ipv6_tx_bytes);
2974 kfree(req);
2975 kfree(resp);
2976 return 0;
2977}
2978
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302979int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
2980 bool reset)
2981{
2982 enum ipa_upstream_type upstream_type;
2983 int rc = 0;
2984
2985 /* get IPA backhaul type */
2986 upstream_type = find_upstream_type(data->upstreamIface);
2987
2988 if (upstream_type == IPA_UPSTEAM_MAX) {
2989 IPAWANERR("upstreamIface %s not supported\n",
2990 data->upstreamIface);
2991 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2992 IPAWANDBG_LOW(" query wifi-backhaul stats\n");
2993 rc = rmnet_ipa_query_tethering_stats_wifi(
2994 data, false);
2995 if (rc) {
2996 IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
2997 return rc;
2998 }
2999 } else {
3000 IPAWANDBG_LOW(" query modem-backhaul stats\n");
3001 rc = rmnet_ipa_query_tethering_stats_modem(
3002 data, false);
3003 if (rc) {
3004 IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
3005 return rc;
3006 }
3007 }
3008 return rc;
3009}
3010
Skylar Chang09e0e252017-03-20 14:51:29 -07003011int rmnet_ipa_query_tethering_stats_all(
3012 struct wan_ioctl_query_tether_stats_all *data)
3013{
3014 struct wan_ioctl_query_tether_stats tether_stats;
3015 enum ipa_upstream_type upstream_type;
3016 int rc = 0;
3017
3018 memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
3019 /* get IPA backhaul type */
3020 upstream_type = find_upstream_type(data->upstreamIface);
3021
3022 if (upstream_type == IPA_UPSTEAM_MAX) {
3023 IPAWANERR(" Wrong upstreamIface name %s\n",
3024 data->upstreamIface);
3025 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
3026 IPAWANDBG_LOW(" query wifi-backhaul stats\n");
3027 rc = rmnet_ipa_query_tethering_stats_wifi(
3028 &tether_stats, data->reset_stats);
3029 if (rc) {
3030 IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
3031 return rc;
3032 }
3033 data->tx_bytes = tether_stats.ipv4_tx_bytes
3034 + tether_stats.ipv6_tx_bytes;
3035 data->rx_bytes = tether_stats.ipv4_rx_bytes
3036 + tether_stats.ipv6_rx_bytes;
3037 } else {
3038 IPAWANDBG_LOW(" query modem-backhaul stats\n");
3039 tether_stats.ipa_client = data->ipa_client;
3040 rc = rmnet_ipa_query_tethering_stats_modem(
3041 &tether_stats, data->reset_stats);
3042 if (rc) {
3043 IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
3044 return rc;
3045 }
3046 data->tx_bytes = tether_stats.ipv4_tx_bytes
3047 + tether_stats.ipv6_tx_bytes;
3048 data->rx_bytes = tether_stats.ipv4_rx_bytes
3049 + tether_stats.ipv6_rx_bytes;
3050 }
3051 return rc;
3052}
3053
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303054int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
3055{
3056 enum ipa_upstream_type upstream_type;
Mohammed Javid2cee34a2017-06-14 12:40:34 +05303057 struct wan_ioctl_query_tether_stats tether_stats;
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303058 int rc = 0;
3059
Mohammed Javid2cee34a2017-06-14 12:40:34 +05303060 memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
3061
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303062 /* get IPA backhaul type */
3063 upstream_type = find_upstream_type(data->upstreamIface);
3064
3065 if (upstream_type == IPA_UPSTEAM_MAX) {
3066 IPAWANERR("upstream iface %s not supported\n",
3067 data->upstreamIface);
3068 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
3069 IPAWANDBG(" reset wifi-backhaul stats\n");
3070 rc = rmnet_ipa_query_tethering_stats_wifi(
3071 NULL, true);
3072 if (rc) {
3073 IPAWANERR("reset WLAN stats failed\n");
3074 return rc;
3075 }
3076 } else {
3077 IPAWANDBG(" reset modem-backhaul stats\n");
3078 rc = rmnet_ipa_query_tethering_stats_modem(
Mohammed Javid2cee34a2017-06-14 12:40:34 +05303079 &tether_stats, true);
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303080 if (rc) {
3081 IPAWANERR("reset MODEM stats failed\n");
3082 return rc;
3083 }
3084 }
3085 return rc;
3086}
3087
3088
Amir Levy9659e592016-10-27 18:08:27 +03003089/**
3090 * ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
3091 * @mux_id - The MUX ID on which the quota has been reached
3092 *
3093 * This function broadcasts a Netlink event using the kobject of the
3094 * rmnet_ipa interface in order to alert the user space that the quota
3095 * on the specific interface which matches the mux_id has been reached.
3096 *
3097 */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303098void ipa_broadcast_quota_reach_ind(u32 mux_id,
3099 enum ipa_upstream_type upstream_type)
Amir Levy9659e592016-10-27 18:08:27 +03003100{
3101 char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
3102 char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
3103 char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
3104 char *envp[IPA_UEVENT_NUM_EVNP] = {
3105 alert_msg, iface_name_l, iface_name_m, NULL };
3106 int res;
3107 int index;
3108
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303109 /* check upstream_type*/
3110 if (upstream_type == IPA_UPSTEAM_MAX) {
3111 IPAWANERR("upstreamIface type %d not supported\n",
3112 upstream_type);
Amir Levy9659e592016-10-27 18:08:27 +03003113 return;
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303114 } else if (upstream_type == IPA_UPSTEAM_MODEM) {
3115 index = find_mux_channel_index(mux_id);
3116 if (index == MAX_NUM_OF_MUX_CHANNEL) {
3117 IPAWANERR("%u is an mux ID\n", mux_id);
3118 return;
3119 }
Amir Levy9659e592016-10-27 18:08:27 +03003120 }
3121
3122 res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
3123 "ALERT_NAME=%s", "quotaReachedAlert");
3124 if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
3125 IPAWANERR("message too long (%d)", res);
3126 return;
3127 }
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303128
Amir Levy9659e592016-10-27 18:08:27 +03003129 /* posting msg for L-release for CNE */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303130 if (upstream_type == IPA_UPSTEAM_MODEM) {
Amir Levy9659e592016-10-27 18:08:27 +03003131 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303132 "UPSTREAM=%s", mux_channel[index].vchannel_name);
3133 } else {
3134 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
3135 "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
3136 }
Amir Levy9659e592016-10-27 18:08:27 +03003137 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
3138 IPAWANERR("message too long (%d)", res);
3139 return;
3140 }
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303141
Amir Levy9659e592016-10-27 18:08:27 +03003142 /* posting msg for M-release for CNE */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303143 if (upstream_type == IPA_UPSTEAM_MODEM) {
Amir Levy9659e592016-10-27 18:08:27 +03003144 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303145 "INTERFACE=%s", mux_channel[index].vchannel_name);
3146 } else {
3147 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
3148 "INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
3149 }
Amir Levy9659e592016-10-27 18:08:27 +03003150 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
3151 IPAWANERR("message too long (%d)", res);
3152 return;
3153 }
3154
3155 IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
3156 alert_msg, iface_name_l, iface_name_m);
3157 kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp);
Skylar Chang09e0e252017-03-20 14:51:29 -07003158
3159 rmnet_ipa_send_quota_reach_ind();
Amir Levy9659e592016-10-27 18:08:27 +03003160}
3161
3162/**
3163 * ipa_q6_handshake_complete() - Perform operations once Q6 is up
3164 * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
3165 *
3166 * This function is invoked once the handshake between the IPA AP driver
3167 * and IPA Q6 driver is complete. At this point, it is possible to perform
3168 * operations which can't be performed until IPA Q6 driver is up.
3169 *
3170 */
3171void ipa_q6_handshake_complete(bool ssr_bootup)
3172{
3173 /* It is required to recover the network stats after SSR recovery */
3174 if (ssr_bootup) {
3175 /*
3176 * In case the uC is required to be loaded by the Modem,
3177 * the proxy vote will be removed only when uC loading is
3178 * complete and indication is received by the AP. After SSR,
3179 * uC is already loaded. Therefore, proxy vote can be removed
3180 * once Modem init is complete.
3181 */
3182 ipa2_proxy_clk_unvote();
3183
Skylar Chang09e0e252017-03-20 14:51:29 -07003184 /* send SSR power-up notification to IPACM */
3185 rmnet_ipa_send_ssr_notification(true);
3186
Amir Levy9659e592016-10-27 18:08:27 +03003187 /*
3188 * It is required to recover the network stats after
3189 * SSR recovery
3190 */
3191 rmnet_ipa_get_network_stats_and_update();
3192
3193 /* Enable holb monitoring on Q6 pipes. */
3194 ipa_q6_monitor_holb_mitigation(true);
3195 }
3196}
3197
3198static int __init ipa_wwan_init(void)
3199{
3200 atomic_set(&is_initialized, 0);
3201 atomic_set(&is_ssr, 0);
3202
3203 mutex_init(&ipa_to_apps_pipe_handle_guard);
Skylar Chang8438ba52017-03-15 21:27:35 -07003204 mutex_init(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03003205 ipa_to_apps_hdl = -1;
3206
3207 ipa_qmi_init();
3208
3209 /* Register for Modem SSR */
3210 subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM,
3211 &ssr_notifier);
3212 if (!IS_ERR(subsys_notify_handle))
3213 return platform_driver_register(&rmnet_ipa_driver);
3214 else
3215 return (int)PTR_ERR(subsys_notify_handle);
3216}
3217
3218static void __exit ipa_wwan_cleanup(void)
3219{
3220 int ret;
3221
3222 ipa_qmi_cleanup();
3223 mutex_destroy(&ipa_to_apps_pipe_handle_guard);
Skylar Chang8438ba52017-03-15 21:27:35 -07003224 mutex_destroy(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03003225 ret = subsys_notif_unregister_notifier(subsys_notify_handle,
3226 &ssr_notifier);
3227 if (ret)
3228 IPAWANERR(
3229 "Error subsys_notif_unregister_notifier system %s, ret=%d\n",
3230 SUBSYS_MODEM, ret);
3231 platform_driver_unregister(&rmnet_ipa_driver);
3232}
3233
3234static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type)
3235{
3236 if (!buff)
3237 IPAWANERR("Null buffer.\n");
3238 kfree(buff);
3239}
3240
3241static void ipa_rmnet_rx_cb(void *priv)
3242{
3243 struct net_device *dev = priv;
3244 struct wwan_private *wwan_ptr;
3245
3246 IPAWANDBG("\n");
3247
3248 if (dev != ipa_netdevs[0]) {
3249 IPAWANERR("Not matching with netdev\n");
3250 return;
3251 }
3252
3253 wwan_ptr = netdev_priv(dev);
3254 napi_schedule(&(wwan_ptr->napi));
3255}
3256
3257static int ipa_rmnet_poll(struct napi_struct *napi, int budget)
3258{
3259 int rcvd_pkts = 0;
3260
3261 rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT);
3262 IPAWANDBG("rcvd packets: %d\n", rcvd_pkts);
3263 return rcvd_pkts;
3264}
3265
/* Defer init to late boot so core IPA services are registered first */
late_initcall(ipa_wwan_init);
module_exit(ipa_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
MODULE_LICENSE("GPL v2");