blob: 217f49c3a796128538023393e6a10c594c0f1a49 [file] [log] [blame]
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05301/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * WWAN Transport Network Driver.
15 */
16
17#include <linux/completion.h>
18#include <linux/errno.h>
19#include <linux/if_arp.h>
20#include <linux/interrupt.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
25#include <linux/of_device.h>
26#include <linux/string.h>
27#include <linux/skbuff.h>
28#include <linux/version.h>
29#include <linux/workqueue.h>
30#include <net/pkt_sched.h>
31#include <soc/qcom/subsystem_restart.h>
32#include <soc/qcom/subsystem_notif.h>
33#include "ipa_qmi_service.h"
34#include <linux/rmnet_ipa_fd_ioctl.h>
35#include <linux/ipa.h>
36#include <uapi/linux/net_map.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020037#include <uapi/linux/msm_rmnet.h>
38#include <net/rmnet_config.h>
Amir Levy9659e592016-10-27 18:08:27 +030039
40#include "ipa_trace.h"
41
42#define WWAN_METADATA_SHFT 24
43#define WWAN_METADATA_MASK 0xFF000000
44#define WWAN_DATA_LEN 2000
45#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
46#define HEADROOM_FOR_QMAP 8 /* for mux header */
47#define TAILROOM 0 /* for padding by mux layer */
48#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
49#define UL_FILTER_RULE_HANDLE_START 69
50#define DEFAULT_OUTSTANDING_HIGH_CTL 96
51#define DEFAULT_OUTSTANDING_HIGH 64
52#define DEFAULT_OUTSTANDING_LOW 32
53
54#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +053055#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
56
Amir Levy9659e592016-10-27 18:08:27 +030057#define IPA_WWAN_DEVICE_COUNT (1)
58
59#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
60
61#define INVALID_MUX_ID 0xFF
62#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
63#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
64#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
65
66#define NAPI_WEIGHT 60
Sunil Paidimarri226cf032016-10-14 13:33:08 -070067#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024
Amir Levy9659e592016-10-27 18:08:27 +030068
69static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
70static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
71static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl;
72static struct rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
73static int num_q6_rule, old_num_q6_rule;
74static int rmnet_index;
75static bool egress_set, a7_ul_flt_set;
76static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/
77static atomic_t is_initialized;
78static atomic_t is_ssr;
79static void *subsys_notify_handle;
80
81u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */
82static struct mutex ipa_to_apps_pipe_handle_guard;
Skylar Chang8438ba52017-03-15 21:27:35 -070083static struct mutex add_mux_channel_lock;
Amir Levy9659e592016-10-27 18:08:27 +030084static int wwan_add_ul_flt_rule_to_ipa(void);
85static int wwan_del_ul_flt_rule_to_ipa(void);
86static void ipa_wwan_msg_free_cb(void*, u32, u32);
87static void ipa_rmnet_rx_cb(void *priv);
88static int ipa_rmnet_poll(struct napi_struct *napi, int budget);
89
90static void wake_tx_queue(struct work_struct *work);
91static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue);
92
93static void tethering_stats_poll_queue(struct work_struct *work);
94static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
95 tethering_stats_poll_queue);
96
/* Coarse interface state; gates re-init of the resource-grant completion */
enum wwan_device_status {
	WWAN_DEVICE_INACTIVE = 0,
	WWAN_DEVICE_ACTIVE   = 1
};

/* Platform (device-tree) configuration resolved at probe time */
struct ipa_rmnet_plat_drv_res {
	bool ipa_rmnet_ssr;		/* participate in modem SSR handling */
	bool ipa_loaduC;		/* load the IPA uC firmware */
	bool ipa_advertise_sg_support;	/* advertise NETIF_F_SG to the stack */
	bool ipa_napi_enable;		/* use NAPI polling on the RX path */
	u32 wan_rx_desc_size;		/* WAN consumer descriptor FIFO size */
};

static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
/**
 * struct wwan_private - WWAN private data
 * @net: network interface struct implemented by this driver
 * @stats: iface statistics
 * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
 * @outstanding_high: number of outstanding packets allowed
 * @outstanding_low: number of outstanding packets which shall cause
 * @ch_id: channel id
 * @lock: spinlock for mutual exclusion
 * @device_status: holds device status
 *
 * WWAN private - holds all relevant info about WWAN driver
 */
struct wwan_private {
	struct net_device *net;
	struct net_device_stats stats;
	atomic_t outstanding_pkts;
	int outstanding_high_ctl;
	int outstanding_high;
	int outstanding_low;
	uint32_t ch_id;
	spinlock_t lock;
	struct completion resource_granted_completion;
	enum wwan_device_status device_status;
	struct napi_struct napi;
};
137
138/**
139* ipa_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
140*
141* Return codes:
142* 0: success
143* -ENOMEM: failed to allocate memory
144* -EPERM: failed to add the tables
145*/
146static int ipa_setup_a7_qmap_hdr(void)
147{
148 struct ipa_ioc_add_hdr *hdr;
149 struct ipa_hdr_add *hdr_entry;
150 u32 pyld_sz;
151 int ret;
152
153 /* install the basic exception header */
154 pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
155 sizeof(struct ipa_hdr_add);
156 hdr = kzalloc(pyld_sz, GFP_KERNEL);
157 if (!hdr) {
158 IPAWANERR("fail to alloc exception hdr\n");
159 return -ENOMEM;
160 }
161 hdr->num_hdrs = 1;
162 hdr->commit = 1;
163 hdr_entry = &hdr->hdr[0];
164
165 strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
166 IPA_RESOURCE_NAME_MAX);
167 hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
168
169 if (ipa2_add_hdr(hdr)) {
170 IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
171 ret = -EPERM;
172 goto bail;
173 }
174
175 if (hdr_entry->status) {
176 IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
177 ret = -EPERM;
178 goto bail;
179 }
180 qmap_hdr_hdl = hdr_entry->hdr_hdl;
181
182 ret = 0;
183bail:
184 kfree(hdr);
185 return ret;
186}
187
188static void ipa_del_a7_qmap_hdr(void)
189{
190 struct ipa_ioc_del_hdr *del_hdr;
191 struct ipa_hdr_del *hdl_entry;
192 u32 pyld_sz;
193 int ret;
194
195 pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
196 sizeof(struct ipa_hdr_del);
197 del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
198 if (!del_hdr) {
199 IPAWANERR("fail to alloc exception hdr_del\n");
200 return;
201 }
202
203 del_hdr->commit = 1;
204 del_hdr->num_hdls = 1;
205 hdl_entry = &del_hdr->hdl[0];
206 hdl_entry->hdl = qmap_hdr_hdl;
207
208 ret = ipa2_del_hdr(del_hdr);
209 if (ret || hdl_entry->status)
210 IPAWANERR("ipa2_del_hdr failed\n");
211 else
212 IPAWANDBG("hdrs deletion done\n");
213
214 qmap_hdr_hdl = 0;
215 kfree(del_hdr);
216}
217
218static void ipa_del_qmap_hdr(uint32_t hdr_hdl)
219{
220 struct ipa_ioc_del_hdr *del_hdr;
221 struct ipa_hdr_del *hdl_entry;
222 u32 pyld_sz;
223 int ret;
224
225 if (hdr_hdl == 0) {
226 IPAWANERR("Invalid hdr_hdl provided\n");
227 return;
228 }
229
230 pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
231 sizeof(struct ipa_hdr_del);
232 del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
233 if (!del_hdr) {
234 IPAWANERR("fail to alloc exception hdr_del\n");
235 return;
236 }
237
238 del_hdr->commit = 1;
239 del_hdr->num_hdls = 1;
240 hdl_entry = &del_hdr->hdl[0];
241 hdl_entry->hdl = hdr_hdl;
242
243 ret = ipa2_del_hdr(del_hdr);
244 if (ret || hdl_entry->status)
245 IPAWANERR("ipa2_del_hdr failed\n");
246 else
247 IPAWANDBG("header deletion done\n");
248
249 qmap_hdr_hdl = 0;
250 kfree(del_hdr);
251}
252
253static void ipa_del_mux_qmap_hdrs(void)
254{
255 int index;
256
257 for (index = 0; index < rmnet_index; index++) {
258 ipa_del_qmap_hdr(mux_channel[index].hdr_hdl);
259 mux_channel[index].hdr_hdl = 0;
260 }
261}
262
263static int ipa_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
264{
265 struct ipa_ioc_add_hdr *hdr;
266 struct ipa_hdr_add *hdr_entry;
267 char hdr_name[IPA_RESOURCE_NAME_MAX];
268 u32 pyld_sz;
269 int ret;
270
271 pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
272 sizeof(struct ipa_hdr_add);
273 hdr = kzalloc(pyld_sz, GFP_KERNEL);
274 if (!hdr) {
275 IPAWANERR("fail to alloc exception hdr\n");
276 return -ENOMEM;
277 }
278 hdr->num_hdrs = 1;
279 hdr->commit = 1;
280 hdr_entry = &hdr->hdr[0];
281
282 snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
283 A2_MUX_HDR_NAME_V4_PREF,
284 mux_id);
285 strlcpy(hdr_entry->name, hdr_name,
286 IPA_RESOURCE_NAME_MAX);
287
288 hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
289 hdr_entry->hdr[1] = (uint8_t) mux_id;
290 IPAWANDBG("header (%s) with mux-id: (%d)\n",
291 hdr_name,
292 hdr_entry->hdr[1]);
293 if (ipa2_add_hdr(hdr)) {
294 IPAWANERR("fail to add IPA_QMAP hdr\n");
295 ret = -EPERM;
296 goto bail;
297 }
298
299 if (hdr_entry->status) {
300 IPAWANERR("fail to add IPA_QMAP hdr\n");
301 ret = -EPERM;
302 goto bail;
303 }
304
305 ret = 0;
306 *hdr_hdl = hdr_entry->hdr_hdl;
307bail:
308 kfree(hdr);
309 return ret;
310}
311
312/**
313* ipa_setup_dflt_wan_rt_tables() - Setup default wan routing tables
314*
315* Return codes:
316* 0: success
317* -ENOMEM: failed to allocate memory
318* -EPERM: failed to add the tables
319*/
320static int ipa_setup_dflt_wan_rt_tables(void)
321{
322 struct ipa_ioc_add_rt_rule *rt_rule;
323 struct ipa_rt_rule_add *rt_rule_entry;
324
325 rt_rule =
326 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
327 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
328 if (!rt_rule) {
329 IPAWANERR("fail to alloc mem\n");
330 return -ENOMEM;
331 }
332 /* setup a default v4 route to point to Apps */
333 rt_rule->num_rules = 1;
334 rt_rule->commit = 1;
335 rt_rule->ip = IPA_IP_v4;
336 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
337 IPA_RESOURCE_NAME_MAX);
338
339 rt_rule_entry = &rt_rule->rules[0];
340 rt_rule_entry->at_rear = 1;
341 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
342 rt_rule_entry->rule.hdr_hdl = qmap_hdr_hdl;
343
344 if (ipa2_add_rt_rule(rt_rule)) {
345 IPAWANERR("fail to add dflt_wan v4 rule\n");
346 kfree(rt_rule);
347 return -EPERM;
348 }
349
350 IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
351 dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
352
353 /* setup a default v6 route to point to A5 */
354 rt_rule->ip = IPA_IP_v6;
355 if (ipa2_add_rt_rule(rt_rule)) {
356 IPAWANERR("fail to add dflt_wan v6 rule\n");
357 kfree(rt_rule);
358 return -EPERM;
359 }
360 IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
361 dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
362
363 kfree(rt_rule);
364 return 0;
365}
366
367static void ipa_del_dflt_wan_rt_tables(void)
368{
369 struct ipa_ioc_del_rt_rule *rt_rule;
370 struct ipa_rt_rule_del *rt_rule_entry;
371 int len;
372
373 len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
374 sizeof(struct ipa_rt_rule_del);
375 rt_rule = kzalloc(len, GFP_KERNEL);
376 if (!rt_rule) {
377 IPAWANERR("unable to allocate memory for del route rule\n");
378 return;
379 }
380
381 memset(rt_rule, 0, len);
382 rt_rule->commit = 1;
383 rt_rule->num_hdls = 1;
384 rt_rule->ip = IPA_IP_v4;
385
386 rt_rule_entry = &rt_rule->hdl[0];
387 rt_rule_entry->status = -1;
388 rt_rule_entry->hdl = dflt_v4_wan_rt_hdl;
389
390 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
391 rt_rule_entry->hdl, IPA_IP_v4);
392 if (ipa2_del_rt_rule(rt_rule) ||
393 (rt_rule_entry->status)) {
394 IPAWANERR("Routing rule deletion failed!\n");
395 }
396
397 rt_rule->ip = IPA_IP_v6;
398 rt_rule_entry->hdl = dflt_v6_wan_rt_hdl;
399 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
400 rt_rule_entry->hdl, IPA_IP_v6);
401 if (ipa2_del_rt_rule(rt_rule) ||
402 (rt_rule_entry->status)) {
403 IPAWANERR("Routing rule deletion failed!\n");
404 }
405
406 kfree(rt_rule);
407}
408
/**
 * copy_ul_filter_rule_to_ipa() - cache UL filter rules received from the
 * modem over QMI and assign driver-local handles to them.
 * @rule_req: QMI install-filter request carrying the modem's rule specs
 * @rule_hdl: out array, receives one driver-assigned handle per rule
 *
 * Copies every field of each filter_spec into the file-scope
 * ipa_qmi_ctx->q6_ul_filter_rule[] cache and records how many rules were
 * received in num_q6_rule. Also flags XLAT rules when the request carries
 * a valid xlat index list.
 *
 * Return: 0 on success, -EINVAL if no rules were supplied, the rule count
 * exceeds the cache, or an xlat index is out of range (the cache and
 * num_q6_rule are reset on failure).
 */
int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
		*rule_req, uint32_t *rule_hdl)
{
	int i, j;

	/* prevent multi-threads accessing num_q6_rule */
	mutex_lock(&add_mux_channel_lock);
	if (rule_req->filter_spec_list_valid == true) {
		num_q6_rule = rule_req->filter_spec_list_len;
		IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
	} else {
		num_q6_rule = 0;
		IPAWANERR("got no UL rules from modem\n");
		mutex_unlock(&add_mux_channel_lock);
		return -EINVAL;
	}

	/* copy UL filter rules from Modem*/
	for (i = 0; i < num_q6_rule; i++) {
		/* check if rules overside the cache*/
		if (i == MAX_NUM_Q6_RULE) {
			IPAWANERR("Reaching (%d) max cache ",
				MAX_NUM_Q6_RULE);
			IPAWANERR(" however total (%d)\n",
				num_q6_rule);
			goto failure;
		}
		/* construct UL_filter_rule handler QMI use-cas */
		ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl =
			UL_FILTER_RULE_HANDLE_START + i;
		rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
		ipa_qmi_ctx->q6_ul_filter_rule[i].ip =
			rule_req->filter_spec_list[i].ip_type;
		ipa_qmi_ctx->q6_ul_filter_rule[i].action =
			rule_req->filter_spec_list[i].filter_action;
		/* optional QMI TLVs: only copy when the validity flag is set */
		if (rule_req->filter_spec_list[i].is_routing_table_index_valid
			== true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
			rule_req->filter_spec_list[i].route_table_index;
		if (rule_req->filter_spec_list[i].is_mux_id_valid == true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].mux_id =
			rule_req->filter_spec_list[i].mux_id;
		/* field-by-field copy of the rule's equation attributes */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
			rule_req->filter_spec_list[i].filter_rule.
			rule_eq_bitmap;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			tos_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
			rule_req->filter_spec_list[i].filter_rule.tos_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			protocol_eq_present = rule_req->filter_spec_list[i].
			filter_rule.protocol_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
			rule_req->filter_spec_list[i].filter_rule.
			protocol_eq;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_range_16;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_low = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].range_low;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_high = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].range_high;
		}
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			offset_meq_32[j].offset = rule_req->filter_spec_list[i].
			filter_rule.offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			offset_meq_32[j].mask = rule_req->filter_spec_list[i].
			filter_rule.offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			offset_meq_32[j].value = rule_req->filter_spec_list[i].
			filter_rule.offset_meq_32[j].value;
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
			rule_req->filter_spec_list[i].filter_rule.tc_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
			rule_req->filter_spec_list[i].filter_rule.tc_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			flow_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
			rule_req->filter_spec_list[i].filter_rule.flow_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_meq_32 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].
			eq_attrib.num_ihl_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].mask = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].value = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].value;
		}
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_128;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_128; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].offset;
			/* 128-bit mask/value are raw byte arrays, 16 bytes each */
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].mask,
				rule_req->filter_spec_list[i].
				filter_rule.offset_meq_128[j].mask, 16);
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].value, rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].value, 16);
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32_present = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.offset = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.mask = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.mask;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
			value = rule_req->filter_spec_list[i].filter_rule.
			metadata_meq32.value;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ipv4_frag_eq_present = rule_req->filter_spec_list[i].
			filter_rule.ipv4_frag_eq_present;
	}

	/* optionally mark which cached rules are XLAT rules */
	if (rule_req->xlat_filter_indices_list_valid) {
		if (rule_req->xlat_filter_indices_list_len > num_q6_rule) {
			IPAWANERR("Number of xlat indices is not valid: %d\n",
					rule_req->xlat_filter_indices_list_len);
			goto failure;
		}
		IPAWANDBG("Receive %d XLAT indices: ",
				rule_req->xlat_filter_indices_list_len);
		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
			IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
		IPAWANDBG("\n");

		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
			if (rule_req->xlat_filter_indices_list[i]
				>= num_q6_rule) {
				IPAWANERR("Xlat rule idx is wrong: %d\n",
					rule_req->xlat_filter_indices_list[i]);
				goto failure;
			} else {
				ipa_qmi_ctx->q6_ul_filter_rule
				[rule_req->xlat_filter_indices_list[i]]
				.is_xlat_rule = 1;
				IPAWANDBG("Rule %d is xlat rule\n",
					rule_req->xlat_filter_indices_list[i]);
			}
		}
	}
	goto success;

failure:
	/* reset the cache so stale partial state is never installed */
	num_q6_rule = 0;
	memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
		sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
	mutex_unlock(&add_mux_channel_lock);
	return -EINVAL;

success:
	mutex_unlock(&add_mux_channel_lock);
	return 0;
}
623
624static int wwan_add_ul_flt_rule_to_ipa(void)
625{
626 u32 pyld_sz;
627 int i, retval = 0;
628 int num_v4_rule = 0, num_v6_rule = 0;
629 struct ipa_ioc_add_flt_rule *param;
630 struct ipa_flt_rule_add flt_rule_entry;
631 struct ipa_fltr_installed_notif_req_msg_v01 *req;
632
633 if (ipa_qmi_ctx == NULL) {
634 IPAWANERR("ipa_qmi_ctx is NULL!\n");
635 return -EFAULT;
636 }
637
638 pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
639 sizeof(struct ipa_flt_rule_add);
640 param = kzalloc(pyld_sz, GFP_KERNEL);
641 if (!param)
642 return -ENOMEM;
643
644 req = (struct ipa_fltr_installed_notif_req_msg_v01 *)
645 kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
646 GFP_KERNEL);
647 if (!req) {
648 kfree(param);
649 return -ENOMEM;
650 }
651
Mohammed Javidbf4c8022017-08-07 23:15:48 +0530652 memset(req, 0, sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
653
Amir Levy9659e592016-10-27 18:08:27 +0300654 param->commit = 1;
655 param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
656 param->global = false;
657 param->num_rules = (uint8_t)1;
658
659 mutex_lock(&ipa_qmi_lock);
660 for (i = 0; i < num_q6_rule; i++) {
661 param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
662 memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
663 flt_rule_entry.at_rear = true;
664 flt_rule_entry.rule.action =
665 ipa_qmi_ctx->q6_ul_filter_rule[i].action;
666 flt_rule_entry.rule.rt_tbl_idx
667 = ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
668 flt_rule_entry.rule.retain_hdr = true;
669
670 /* debug rt-hdl*/
671 IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
672 i, flt_rule_entry.rule.rt_tbl_idx);
673 flt_rule_entry.rule.eq_attrib_type = true;
674 memcpy(&(flt_rule_entry.rule.eq_attrib),
675 &ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
676 sizeof(struct ipa_ipfltri_rule_eq));
677 memcpy(&(param->rules[0]), &flt_rule_entry,
678 sizeof(struct ipa_flt_rule_add));
679 if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
680 retval = -EFAULT;
681 IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
682 } else {
683 /* store the rule handler */
684 ipa_qmi_ctx->q6_ul_filter_rule_hdl[i] =
685 param->rules[0].flt_rule_hdl;
686 }
687 }
688 mutex_unlock(&ipa_qmi_lock);
689
690 /* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
691 req->source_pipe_index =
692 ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
693 req->install_status = QMI_RESULT_SUCCESS_V01;
694 req->filter_index_list_len = num_q6_rule;
695 mutex_lock(&ipa_qmi_lock);
696 for (i = 0; i < num_q6_rule; i++) {
697 if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) {
698 req->filter_index_list[i].filter_index = num_v4_rule;
699 num_v4_rule++;
700 } else {
701 req->filter_index_list[i].filter_index = num_v6_rule;
702 num_v6_rule++;
703 }
704 req->filter_index_list[i].filter_handle =
705 ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
706 }
707 mutex_unlock(&ipa_qmi_lock);
708 if (qmi_filter_notify_send(req)) {
709 IPAWANDBG("add filter rule index on A7-RX failed\n");
710 retval = -EFAULT;
711 }
712 old_num_q6_rule = num_q6_rule;
713 IPAWANDBG("add (%d) filter rule index on A7-RX\n",
714 old_num_q6_rule);
715 kfree(param);
716 kfree(req);
717 return retval;
718}
719
720static int wwan_del_ul_flt_rule_to_ipa(void)
721{
722 u32 pyld_sz;
723 int i, retval = 0;
724 struct ipa_ioc_del_flt_rule *param;
725 struct ipa_flt_rule_del flt_rule_entry;
726
727 pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
728 sizeof(struct ipa_flt_rule_del);
729 param = kzalloc(pyld_sz, GFP_KERNEL);
730 if (!param) {
731 IPAWANERR("kzalloc failed\n");
732 return -ENOMEM;
733 }
734
735 param->commit = 1;
736 param->num_hdls = (uint8_t) 1;
737
738 for (i = 0; i < old_num_q6_rule; i++) {
739 param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
740 memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
741 flt_rule_entry.hdl = ipa_qmi_ctx->q6_ul_filter_rule_hdl[i];
742 /* debug rt-hdl*/
743 IPAWANDBG("delete-IPA rule index(%d)\n", i);
744 memcpy(&(param->hdl[0]), &flt_rule_entry,
745 sizeof(struct ipa_flt_rule_del));
746 if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
747 IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
748 kfree(param);
749 return -EFAULT;
750 }
751 }
752
753 /* set UL filter-rule add-indication */
754 a7_ul_flt_set = false;
755 old_num_q6_rule = 0;
756
757 kfree(param);
758 return retval;
759}
760
761static int find_mux_channel_index(uint32_t mux_id)
762{
763 int i;
764
765 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
766 if (mux_id == mux_channel[i].mux_id)
767 return i;
768 }
769 return MAX_NUM_OF_MUX_CHANNEL;
770}
771
772static int find_vchannel_name_index(const char *vchannel_name)
773{
774 int i;
775
776 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
777 if (strcmp(mux_channel[i].vchannel_name, vchannel_name == 0))
778 return i;
779 }
780 return MAX_NUM_OF_MUX_CHANNEL;
781}
782
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530783static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
784{
785 int i;
786
787 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
788 if (strcmp(mux_channel[i].vchannel_name,
789 upstreamIface) == 0)
790 return IPA_UPSTEAM_MODEM;
791 }
792
793 if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
794 return IPA_UPSTEAM_WLAN;
795 else
796 return IPA_UPSTEAM_MAX;
797}
798
/**
 * wwan_register_to_ipa() - register one rmnet mux channel with the IPA
 * driver: add its QMAP header (if not yet added) and register the
 * interface's TX/RX/EXT properties so the cached Q6 UL rules apply.
 * @index: slot in mux_channel[] to register
 *
 * Return: 0 on success, negative errno on failure.
 */
static int wwan_register_to_ipa(int index)
{
	struct ipa_tx_intf tx_properties = {0};
	struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
	struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
	struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
	struct ipa_rx_intf rx_properties = {0};
	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
	struct ipa_ext_intf ext_properties = {0};
	struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
	u32 pyld_sz;
	int ret = 0, i;

	IPAWANDBG("index(%d) device[%s]:\n", index,
		mux_channel[index].vchannel_name);
	/* lazily install this channel's QMAP header on first registration */
	if (!mux_channel[index].mux_hdr_set) {
		ret = ipa_add_qmap_hdr(mux_channel[index].mux_id,
		      &mux_channel[index].hdr_hdl);
		if (ret) {
			IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
			return ret;
		}
		mux_channel[index].mux_hdr_set = true;
	}
	/* TX properties: v4 and v6 both use the v4-named QMAP header */
	tx_properties.prop = tx_ioc_properties;
	tx_ipv4_property = &tx_properties.prop[0];
	tx_ipv4_property->ip = IPA_IP_v4;
	tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
	snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		A2_MUX_HDR_NAME_V4_PREF,
		mux_channel[index].mux_id);
	tx_ipv6_property = &tx_properties.prop[1];
	tx_ipv6_property->ip = IPA_IP_v6;
	tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
	/* no need use A2_MUX_HDR_NAME_V6_PREF, same header */
	snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		A2_MUX_HDR_NAME_V4_PREF,
		mux_channel[index].mux_id);
	tx_properties.num_props = 2;

	/* RX properties: match UL traffic by mux id in the metadata word */
	rx_properties.prop = rx_ioc_properties;
	rx_ipv4_property = &rx_properties.prop[0];
	rx_ipv4_property->ip = IPA_IP_v4;
	rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
	rx_ipv4_property->attrib.meta_data =
		mux_channel[index].mux_id << WWAN_METADATA_SHFT;
	rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
	rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
	rx_ipv6_property = &rx_properties.prop[1];
	rx_ipv6_property->ip = IPA_IP_v6;
	rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
	rx_ipv6_property->attrib.meta_data =
		mux_channel[index].mux_id << WWAN_METADATA_SHFT;
	rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
	rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
	rx_properties.num_props = 2;

	/* EXT properties: one entry per cached Q6 UL filter rule */
	pyld_sz = num_q6_rule *
		sizeof(struct ipa_ioc_ext_intf_prop);
	ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
	if (!ext_ioc_properties) {
		IPAWANERR("Error allocate memory\n");
		return -ENOMEM;
	}

	ext_properties.prop = ext_ioc_properties;
	ext_properties.excp_pipe_valid = true;
	ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
	ext_properties.num_props = num_q6_rule;
	for (i = 0; i < num_q6_rule; i++) {
		memcpy(&(ext_properties.prop[i]),
			&(ipa_qmi_ctx->q6_ul_filter_rule[i]),
			sizeof(struct ipa_ioc_ext_intf_prop));
		ext_properties.prop[i].mux_id = mux_channel[index].mux_id;
		IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
			ext_properties.prop[i].ip,
			ext_properties.prop[i].rt_tbl_idx);
		IPAWANDBG("action: %d mux:%d\n",
			ext_properties.prop[i].action,
			ext_properties.prop[i].mux_id);
	}
	ret = ipa2_register_intf_ext(mux_channel[index].
		vchannel_name, &tx_properties,
		&rx_properties, &ext_properties);
	if (ret) {
		IPAWANERR("[%s]:ipa2_register_intf failed %d\n",
			mux_channel[index].vchannel_name, ret);
		goto fail;
	}
	mux_channel[index].ul_flt_reg = true;
fail:
	kfree(ext_ioc_properties);
	return ret;
}
895
896static void ipa_cleanup_deregister_intf(void)
897{
898 int i;
899 int ret;
900
901 for (i = 0; i < rmnet_index; i++) {
902 if (mux_channel[i].ul_flt_reg) {
903 ret = ipa2_deregister_intf(
904 mux_channel[i].vchannel_name);
905 if (ret < 0) {
906 IPAWANERR("de-register device %s(%d) failed\n",
907 mux_channel[i].vchannel_name,
908 i);
909 return;
910 }
911 IPAWANDBG("de-register device %s(%d) success\n",
912 mux_channel[i].vchannel_name,
913 i);
914 }
915 mux_channel[i].ul_flt_reg = false;
916 }
917}
918
/**
 * wwan_update_mux_channel_prop() - re-install A7 UL filter rules (when
 * applicable) and re-register the TX/RX/EXT properties of every known
 * mux channel with IPA.
 *
 * Called when the cached rule set changes (e.g. after SSR or a fresh
 * modem rule install) so the registered interfaces stay consistent.
 *
 * Return: 0 on success, -EINVAL/-ENODEV on failure.
 */
int wwan_update_mux_channel_prop(void)
{
	int ret = 0, i;
	/* install UL filter rules */
	if (egress_set) {
		/* only when the modem does NOT filter on the embedded pipe */
		if (ipa_qmi_ctx &&
			ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
			IPAWANDBG("setup UL filter rules\n");
			if (a7_ul_flt_set) {
				IPAWANDBG("del previous UL filter rules\n");
				/* delete rule hdlers */
				ret = wwan_del_ul_flt_rule_to_ipa();
				if (ret) {
					IPAWANERR("failed to del old rules\n");
					return -EINVAL;
				}
				IPAWANDBG("deleted old UL rules\n");
			}
			ret = wwan_add_ul_flt_rule_to_ipa();
		}
		if (ret)
			IPAWANERR("failed to install UL rules\n");
		else
			a7_ul_flt_set = true;
	}
	/* update Tx/Rx/Ext property */
	IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
	if (rmnet_index == 0) {
		IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
		return ret;
	}

	ipa_cleanup_deregister_intf();

	for (i = 0; i < rmnet_index; i++) {
		ret = wwan_register_to_ipa(i);
		if (ret < 0) {
			IPAWANERR("failed to re-regist %s, mux %d, index %d\n",
				mux_channel[i].vchannel_name,
				mux_channel[i].mux_id,
				i);
			return -ENODEV;
		}
		/* NOTE(review): success logged at ERR level — looks like it
		 * should be IPAWANDBG; confirm before changing log severity.
		 */
		IPAWANERR("dev(%s) has registered to IPA\n",
			mux_channel[i].vchannel_name);
		mux_channel[i].ul_flt_reg = true;
	}
	return ret;
}
968
/* Compat shim: older kernels expose INIT_COMPLETION() instead of
 * reinit_completion(); map the new name onto the old macro there.
 */
#ifdef INIT_COMPLETION
#define reinit_completion(x) INIT_COMPLETION(*(x))
#endif /* INIT_COMPLETION */
972
973static int __ipa_wwan_open(struct net_device *dev)
974{
975 struct wwan_private *wwan_ptr = netdev_priv(dev);
976
977 IPAWANDBG("[%s] __wwan_open()\n", dev->name);
978 if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
979 reinit_completion(&wwan_ptr->resource_granted_completion);
980 wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
981
982 if (ipa_rmnet_res.ipa_napi_enable)
983 napi_enable(&(wwan_ptr->napi));
984 return 0;
985}
986
987/**
988 * wwan_open() - Opens the wwan network interface. Opens logical
989 * channel on A2 MUX driver and starts the network stack queue
990 *
991 * @dev: network device
992 *
993 * Return codes:
994 * 0: success
995 * -ENODEV: Error while opening logical channel on A2 MUX driver
996 */
997static int ipa_wwan_open(struct net_device *dev)
998{
999 int rc = 0;
1000
1001 IPAWANDBG("[%s] wwan_open()\n", dev->name);
1002 rc = __ipa_wwan_open(dev);
1003 if (rc == 0)
1004 netif_start_queue(dev);
1005 return rc;
1006}
1007
1008static int __ipa_wwan_close(struct net_device *dev)
1009{
1010 struct wwan_private *wwan_ptr = netdev_priv(dev);
1011 int rc = 0;
1012
1013 if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
1014 wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
1015 /* do not close wwan port once up, this causes
1016 * remote side to hang if tried to open again
1017 */
1018 reinit_completion(&wwan_ptr->resource_granted_completion);
1019 if (ipa_rmnet_res.ipa_napi_enable)
1020 napi_disable(&(wwan_ptr->napi));
1021 rc = ipa2_deregister_intf(dev->name);
1022 if (rc) {
1023 IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n",
1024 dev->name, rc);
1025 return rc;
1026 }
1027 return rc;
1028 } else {
1029 return -EBADF;
1030 }
1031}
1032
/**
 * ipa_wwan_stop() - Stops the wwan network interface. Closes
 * logical channel on A2 MUX driver and stops the network stack
 * queue
 *
 * @dev: network device
 *
 * Return codes:
 * 0: success
 * -ENODEV: Error while opening logical channel on A2 MUX driver
 */
static int ipa_wwan_stop(struct net_device *dev)
{
	IPAWANDBG("[%s] ipa_wwan_stop()\n", dev->name);
	/* NOTE(review): __ipa_wwan_close() can fail (-EBADF or a
	 * de-register error) but its status is ignored and 0 is always
	 * returned — confirm this is intended.
	 */
	__ipa_wwan_close(dev);
	netif_stop_queue(dev);
	return 0;
}
1051
1052static int ipa_wwan_change_mtu(struct net_device *dev, int new_mtu)
1053{
1054 if (0 > new_mtu || WWAN_DATA_LEN < new_mtu)
1055 return -EINVAL;
1056 IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
1057 dev->name, dev->mtu, new_mtu);
1058 dev->mtu = new_mtu;
1059 return 0;
1060}
1061
1062/**
1063 * ipa_wwan_xmit() - Transmits an skb.
1064 *
1065 * @skb: skb to be transmitted
1066 * @dev: network device
1067 *
1068 * Return codes:
1069 * 0: success
1070 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
1071 * later
1072 * -EFAULT: Error while transmitting the skb
1073 */
1074static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
1075{
1076 int ret = 0;
1077 bool qmap_check;
1078 struct wwan_private *wwan_ptr = netdev_priv(dev);
1079 struct ipa_tx_meta meta;
1080
1081 if (skb->protocol != htons(ETH_P_MAP)) {
1082 IPAWANDBG
1083 ("SW filtering out none QMAP packet received from %s",
1084 current->comm);
Sunil Paidimarri6c818e82016-10-17 18:33:13 -07001085 dev_kfree_skb_any(skb);
1086 dev->stats.tx_dropped++;
Amir Levy9659e592016-10-27 18:08:27 +03001087 return NETDEV_TX_OK;
1088 }
1089
1090 qmap_check = RMNET_MAP_GET_CD_BIT(skb);
1091 if (netif_queue_stopped(dev)) {
1092 if (qmap_check &&
1093 atomic_read(&wwan_ptr->outstanding_pkts) <
1094 wwan_ptr->outstanding_high_ctl) {
1095 pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
1096 goto send;
1097 } else {
1098 pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
1099 return NETDEV_TX_BUSY;
1100 }
1101 }
1102
1103 /* checking High WM hit */
1104 if (atomic_read(&wwan_ptr->outstanding_pkts) >=
1105 wwan_ptr->outstanding_high) {
1106 if (!qmap_check) {
1107 IPAWANDBG("pending(%d)/(%d)- stop(%d), qmap_chk(%d)\n",
1108 atomic_read(&wwan_ptr->outstanding_pkts),
1109 wwan_ptr->outstanding_high,
1110 netif_queue_stopped(dev),
1111 qmap_check);
1112 netif_stop_queue(dev);
1113 return NETDEV_TX_BUSY;
1114 }
1115 }
1116
1117send:
1118 /* IPA_RM checking start */
1119 ret = ipa_rm_inactivity_timer_request_resource(
1120 IPA_RM_RESOURCE_WWAN_0_PROD);
1121 if (ret == -EINPROGRESS) {
1122 netif_stop_queue(dev);
1123 return NETDEV_TX_BUSY;
1124 }
1125 if (ret) {
1126 pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
1127 dev->name, ret);
Sunil Paidimarri6c818e82016-10-17 18:33:13 -07001128 dev_kfree_skb_any(skb);
1129 dev->stats.tx_dropped++;
Amir Levy9659e592016-10-27 18:08:27 +03001130 return -EFAULT;
1131 }
1132 /* IPA_RM checking end */
1133
1134 if (qmap_check) {
1135 memset(&meta, 0, sizeof(meta));
1136 meta.pkt_init_dst_ep_valid = true;
1137 meta.pkt_init_dst_ep_remote = true;
1138 ret = ipa2_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
1139 } else {
1140 ret = ipa2_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
1141 }
1142
1143 if (ret) {
1144 ret = NETDEV_TX_BUSY;
Amir Levy9659e592016-10-27 18:08:27 +03001145 goto out;
1146 }
1147
1148 atomic_inc(&wwan_ptr->outstanding_pkts);
1149 dev->stats.tx_packets++;
1150 dev->stats.tx_bytes += skb->len;
1151 ret = NETDEV_TX_OK;
1152out:
1153 ipa_rm_inactivity_timer_release_resource(
1154 IPA_RM_RESOURCE_WWAN_0_PROD);
1155 return ret;
1156}
1157
/* Netdev watchdog callback: only logs the UL data stall; no recovery is
 * attempted here.
 */
static void ipa_wwan_tx_timeout(struct net_device *dev)
{
	IPAWANERR("[%s] ipa_wwan_tx_timeout(), data stall in UL\n", dev->name);
}
1162
/**
 * apps_ipa_tx_complete_notify() - Tx-complete notify
 *
 * @priv: driver context (the net_device the skb was sent on)
 * @evt: event type (only IPA_WRITE_DONE is handled)
 * @data: the completed sk_buff, cast to unsigned long
 *
 * Check that the packet is the one we sent and release it
 * This function will be called in defered context in IPA wq.
 */
static void apps_ipa_tx_complete_notify(void *priv,
		enum ipa_dp_evt_type evt,
		unsigned long data)
{
	struct sk_buff *skb = (struct sk_buff *)data;
	struct net_device *dev = (struct net_device *)priv;
	struct wwan_private *wwan_ptr;

	/* completion for a netdev from before SSR: just free the skb */
	if (dev != ipa_netdevs[0]) {
		IPAWANDBG("Received pre-SSR packet completion\n");
		dev_kfree_skb_any(skb);
		return;
	}

	if (evt != IPA_WRITE_DONE) {
		IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return;
	}

	wwan_ptr = netdev_priv(dev);
	atomic_dec(&wwan_ptr->outstanding_pkts);
	/* wake the queue under the tx lock once in-flight packets drop
	 * below the low watermark, unless SSR is in progress
	 */
	__netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
	if (!atomic_read(&is_ssr) &&
		netif_queue_stopped(wwan_ptr->net) &&
		atomic_read(&wwan_ptr->outstanding_pkts) <
					(wwan_ptr->outstanding_low)) {
		IPAWANDBG("Outstanding low (%d) - wake up queue\n",
				wwan_ptr->outstanding_low);
		netif_wake_queue(wwan_ptr->net);
	}
	__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
	dev_kfree_skb_any(skb);
	/* release the RM resource taken in ipa_wwan_xmit() */
	ipa_rm_inactivity_timer_release_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
}
1210
/**
 * apps_ipa_packet_receive_notify() - Rx notify
 *
 * @priv: driver context
 * @evt: event type
 * @data: data provided with event
 *
 * IPA will pass a packet to the Linux network stack with skb->data
 */
static void apps_ipa_packet_receive_notify(void *priv,
		enum ipa_dp_evt_type evt,
		unsigned long data)
{
	struct net_device *dev = (struct net_device *)priv;

	if (evt == IPA_RECEIVE) {
		struct sk_buff *skb = (struct sk_buff *)data;
		int result;
		/* cache the length: the stack may consume/free the skb
		 * inside netif_receive_skb()/netif_rx() below
		 */
		unsigned int packet_len = skb->len;

		IPAWANDBG("Rx packet was received");
		skb->dev = ipa_netdevs[0];
		skb->protocol = htons(ETH_P_MAP);

		if (ipa_rmnet_res.ipa_napi_enable) {
			trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets);
			result = netif_receive_skb(skb);
		} else {
			/* non-NAPI: use netif_rx_ni() once every
			 * IPA_WWAN_RX_SOFTIRQ_THRESH packets to let
			 * softirqs run; plain netif_rx() otherwise
			 */
			if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
				== 0) {
				trace_rmnet_ipa_netifni(dev->stats.rx_packets);
				result = netif_rx_ni(skb);
			} else {
				trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
				result = netif_rx(skb);
			}
		}

		if (result) {
			pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
							   __func__, __LINE__);
			dev->stats.rx_dropped++;
		}
		/* NOTE(review): rx_packets/rx_bytes are bumped even when the
		 * packet was counted as dropped above — confirm intended.
		 */
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += packet_len;
	} else if (evt == IPA_CLIENT_START_POLL)
		ipa_rmnet_rx_cb(priv);
	else if (evt == IPA_CLIENT_COMP_NAPI) {
		struct wwan_private *wwan_ptr = netdev_priv(dev);

		if (ipa_rmnet_res.ipa_napi_enable)
			napi_complete(&(wwan_ptr->napi));
	} else
		IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);

}
1267
/*
 * handle_ingress_format() - handle RMNET_IOCTL_SET_INGRESS_DATA_FORMAT.
 *
 * @dev: the rmnet net_device the ioctl arrived on
 * @in: the extended ioctl payload (flags + aggregation parameters)
 *
 * Programs the file-scope ipa_to_apps_ep_cfg (DL checksum offload,
 * aggregation limits, 4-byte QMAP header layout, metadata mask) and then
 * brings up the IPA->APPS WAN consumer pipe via ipa2_setup_sys_pipe().
 *
 * Return: 0 on success, -EFAULT while SSR is in progress, or the error
 * from ipa_disable_apps_wan_cons_deaggr()/ipa2_setup_sys_pipe().
 */
static int handle_ingress_format(struct net_device *dev,
			struct rmnet_ioctl_extended_s *in)
{
	int ret = 0;
	struct rmnet_phys_ep_conf_s *ep_cfg;

	IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
		ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en =
		   IPA_ENABLE_CS_OFFLOAD_DL;

	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
		IPAWANERR("get AGG size %d count %d\n",
			  in->u.ingress_format.agg_size,
			  in->u.ingress_format.agg_count);

		/* only apply the requested limits if deaggr on the APPS WAN
		 * consumer could be disabled for them
		 */
		ret = ipa_disable_apps_wan_cons_deaggr(
			  in->u.ingress_format.agg_size,
			  in->u.ingress_format.agg_count);

		if (!ret) {
			ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit =
			   in->u.ingress_format.agg_size;
			ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
			   in->u.ingress_format.agg_count;

			if (ipa_rmnet_res.ipa_napi_enable) {
				ipa_to_apps_ep_cfg.recycle_enabled = true;
				/* NOTE(review): rx_handler_data is read via
				 * rcu_dereference() with no rcu_read_lock()
				 * visible here — confirm caller context.
				 */
				ep_cfg = (struct rmnet_phys_ep_conf_s *)
				   rcu_dereference(dev->rx_handler_data);
				ep_cfg->recycle = ipa_recycle_wan_skb;
				pr_info("Wan Recycle Enabled\n");
			}
		}
	}

	/* 4-byte QMAP header: mux-id at offset 1, packet size at offset 2 */
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;

	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding =
		true;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;

	ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
	ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify;
	ipa_to_apps_ep_cfg.priv = dev;

	ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
	ipa_to_apps_ep_cfg.desc_fifo_sz =
		ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);

	/* pipe handle is shared with SSR teardown — guard it */
	mutex_lock(&ipa_to_apps_pipe_handle_guard);
	if (atomic_read(&is_ssr)) {
		IPAWANDBG("In SSR sequence/recovery\n");
		mutex_unlock(&ipa_to_apps_pipe_handle_guard);
		return -EFAULT;
	}
	ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
	mutex_unlock(&ipa_to_apps_pipe_handle_guard);

	if (ret)
		IPAWANERR("failed to configure ingress\n");

	return ret;
}
1340
/**
 * ipa_wwan_ioctl() - I/O control for wwan network driver.
 *
 * @dev: network device
 * @ifr: user ioctl payload (rmnet_ioctl_data_s / rmnet_ioctl_extended_s)
 * @cmd: cmd to be executed. can be one of the following:
 * IPA_WWAN_IOCTL_OPEN - Open the network interface
 * IPA_WWAN_IOCTL_CLOSE - Close the network interface
 *
 * Return codes:
 * 0: success
 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
 * later
 * -EFAULT: Error while transmitting the skb
 */
static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int rc = 0;
	int mru = 1000, epid = 1, mux_index, len;
	struct ipa_msg_meta msg_meta;
	struct ipa_wan_msg *wan_msg = NULL;
	struct rmnet_ioctl_extended_s extend_ioctl_data;
	struct rmnet_ioctl_data_s ioctl_data;

	IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
	switch (cmd) {
	/* Set Ethernet protocol */
	case RMNET_IOCTL_SET_LLP_ETHERNET:
		break;
	/* Set RAWIP protocol */
	case RMNET_IOCTL_SET_LLP_IP:
		break;
	/* Get link protocol */
	case RMNET_IOCTL_GET_LLP:
		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
			sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	/* Set QoS header enabled */
	case RMNET_IOCTL_SET_QOS_ENABLE:
		return -EINVAL;
	/* Set QoS header disabled */
	case RMNET_IOCTL_SET_QOS_DISABLE:
		break;
	/* Get QoS header state */
	case RMNET_IOCTL_GET_QOS:
		ioctl_data.u.operation_mode = RMNET_MODE_NONE;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
			sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	/* Get operation mode */
	case RMNET_IOCTL_GET_OPMODE:
		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
			sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	/* Open transport port */
	case RMNET_IOCTL_OPEN:
		break;
	/* Close transport port */
	case RMNET_IOCTL_CLOSE:
		break;
	/* Flow enable */
	case RMNET_IOCTL_FLOW_ENABLE:
		IPAWANDBG("Received flow enable\n");
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
			sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		ipa_flow_control(IPA_CLIENT_USB_PROD, true,
			ioctl_data.u.tcm_handle);
		break;
	/* Flow disable */
	case RMNET_IOCTL_FLOW_DISABLE:
		IPAWANDBG("Received flow disable\n");
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
			sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		ipa_flow_control(IPA_CLIENT_USB_PROD, false,
			ioctl_data.u.tcm_handle);
		break;
	/* Set flow handle */
	case RMNET_IOCTL_FLOW_SET_HNDL:
		break;

	/* Extended IOCTLs */
	case RMNET_IOCTL_EXTENDED:
		IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
		if (copy_from_user(&extend_ioctl_data,
			(u8 *)ifr->ifr_ifru.ifru_data,
			sizeof(struct rmnet_ioctl_extended_s))) {
			IPAWANERR("failed to copy extended ioctl data\n");
			rc = -EFAULT;
			break;
		}
		switch (extend_ioctl_data.extended_ioctl) {
		/* Get features */
		case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
			IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
			extend_ioctl_data.u.data =
				(RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
				RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
				RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* Set MRU */
		case RMNET_IOCTL_SET_MRU:
			/* only cached in the local 'mru'; not pushed to HW */
			mru = extend_ioctl_data.u.data;
			IPAWANDBG("get MRU size %d\n",
				extend_ioctl_data.u.data);
			break;
		/* Get MRU */
		case RMNET_IOCTL_GET_MRU:
			extend_ioctl_data.u.data = mru;
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* GET SG support */
		case RMNET_IOCTL_GET_SG_SUPPORT:
			extend_ioctl_data.u.data =
				ipa_rmnet_res.ipa_advertise_sg_support;
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* Get endpoint ID */
		case RMNET_IOCTL_GET_EPID:
			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
			extend_ioctl_data.u.data = epid;
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			if (copy_from_user(&extend_ioctl_data,
				(u8 *)ifr->ifr_ifru.ifru_data,
				sizeof(struct rmnet_ioctl_extended_s))) {
				IPAWANERR("copy extended ioctl data failed\n");
				rc = -EFAULT;
				break;
			}
			IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
					extend_ioctl_data.u.data);
			break;
		/* Endpoint pair */
		case RMNET_IOCTL_GET_EP_PAIR:
			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
			ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
			ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			if (copy_from_user(&extend_ioctl_data,
				(u8 *)ifr->ifr_ifru.ifru_data,
				sizeof(struct rmnet_ioctl_extended_s))) {
				IPAWANERR("copy extended ioctl data failed\n");
				rc = -EFAULT;
				break;
			}
			IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
			break;
		/* Get driver name */
		case RMNET_IOCTL_GET_DRIVER_NAME:
			memcpy(&extend_ioctl_data.u.if_name,
				ipa_netdevs[0]->name, IFNAMSIZ);
			extend_ioctl_data.u.if_name[IFNAMSIZ - 1] = '\0';
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
					&extend_ioctl_data,
					sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* Add MUX ID */
		case RMNET_IOCTL_ADD_MUX_CHANNEL:
			/* NOTE(review): the duplicate-mux lookup happens
			 * before add_mux_channel_lock is taken — confirm
			 * concurrent ADD ioctls cannot race here.
			 */
			mux_index = find_mux_channel_index(
				extend_ioctl_data.u.rmnet_mux_val.mux_id);
			if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
				IPAWANDBG("already setup mux(%d)\n",
					extend_ioctl_data.u.
					rmnet_mux_val.mux_id);
				return rc;
			}
			mutex_lock(&add_mux_channel_lock);
			if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
				IPAWANERR("Exceed mux_channel limit(%d)\n",
				rmnet_index);
				mutex_unlock(&add_mux_channel_lock);
				return -EFAULT;
			}
			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
			extend_ioctl_data.u.rmnet_mux_val.mux_id,
			extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
			/* cache the mux name and id */
			mux_channel[rmnet_index].mux_id =
				extend_ioctl_data.u.rmnet_mux_val.mux_id;
			memcpy(mux_channel[rmnet_index].vchannel_name,
				extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
				sizeof(mux_channel[rmnet_index].vchannel_name));
			mux_channel[rmnet_index].vchannel_name[
				IFNAMSIZ - 1] = '\0';

			IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
				mux_channel[rmnet_index].vchannel_name,
				mux_channel[rmnet_index].mux_id,
				rmnet_index);
			/* check if UL filter rules coming*/
			if (num_q6_rule != 0) {
				IPAWANERR("dev(%s) register to IPA\n",
					extend_ioctl_data.u.rmnet_mux_val.
					vchannel_name);
				rc = wwan_register_to_ipa(rmnet_index);
				if (rc < 0) {
					IPAWANERR("device %s reg IPA failed\n",
						extend_ioctl_data.u.
						rmnet_mux_val.vchannel_name);
					mutex_unlock(&add_mux_channel_lock);
					return -ENODEV;
				}
				mux_channel[rmnet_index].mux_channel_set = true;
				mux_channel[rmnet_index].ul_flt_reg = true;
			} else {
				IPAWANDBG("dev(%s) haven't registered to IPA\n",
					extend_ioctl_data.u.
					rmnet_mux_val.vchannel_name);
				mux_channel[rmnet_index].mux_channel_set = true;
				mux_channel[rmnet_index].ul_flt_reg = false;
			}
			rmnet_index++;
			mutex_unlock(&add_mux_channel_lock);
			break;
		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
			IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
			/* 8-byte header when UL checksum offload is on
			 * (4-byte QMAP + 4-byte checksum trailer meta),
			 * plain 4-byte QMAP header otherwise
			 */
			if ((extend_ioctl_data.u.data) &
					RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
				apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
				apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
					cs_offload_en =
					IPA_ENABLE_CS_OFFLOAD_UL;
				apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
					cs_metadata_hdr_offset = 1;
			} else {
				apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
			}
			if ((extend_ioctl_data.u.data) &
					RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
				apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
					IPA_ENABLE_AGGR;
			else
				apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
					IPA_BYPASS_AGGR;
			apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
				hdr_ofst_metadata_valid = 1;
			/* modem want offset at 0! */
			apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
			apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst =
					IPA_CLIENT_APPS_LAN_WAN_PROD;
			apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC;

			apps_to_ipa_ep_cfg.client =
				IPA_CLIENT_APPS_LAN_WAN_PROD;
			apps_to_ipa_ep_cfg.notify =
				apps_ipa_tx_complete_notify;
			apps_to_ipa_ep_cfg.desc_fifo_sz =
				IPA_SYS_TX_DATA_DESC_FIFO_SZ;
			apps_to_ipa_ep_cfg.priv = dev;

			rc = ipa2_setup_sys_pipe(&apps_to_ipa_ep_cfg,
				&apps_to_ipa_hdl);
			/* NOTE(review): a pipe-setup failure is only logged;
			 * execution continues and rc may be overwritten
			 * below — confirm intended.
			 */
			if (rc)
				IPAWANERR("failed to config egress endpoint\n");

			if (num_q6_rule != 0) {
				/* already got Q6 UL filter rules*/
				if (ipa_qmi_ctx &&
					ipa_qmi_ctx->modem_cfg_emb_pipe_flt
					== false) {
					/* protect num_q6_rule */
					mutex_lock(&add_mux_channel_lock);
					rc = wwan_add_ul_flt_rule_to_ipa();
					mutex_unlock(&add_mux_channel_lock);
				} else
					rc = 0;
				egress_set = true;
				if (rc)
					IPAWANERR("install UL rules failed\n");
				else
					a7_ul_flt_set = true;
			} else {
				/* wait Q6 UL filter rules*/
				egress_set = true;
				IPAWANDBG("no UL-rules, egress_set(%d)\n",
					egress_set);
			}
			break;
		case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
			rc = handle_ingress_format(dev, &extend_ioctl_data);
			break;
		case RMNET_IOCTL_SET_XLAT_DEV_INFO:
			wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
						GFP_KERNEL);
			if (!wan_msg) {
				IPAWANERR("Failed to allocate memory.\n");
				return -ENOMEM;
			}
			len = sizeof(wan_msg->upstream_ifname) >
			sizeof(extend_ioctl_data.u.if_name) ?
			sizeof(extend_ioctl_data.u.if_name) :
			sizeof(wan_msg->upstream_ifname);
			strlcpy(wan_msg->upstream_ifname,
				extend_ioctl_data.u.if_name, len);
			wan_msg->upstream_ifname[len - 1] = '\0';
			memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
			msg_meta.msg_type = WAN_XLAT_CONNECT;
			msg_meta.msg_len = sizeof(struct ipa_wan_msg);
			/* on success wan_msg ownership passes to ipa2_send_msg
			 * (freed by ipa_wwan_msg_free_cb)
			 */
			rc = ipa2_send_msg(&msg_meta, wan_msg,
					ipa_wwan_msg_free_cb);
			if (rc) {
				IPAWANERR("Failed to send XLAT_CONNECT msg\n");
				kfree(wan_msg);
			}
			break;
		/* Get agg count */
		case RMNET_IOCTL_GET_AGGREGATION_COUNT:
			break;
		/* Set agg count */
		case RMNET_IOCTL_SET_AGGREGATION_COUNT:
			break;
		/* Get agg size */
		case RMNET_IOCTL_GET_AGGREGATION_SIZE:
			break;
		/* Set agg size */
		case RMNET_IOCTL_SET_AGGREGATION_SIZE:
			break;
		/* Do flow control */
		case RMNET_IOCTL_FLOW_CONTROL:
			break;
		/* For legacy use */
		case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
			break;
		/* Get HW/SW map */
		case RMNET_IOCTL_GET_HWSW_MAP:
			break;
		/* Set RX Headroom */
		case RMNET_IOCTL_SET_RX_HEADROOM:
			break;
		default:
			IPAWANERR("[%s] unsupported extended cmd[%d]",
				dev->name,
				extend_ioctl_data.extended_ioctl);
			rc = -EINVAL;
		}
		break;
	default:
		IPAWANERR("[%s] unsupported cmd[%d]",
			dev->name, cmd);
		rc = -EINVAL;
	}
	return rc;
}
1715
/* net_device_ops for the rmnet/IPA raw-IP interface; no MAC address
 * handling (set_mac_address/validate_addr deliberately unset).
 */
static const struct net_device_ops ipa_wwan_ops_ip = {
	.ndo_open = ipa_wwan_open,
	.ndo_stop = ipa_wwan_stop,
	.ndo_start_xmit = ipa_wwan_xmit,
	.ndo_tx_timeout = ipa_wwan_tx_timeout,
	.ndo_do_ioctl = ipa_wwan_ioctl,
	.ndo_change_mtu = ipa_wwan_change_mtu,
	.ndo_set_mac_address = 0,
	.ndo_validate_addr = 0,
};
1726
1727/**
1728 * wwan_setup() - Setups the wwan network driver.
1729 *
1730 * @dev: network device
1731 *
1732 * Return codes:
1733 * None
1734 */
1735
1736static void ipa_wwan_setup(struct net_device *dev)
1737{
1738 dev->netdev_ops = &ipa_wwan_ops_ip;
1739 ether_setup(dev);
1740 /* set this after calling ether_setup */
1741 dev->header_ops = 0; /* No header */
1742 dev->type = ARPHRD_RAWIP;
1743 dev->hard_header_len = 0;
1744 dev->mtu = WWAN_DATA_LEN;
1745 dev->addr_len = 0;
1746 dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
1747 dev->needed_headroom = HEADROOM_FOR_QMAP;
1748 dev->needed_tailroom = TAILROOM;
1749 dev->watchdog_timeo = 1000;
1750}
1751
/* IPA_RM related functions start*/
/* Delayed-work items that defer Q6 producer request/release calls onto
 * the dedicated IPA RM workqueue (handlers defined below).
 */
static void q6_prod_rm_request_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(q6_con_rm_request, q6_prod_rm_request_resource);
static void q6_prod_rm_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(q6_con_rm_release, q6_prod_rm_release_resource);
1757
1758static void q6_prod_rm_request_resource(struct work_struct *work)
1759{
1760 int ret = 0;
1761
1762 ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
1763 if (ret < 0 && ret != -EINPROGRESS) {
1764 IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
1765 ret);
1766 return;
1767 }
1768}
1769
1770static int q6_rm_request_resource(void)
1771{
1772 queue_delayed_work(ipa_rm_q6_workqueue,
1773 &q6_con_rm_request, 0);
1774 return 0;
1775}
1776
1777static void q6_prod_rm_release_resource(struct work_struct *work)
1778{
1779 int ret = 0;
1780
1781 ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
1782 if (ret < 0 && ret != -EINPROGRESS) {
1783 IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
1784 ret);
1785 return;
1786 }
1787}
1788
1789
1790static int q6_rm_release_resource(void)
1791{
1792 queue_delayed_work(ipa_rm_q6_workqueue,
1793 &q6_con_rm_release, 0);
1794 return 0;
1795}
1796
1797
1798static void q6_rm_notify_cb(void *user_data,
1799 enum ipa_rm_event event,
1800 unsigned long data)
1801{
1802 switch (event) {
1803 case IPA_RM_RESOURCE_GRANTED:
1804 IPAWANDBG("%s: Q6_PROD GRANTED CB\n", __func__);
1805 break;
1806 case IPA_RM_RESOURCE_RELEASED:
1807 IPAWANDBG("%s: Q6_PROD RELEASED CB\n", __func__);
1808 break;
1809 default:
1810 return;
1811 }
1812}
/*
 * q6_initialize_rm() - create the Q6 PROD/CONS IPA RM resources, add the
 * Q6_PROD -> APPS_CONS dependency and set a 100 Mbps performance profile
 * on both resources.  Uses a goto unwind chain so a failure at any step
 * releases everything created before it.
 *
 * Return: 0 on success, negative errno on failure.
 * NOTE(review): on the unwind path 'result' is overwritten by the
 * ipa_rm_delete_resource() calls, so the returned code may be the
 * cleanup status rather than the original failure — confirm intended.
 */
static int q6_initialize_rm(void)
{
	struct ipa_rm_create_params create_params;
	struct ipa_rm_perf_profile profile;
	int result;

	/* Initialize IPA_RM workqueue */
	ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req");
	if (!ipa_rm_q6_workqueue)
		return -ENOMEM;

	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_Q6_PROD;
	create_params.reg_params.notify_cb = &q6_rm_notify_cb;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto create_rsrc_err1;
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_Q6_CONS;
	create_params.release_resource = &q6_rm_release_resource;
	create_params.request_resource = &q6_rm_request_resource;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto create_rsrc_err2;
	/* add dependency*/
	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
	if (result)
		goto add_dpnd_err;
	/* setup Performance profile */
	memset(&profile, 0, sizeof(profile));
	profile.max_supported_bandwidth_mbps = 100;
	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
			&profile);
	if (result)
		goto set_perf_err;
	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
			&profile);
	if (result)
		goto set_perf_err;
	return result;

set_perf_err:
	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
add_dpnd_err:
	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (result < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, result);
create_rsrc_err2:
	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (result < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, result);
create_rsrc_err1:
	destroy_workqueue(ipa_rm_q6_workqueue);
	return result;
}
1872
/*
 * q6_deinitialize_rm() - tear down everything q6_initialize_rm() set up:
 * drop the Q6_PROD -> APPS_CONS dependency, delete both RM resources and
 * destroy the RM workqueue.  Failures are logged but not propagated.
 */
void q6_deinitialize_rm(void)
{
	int ret;

	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
			ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, ret);

	/* workqueue may be NULL if q6_initialize_rm() never ran/failed */
	if (ipa_rm_q6_workqueue)
		destroy_workqueue(ipa_rm_q6_workqueue);
}
1895
1896static void wake_tx_queue(struct work_struct *work)
1897{
1898 if (ipa_netdevs[0]) {
1899 __netif_tx_lock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
1900 netif_wake_queue(ipa_netdevs[0]);
1901 __netif_tx_unlock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
1902 }
1903}
1904
1905/**
1906 * ipa_rm_resource_granted() - Called upon
1907 * IPA_RM_RESOURCE_GRANTED event. Wakes up queue is was stopped.
1908 *
1909 * @work: work object supplied ny workqueue
1910 *
1911 * Return codes:
1912 * None
1913 */
1914static void ipa_rm_resource_granted(void *dev)
1915{
1916 IPAWANDBG("Resource Granted - starting queue\n");
1917 schedule_work(&ipa_tx_wakequeue_work);
1918}
1919
1920/**
1921 * ipa_rm_notify() - Callback function for RM events. Handles
1922 * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
1923 * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
1924 * workqueue.
1925 *
1926 * @dev: network device
1927 * @event: IPA RM event
1928 * @data: Additional data provided by IPA RM
1929 *
1930 * Return codes:
1931 * None
1932 */
1933static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
1934 unsigned long data)
1935{
1936 struct wwan_private *wwan_ptr = netdev_priv(dev);
1937
1938 pr_debug("%s: event %d\n", __func__, event);
1939 switch (event) {
1940 case IPA_RM_RESOURCE_GRANTED:
1941 if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
1942 complete_all(&wwan_ptr->resource_granted_completion);
1943 break;
1944 }
1945 ipa_rm_resource_granted(dev);
1946 break;
1947 case IPA_RM_RESOURCE_RELEASED:
1948 break;
1949 default:
1950 pr_err("%s: unknown event %d\n", __func__, event);
1951 break;
1952 }
1953}
1954
1955/* IPA_RM related functions end*/
1956
/* Subsystem-restart (SSR) notifier block; ssr_notifier_cb() is defined
 * later in the file, so only its prototype appears here.
 */
static int ssr_notifier_cb(struct notifier_block *this,
			unsigned long code,
			void *data);

static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};
1964
/*
 * get_ipa_rmnet_dts_configuration() - read driver options from device tree
 * @pdev: platform device whose of_node carries the properties
 * @ipa_rmnet_drv_res: output; filled with the parsed configuration
 *
 * Reads the boolean properties qcom,rmnet-ipa-ssr, qcom,ipa-loaduC,
 * qcom,ipa-advertise-sg-support and qcom,ipa-napi-enable, plus the
 * optional qcom,wan-rx-desc-size u32 (defaults to
 * IPA_WWAN_CONS_DESC_FIFO_SZ when absent).
 *
 * Always returns 0; a missing wan-rx-desc-size is not an error.
 */
static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
		struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
	int result;

	/* default, overridden below if the DT property is present */
	ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
	ipa_rmnet_drv_res->ipa_rmnet_ssr =
			of_property_read_bool(pdev->dev.of_node,
			"qcom,rmnet-ipa-ssr");
	pr_info("IPA SSR support = %s\n",
		ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
	ipa_rmnet_drv_res->ipa_loaduC =
			of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-loaduC");
	pr_info("IPA ipa-loaduC = %s\n",
		ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");

	ipa_rmnet_drv_res->ipa_advertise_sg_support =
		of_property_read_bool(pdev->dev.of_node,
		"qcom,ipa-advertise-sg-support");
	pr_info("IPA SG support = %s\n",
		ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");

	ipa_rmnet_drv_res->ipa_napi_enable =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-napi-enable");
	pr_info("IPA Napi Enable = %s\n",
		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");

	/* Get IPA WAN RX desc fifo size */
	result = of_property_read_u32(pdev->dev.of_node,
			"qcom,wan-rx-desc-size",
			&ipa_rmnet_drv_res->wan_rx_desc_size);
	if (result)
		pr_info("using default for wan-rx-desc-size = %u\n",
			ipa_rmnet_drv_res->wan_rx_desc_size);
	else
		IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
			ipa_rmnet_drv_res->wan_rx_desc_size);

	return 0;
}
2007
2008struct ipa_rmnet_context ipa_rmnet_ctx;
2009
2010/**
2011 * ipa_wwan_probe() - Initialized the module and registers as a
2012 * network interface to the network stack
2013 *
2014 * Return codes:
2015 * 0: success
2016 * -ENOMEM: No memory available
2017 * -EFAULT: Internal error
2018 * -ENODEV: IPA driver not loaded
2019 */
2020static int ipa_wwan_probe(struct platform_device *pdev)
2021{
2022 int ret, i;
2023 struct net_device *dev;
2024 struct wwan_private *wwan_ptr;
2025 struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */
2026 struct ipa_rm_perf_profile profile; /* IPA_RM */
2027
2028 pr_info("rmnet_ipa started initialization\n");
2029
2030 if (!ipa2_is_ready()) {
2031 IPAWANERR("IPA driver not loaded\n");
2032 return -ENODEV;
2033 }
2034
2035 ret = get_ipa_rmnet_dts_configuration(pdev, &ipa_rmnet_res);
2036 ipa_rmnet_ctx.ipa_rmnet_ssr = ipa_rmnet_res.ipa_rmnet_ssr;
2037
2038 ret = ipa_init_q6_smem();
2039 if (ret) {
2040 IPAWANERR("ipa_init_q6_smem failed!\n");
2041 return ret;
2042 }
2043
2044 /* initialize tx/rx enpoint setup */
2045 memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
2046 memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
2047
2048 /* initialize ex property setup */
2049 num_q6_rule = 0;
2050 old_num_q6_rule = 0;
2051 rmnet_index = 0;
2052 egress_set = false;
2053 a7_ul_flt_set = false;
2054 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
2055 memset(&mux_channel[i], 0, sizeof(struct rmnet_mux_val));
2056
2057 /* start A7 QMI service/client */
2058 if (ipa_rmnet_res.ipa_loaduC)
2059 /* Android platform loads uC */
2060 ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
2061 else
2062 /* LE platform not loads uC */
2063 ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
2064
2065 /* construct default WAN RT tbl for IPACM */
2066 ret = ipa_setup_a7_qmap_hdr();
2067 if (ret)
2068 goto setup_a7_qmap_hdr_err;
2069 ret = ipa_setup_dflt_wan_rt_tables();
2070 if (ret)
2071 goto setup_dflt_wan_rt_tables_err;
2072
2073 if (!atomic_read(&is_ssr)) {
2074 /* Start transport-driver fd ioctl for ipacm for first init */
2075 ret = wan_ioctl_init();
2076 if (ret)
2077 goto wan_ioctl_init_err;
2078 } else {
2079 /* Enable sending QMI messages after SSR */
2080 wan_ioctl_enable_qmi_messages();
2081 }
2082
2083 /* initialize wan-driver netdev */
2084 dev = alloc_netdev(sizeof(struct wwan_private),
2085 IPA_WWAN_DEV_NAME,
2086 NET_NAME_UNKNOWN,
2087 ipa_wwan_setup);
2088 if (!dev) {
2089 IPAWANERR("no memory for netdev\n");
2090 ret = -ENOMEM;
2091 goto alloc_netdev_err;
2092 }
2093 ipa_netdevs[0] = dev;
2094 wwan_ptr = netdev_priv(dev);
2095 memset(wwan_ptr, 0, sizeof(*wwan_ptr));
2096 IPAWANDBG("wwan_ptr (private) = %p", wwan_ptr);
2097 wwan_ptr->net = dev;
2098 wwan_ptr->outstanding_high_ctl = DEFAULT_OUTSTANDING_HIGH_CTL;
2099 wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
2100 wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW;
2101 atomic_set(&wwan_ptr->outstanding_pkts, 0);
2102 spin_lock_init(&wwan_ptr->lock);
2103 init_completion(&wwan_ptr->resource_granted_completion);
2104
2105 if (!atomic_read(&is_ssr)) {
2106 /* IPA_RM configuration starts */
2107 ret = q6_initialize_rm();
2108 if (ret) {
2109 IPAWANERR("%s: q6_initialize_rm failed, ret: %d\n",
2110 __func__, ret);
2111 goto q6_init_err;
2112 }
2113 }
2114
2115 memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
2116 ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
2117 ipa_rm_params.reg_params.user_data = dev;
2118 ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
2119 ret = ipa_rm_create_resource(&ipa_rm_params);
2120 if (ret) {
2121 pr_err("%s: unable to create resourse %d in IPA RM\n",
2122 __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
2123 goto create_rsrc_err;
2124 }
2125 ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
2126 IPA_RM_INACTIVITY_TIMER);
2127 if (ret) {
2128 pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
2129 __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
2130 goto timer_init_err;
2131 }
2132 /* add dependency */
2133 ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2134 IPA_RM_RESOURCE_Q6_CONS);
2135 if (ret)
2136 goto add_dpnd_err;
2137 /* setup Performance profile */
2138 memset(&profile, 0, sizeof(profile));
2139 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
2140 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
2141 &profile);
2142 if (ret)
2143 goto set_perf_err;
2144 /* IPA_RM configuration ends */
2145
2146 /* Enable SG support in netdevice. */
2147 if (ipa_rmnet_res.ipa_advertise_sg_support)
2148 dev->hw_features |= NETIF_F_SG;
2149
2150 /* Enable NAPI support in netdevice. */
2151 if (ipa_rmnet_res.ipa_napi_enable) {
2152 netif_napi_add(dev, &(wwan_ptr->napi),
2153 ipa_rmnet_poll, NAPI_WEIGHT);
2154 }
2155
2156 ret = register_netdev(dev);
2157 if (ret) {
2158 IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
2159 0, ret);
2160 goto set_perf_err;
2161 }
2162
2163 IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n",
2164 ipa_netdevs[0]->name);
2165 if (ret) {
2166 IPAWANERR("default configuration failed rc=%d\n",
2167 ret);
2168 goto config_err;
2169 }
2170 atomic_set(&is_initialized, 1);
2171 if (!atomic_read(&is_ssr)) {
2172 /* offline charging mode */
2173 ipa2_proxy_clk_unvote();
2174 }
2175 atomic_set(&is_ssr, 0);
2176
2177 pr_info("rmnet_ipa completed initialization\n");
2178 return 0;
2179config_err:
2180 if (ipa_rmnet_res.ipa_napi_enable)
2181 netif_napi_del(&(wwan_ptr->napi));
2182 unregister_netdev(ipa_netdevs[0]);
2183set_perf_err:
2184 ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2185 IPA_RM_RESOURCE_Q6_CONS);
2186 if (ret)
2187 IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
2188 IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
2189 ret);
2190add_dpnd_err:
2191 ret = ipa_rm_inactivity_timer_destroy(
2192 IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
2193 if (ret)
2194 IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
2195 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2196timer_init_err:
2197 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2198 if (ret)
2199 IPAWANERR("Error deleting resource %d, ret=%d\n",
2200 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2201create_rsrc_err:
Mohammed Javid4e3015d2017-07-31 13:27:18 +05302202
2203 if (!atomic_read(&is_ssr))
2204 q6_deinitialize_rm();
2205
Amir Levy9659e592016-10-27 18:08:27 +03002206q6_init_err:
2207 free_netdev(ipa_netdevs[0]);
2208 ipa_netdevs[0] = NULL;
2209alloc_netdev_err:
2210 wan_ioctl_deinit();
2211wan_ioctl_init_err:
2212 ipa_del_dflt_wan_rt_tables();
2213setup_dflt_wan_rt_tables_err:
2214 ipa_del_a7_qmap_hdr();
2215setup_a7_qmap_hdr_err:
2216 ipa_qmi_service_exit();
2217 atomic_set(&is_ssr, 0);
2218 return ret;
2219}
2220
/*
 * ipa_wwan_remove() - platform driver remove callback.
 *
 * Tears down, in order: the IPA->APPS pipe (under the pipe-handle
 * mutex), NAPI, the netdev registration, the IPA RM dependency/timer/
 * resource, pending work items, the netdev itself, and finally the
 * wan_ioctl interface (kept alive across SSR), routing tables and
 * QMAP headers. Teardown errors are logged but do not abort removal.
 *
 * Always returns 0.
 */
static int ipa_wwan_remove(struct platform_device *pdev)
{
	int ret;
	struct wwan_private *wwan_ptr;

	wwan_ptr = netdev_priv(ipa_netdevs[0]);

	pr_info("rmnet_ipa started deinitialization\n");
	/* serialize pipe teardown against users of ipa_to_apps_hdl */
	mutex_lock(&ipa_to_apps_pipe_handle_guard);
	ret = ipa2_teardown_sys_pipe(ipa_to_apps_hdl);
	if (ret < 0)
		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
	else
		ipa_to_apps_hdl = -1;
	if (ipa_rmnet_res.ipa_napi_enable)
		netif_napi_del(&(wwan_ptr->napi));
	mutex_unlock(&ipa_to_apps_pipe_handle_guard);
	unregister_netdev(ipa_netdevs[0]);
	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
			ret);
	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR(
		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	cancel_work_sync(&ipa_tx_wakequeue_work);
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	free_netdev(ipa_netdevs[0]);
	ipa_netdevs[0] = NULL;
	/* No need to remove wwan_ioctl during SSR */
	if (!atomic_read(&is_ssr))
		wan_ioctl_deinit();
	ipa_del_dflt_wan_rt_tables();
	ipa_del_a7_qmap_hdr();
	ipa_del_mux_qmap_hdrs();
	if (ipa_qmi_ctx && ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false)
		wwan_del_ul_flt_rule_to_ipa();
	ipa_cleanup_deregister_intf();
	atomic_set(&is_initialized, 0);
	pr_info("rmnet_ipa completed deinitialization\n");
	return 0;
}
2271
2272/**
2273* rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
2274* @dev: pointer to device
2275*
2276* This callback will be invoked by the runtime_pm framework when an AP suspend
2277* operation is invoked, usually by pressing a suspend button.
2278*
2279* Returns -EAGAIN to runtime_pm framework in case there are pending packets
2280* in the Tx queue. This will postpone the suspend operation until all the
2281* pending packets will be transmitted.
2282*
2283* In case there are no packets to send, releases the WWAN0_PROD entity.
2284* As an outcome, the number of IPA active clients should be decremented
2285* until IPA clocks can be gated.
2286*/
2287static int rmnet_ipa_ap_suspend(struct device *dev)
2288{
2289 struct net_device *netdev = ipa_netdevs[0];
2290 struct wwan_private *wwan_ptr = netdev_priv(netdev);
2291
2292 IPAWANDBG("Enter...\n");
2293 /* Do not allow A7 to suspend in case there are oustanding packets */
2294 if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
2295 IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
2296 return -EAGAIN;
2297 }
2298
2299 /* Make sure that there is no Tx operation ongoing */
2300 netif_tx_lock_bh(netdev);
2301 ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2302 netif_tx_unlock_bh(netdev);
2303 IPAWANDBG("Exit\n");
2304
2305 return 0;
2306}
2307
2308/**
2309* rmnet_ipa_ap_resume() - resume callback for runtime_pm
2310* @dev: pointer to device
2311*
2312* This callback will be invoked by the runtime_pm framework when an AP resume
2313* operation is invoked.
2314*
2315* Enables the network interface queue and returns success to the
2316* runtime_pm framework.
2317*/
2318static int rmnet_ipa_ap_resume(struct device *dev)
2319{
2320 struct net_device *netdev = ipa_netdevs[0];
2321
2322 IPAWANDBG("Enter...\n");
2323 netif_wake_queue(netdev);
2324 IPAWANDBG("Exit\n");
2325
2326 return 0;
2327}
2328
2329static void ipa_stop_polling_stats(void)
2330{
2331 cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
2332 ipa_rmnet_ctx.polling_interval = 0;
2333}
2334
/* Device-tree match table: binds this driver to "qcom,rmnet-ipa" nodes */
static const struct of_device_id rmnet_ipa_dt_match[] = {
	{.compatible = "qcom,rmnet-ipa"},
	{},
};
MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);

/* PM callbacks run in the noirq phase so TX is already quiesced */
static const struct dev_pm_ops rmnet_ipa_pm_ops = {
	.suspend_noirq = rmnet_ipa_ap_suspend,
	.resume_noirq = rmnet_ipa_ap_resume,
};

/* Platform driver; registered/unregistered across SSR by ssr_notifier_cb() */
static struct platform_driver rmnet_ipa_driver = {
	.driver = {
		.name = "rmnet_ipa",
		.owner = THIS_MODULE,
		.pm = &rmnet_ipa_pm_ops,
		.of_match_table = rmnet_ipa_dt_match,
	},
	.probe = ipa_wwan_probe,
	.remove = ipa_wwan_remove,
};
2356
Skylar Chang09e0e252017-03-20 14:51:29 -07002357/**
2358 * rmnet_ipa_send_ssr_notification(bool ssr_done) - send SSR notification
2359 *
2360 * This function sends the SSR notification before modem shutdown and
2361 * after_powerup from SSR framework, to user-space module
2362 */
2363static void rmnet_ipa_send_ssr_notification(bool ssr_done)
2364{
2365 struct ipa_msg_meta msg_meta;
2366 int rc;
2367
2368 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2369 if (ssr_done)
2370 msg_meta.msg_type = IPA_SSR_AFTER_POWERUP;
2371 else
2372 msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN;
2373 rc = ipa_send_msg(&msg_meta, NULL, NULL);
2374 if (rc) {
2375 IPAWANERR("ipa_send_msg failed: %d\n", rc);
2376 return;
2377 }
2378}
2379
Amir Levy9659e592016-10-27 18:08:27 +03002380static int ssr_notifier_cb(struct notifier_block *this,
2381 unsigned long code,
2382 void *data)
2383{
2384 if (ipa_rmnet_ctx.ipa_rmnet_ssr) {
2385 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2386 pr_info("IPA received MPSS BEFORE_SHUTDOWN\n");
Skylar Chang09e0e252017-03-20 14:51:29 -07002387 /* send SSR before-shutdown notification to IPACM */
2388 rmnet_ipa_send_ssr_notification(false);
Amir Levy9659e592016-10-27 18:08:27 +03002389 atomic_set(&is_ssr, 1);
2390 ipa_q6_pre_shutdown_cleanup();
2391 if (ipa_netdevs[0])
2392 netif_stop_queue(ipa_netdevs[0]);
2393 ipa_qmi_stop_workqueues();
2394 wan_ioctl_stop_qmi_messages();
2395 ipa_stop_polling_stats();
2396 if (atomic_read(&is_initialized))
2397 platform_driver_unregister(&rmnet_ipa_driver);
2398 pr_info("IPA BEFORE_SHUTDOWN handling is complete\n");
2399 return NOTIFY_DONE;
2400 }
2401 if (code == SUBSYS_AFTER_SHUTDOWN) {
2402 pr_info("IPA received MPSS AFTER_SHUTDOWN\n");
2403 if (atomic_read(&is_ssr))
2404 ipa_q6_post_shutdown_cleanup();
2405 pr_info("IPA AFTER_SHUTDOWN handling is complete\n");
2406 return NOTIFY_DONE;
2407 }
2408 if (code == SUBSYS_AFTER_POWERUP) {
2409 pr_info("IPA received MPSS AFTER_POWERUP\n");
2410 if (!atomic_read(&is_initialized)
2411 && atomic_read(&is_ssr))
2412 platform_driver_register(&rmnet_ipa_driver);
2413 pr_info("IPA AFTER_POWERUP handling is complete\n");
2414 return NOTIFY_DONE;
2415 }
2416 if (code == SUBSYS_BEFORE_POWERUP) {
2417 pr_info("IPA received MPSS BEFORE_POWERUP\n");
2418 if (atomic_read(&is_ssr))
2419 /* clean up cached QMI msg/handlers */
2420 ipa_qmi_service_exit();
2421 ipa2_proxy_clk_vote();
2422 pr_info("IPA BEFORE_POWERUP handling is complete\n");
2423 return NOTIFY_DONE;
2424 }
2425 }
2426 return NOTIFY_DONE;
2427}
2428
2429/**
2430 * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa2_send_msg
2431 * @buff: pointer to buffer containing the message
2432 * @len: message len
2433 * @type: message type
2434 *
2435 * This function is invoked when ipa2_send_msg is complete (Provided as a
2436 * free function pointer along with the message).
2437 */
2438static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
2439{
2440 if (!buff) {
2441 IPAWANERR("Null buffer\n");
2442 return;
2443 }
2444
2445 if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
2446 type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
2447 IPAWANERR("Wrong type given. buff %p type %d\n",
2448 buff, type);
2449 }
2450 kfree(buff);
2451}
2452
2453/**
2454 * rmnet_ipa_get_stats_and_update(bool reset) - Gets pipe stats from Modem
2455 *
2456 * This function queries the IPA Modem driver for the pipe stats
2457 * via QMI, and updates the user space IPA entity.
2458 */
2459static void rmnet_ipa_get_stats_and_update(bool reset)
2460{
2461 struct ipa_get_data_stats_req_msg_v01 req;
2462 struct ipa_get_data_stats_resp_msg_v01 *resp;
2463 struct ipa_msg_meta msg_meta;
2464 int rc;
2465
2466 resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
2467 GFP_KERNEL);
2468 if (!resp) {
2469 IPAWANERR("Can't allocate memory for stats message\n");
2470 return;
2471 }
2472
2473 memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
2474 memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
2475
2476 req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
2477 if (reset == true) {
2478 req.reset_stats_valid = true;
2479 req.reset_stats = true;
2480 IPAWANERR("Get the latest pipe-stats and reset it\n");
2481 }
2482
2483 rc = ipa_qmi_get_data_stats(&req, resp);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002484 if (rc) {
2485 IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc);
2486 kfree(resp);
2487 return;
2488 }
Amir Levy9659e592016-10-27 18:08:27 +03002489
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002490 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2491 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
2492 msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
2493 rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2494 if (rc) {
2495 IPAWANERR("ipa2_send_msg failed: %d\n", rc);
2496 kfree(resp);
2497 return;
Amir Levy9659e592016-10-27 18:08:27 +03002498 }
2499}
2500
2501/**
2502 * tethering_stats_poll_queue() - Stats polling function
2503 * @work - Work entry
2504 *
2505 * This function is scheduled periodically (per the interval) in
2506 * order to poll the IPA Modem driver for the pipe stats.
2507 */
2508static void tethering_stats_poll_queue(struct work_struct *work)
2509{
2510 rmnet_ipa_get_stats_and_update(false);
2511
2512 /* Schedule again only if there's an active polling interval */
2513 if (ipa_rmnet_ctx.polling_interval != 0)
2514 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
2515 msecs_to_jiffies(ipa_rmnet_ctx.polling_interval*1000));
2516}
2517
2518/**
2519 * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
2520 *
2521 * This function retrieves the data usage (used quota) from the IPA Modem driver
2522 * via QMI, and updates IPA user space entity.
2523 */
2524static void rmnet_ipa_get_network_stats_and_update(void)
2525{
2526 struct ipa_get_apn_data_stats_req_msg_v01 req;
2527 struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
2528 struct ipa_msg_meta msg_meta;
2529 int rc;
2530
2531 resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
2532 GFP_KERNEL);
2533 if (!resp) {
2534 IPAWANERR("Can't allocate memory for network stats message\n");
2535 return;
2536 }
2537
2538 memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
2539 memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
2540
2541 req.mux_id_list_valid = true;
2542 req.mux_id_list_len = 1;
2543 req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id;
2544
2545 rc = ipa_qmi_get_network_stats(&req, resp);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002546 if (rc) {
2547 IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc);
2548 kfree(resp);
2549 return;
2550 }
Amir Levy9659e592016-10-27 18:08:27 +03002551
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002552 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2553 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
2554 msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
2555 rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2556 if (rc) {
2557 IPAWANERR("ipa2_send_msg failed: %d\n", rc);
2558 kfree(resp);
2559 return;
Amir Levy9659e592016-10-27 18:08:27 +03002560 }
2561}
2562
2563/**
Skylar Chang09e0e252017-03-20 14:51:29 -07002564 * rmnet_ipa_send_quota_reach_ind() - send quota_reach notification from
2565 * IPA Modem
2566 * This function sends the quota_reach indication from the IPA Modem driver
2567 * via QMI, to user-space module
2568 */
2569static void rmnet_ipa_send_quota_reach_ind(void)
2570{
2571 struct ipa_msg_meta msg_meta;
2572 int rc;
2573
2574 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2575 msg_meta.msg_type = IPA_QUOTA_REACH;
2576 rc = ipa_send_msg(&msg_meta, NULL, NULL);
2577 if (rc) {
2578 IPAWANERR("ipa_send_msg failed: %d\n", rc);
2579 return;
2580 }
2581}
2582
2583/**
Amir Levy9659e592016-10-27 18:08:27 +03002584 * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler
2585 * @data - IOCTL data
2586 *
2587 * This function handles WAN_IOC_POLL_TETHERING_STATS.
2588 * In case polling interval received is 0, polling will stop
2589 * (If there's a polling in progress, it will allow it to finish), and then will
2590 * fetch network stats, and update the IPA user space.
2591 *
2592 * Return codes:
2593 * 0: Success
2594 */
2595int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
2596{
2597 ipa_rmnet_ctx.polling_interval = data->polling_interval_secs;
2598
2599 cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
2600
2601 if (ipa_rmnet_ctx.polling_interval == 0) {
2602 ipa_qmi_stop_data_qouta();
2603 rmnet_ipa_get_network_stats_and_update();
2604 rmnet_ipa_get_stats_and_update(true);
2605 return 0;
2606 }
2607
2608 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
2609 return 0;
2610}
2611
2612/**
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302613 * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler
Amir Levy9659e592016-10-27 18:08:27 +03002614 * @data - IOCTL data
2615 *
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302616 * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface.
Amir Levy9659e592016-10-27 18:08:27 +03002617 * It translates the given interface name to the Modem MUX ID and
2618 * sends the request of the quota to the IPA Modem driver via QMI.
2619 *
2620 * Return codes:
2621 * 0: Success
2622 * -EFAULT: Invalid interface name provided
2623 * other: See ipa_qmi_set_data_quota
2624 */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302625static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data)
Amir Levy9659e592016-10-27 18:08:27 +03002626{
2627 u32 mux_id;
2628 int index;
2629 struct ipa_set_data_usage_quota_req_msg_v01 req;
2630
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302631 /* stop quota */
2632 if (!data->set_quota)
2633 ipa_qmi_stop_data_qouta();
2634
Skylar Changcde17ed2017-06-21 16:51:26 -07002635 /* prevent string buffer overflows */
2636 data->interface_name[IFNAMSIZ-1] = '\0';
2637
Amir Levy9659e592016-10-27 18:08:27 +03002638 index = find_vchannel_name_index(data->interface_name);
2639 IPAWANERR("iface name %s, quota %lu\n",
2640 data->interface_name,
2641 (unsigned long int) data->quota_mbytes);
2642
2643 if (index == MAX_NUM_OF_MUX_CHANNEL) {
2644 IPAWANERR("%s is an invalid iface name\n",
2645 data->interface_name);
2646 return -EFAULT;
2647 }
2648
2649 mux_id = mux_channel[index].mux_id;
2650
2651 ipa_rmnet_ctx.metered_mux_id = mux_id;
2652
2653 memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
2654 req.apn_quota_list_valid = true;
2655 req.apn_quota_list_len = 1;
2656 req.apn_quota_list[0].mux_id = mux_id;
2657 req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
2658
2659 return ipa_qmi_set_data_quota(&req);
2660}
2661
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302662static int rmnet_ipa_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
2663{
2664 struct ipa_set_wifi_quota wifi_quota;
2665 int rc = 0;
2666
2667 memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
2668 wifi_quota.set_quota = data->set_quota;
2669 wifi_quota.quota_bytes = data->quota_mbytes;
2670 IPAWANDBG("iface name %s, quota %lu\n",
2671 data->interface_name,
2672 (unsigned long int) data->quota_mbytes);
2673
2674 rc = ipa2_set_wlan_quota(&wifi_quota);
2675 /* check if wlan-fw takes this quota-set */
2676 if (!wifi_quota.set_valid)
2677 rc = -EFAULT;
2678 return rc;
2679}
2680
2681/**
2682 * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
2683 * @data - IOCTL data
2684 *
2685 * This function handles WAN_IOC_SET_DATA_QUOTA.
2686 * It translates the given interface name to the Modem MUX ID and
2687 * sends the request of the quota to the IPA Modem driver via QMI.
2688 *
2689 * Return codes:
2690 * 0: Success
2691 * -EFAULT: Invalid interface name provided
2692 * other: See ipa_qmi_set_data_quota
2693 */
2694int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
2695{
2696 enum ipa_upstream_type upstream_type;
2697 int rc = 0;
2698
Mohammed Javidbba17b32017-09-26 12:51:14 +05302699 /* prevent string buffer overflows */
2700 data->interface_name[IFNAMSIZ-1] = '\0';
2701
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302702 /* get IPA backhaul type */
2703 upstream_type = find_upstream_type(data->interface_name);
2704
2705 if (upstream_type == IPA_UPSTEAM_MAX) {
2706 IPAWANERR("upstream iface %s not supported\n",
2707 data->interface_name);
2708 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2709 rc = rmnet_ipa_set_data_quota_wifi(data);
2710 if (rc) {
2711 IPAWANERR("set quota on wifi failed\n");
2712 return rc;
2713 }
2714 } else {
2715 rc = rmnet_ipa_set_data_quota_modem(data);
2716 if (rc) {
2717 IPAWANERR("set quota on modem failed\n");
2718 return rc;
2719 }
2720 }
2721 return rc;
2722}
2723
Amir Levy9659e592016-10-27 18:08:27 +03002724 /* rmnet_ipa_set_tether_client_pipe() -
2725 * @data - IOCTL data
2726 *
2727 * This function handles WAN_IOC_SET_DATA_QUOTA.
2728 * It translates the given interface name to the Modem MUX ID and
2729 * sends the request of the quota to the IPA Modem driver via QMI.
2730 *
2731 * Return codes:
2732 * 0: Success
Skylar Chang345c8142016-11-30 14:41:24 -08002733 * -EFAULT: Invalid src/dst pipes provided
Amir Levy9659e592016-10-27 18:08:27 +03002734 * other: See ipa_qmi_set_data_quota
2735 */
2736int rmnet_ipa_set_tether_client_pipe(
2737 struct wan_ioctl_set_tether_client_pipe *data)
2738{
2739 int number, i;
2740
Skylar Chang345c8142016-11-30 14:41:24 -08002741 /* error checking if ul_src_pipe_len valid or not*/
2742 if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
2743 data->ul_src_pipe_len < 0) {
2744 IPAWANERR("UL src pipes %d exceeding max %d\n",
2745 data->ul_src_pipe_len,
2746 QMI_IPA_MAX_PIPES_V01);
2747 return -EFAULT;
2748 }
2749 /* error checking if dl_dst_pipe_len valid or not*/
2750 if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
2751 data->dl_dst_pipe_len < 0) {
2752 IPAWANERR("DL dst pipes %d exceeding max %d\n",
2753 data->dl_dst_pipe_len,
2754 QMI_IPA_MAX_PIPES_V01);
2755 return -EFAULT;
2756 }
2757
Amir Levy9659e592016-10-27 18:08:27 +03002758 IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
2759 data->ipa_client,
2760 data->ul_src_pipe_len,
2761 data->dl_dst_pipe_len,
2762 data->reset_client);
2763 number = data->ul_src_pipe_len;
2764 for (i = 0; i < number; i++) {
2765 IPAWANDBG("UL index-%d pipe %d\n", i,
2766 data->ul_src_pipe_list[i]);
2767 if (data->reset_client)
2768 ipa_set_client(data->ul_src_pipe_list[i],
2769 0, false);
2770 else
2771 ipa_set_client(data->ul_src_pipe_list[i],
2772 data->ipa_client, true);
2773 }
2774 number = data->dl_dst_pipe_len;
2775 for (i = 0; i < number; i++) {
2776 IPAWANDBG("DL index-%d pipe %d\n", i,
2777 data->dl_dst_pipe_list[i]);
2778 if (data->reset_client)
2779 ipa_set_client(data->dl_dst_pipe_list[i],
2780 0, false);
2781 else
2782 ipa_set_client(data->dl_dst_pipe_list[i],
2783 data->ipa_client, false);
2784 }
2785 return 0;
2786}
2787
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302788static int rmnet_ipa_query_tethering_stats_wifi(
2789 struct wan_ioctl_query_tether_stats *data, bool reset)
2790{
2791 struct ipa_get_wdi_sap_stats *sap_stats;
2792 int rc;
2793
2794 sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
2795 GFP_KERNEL);
2796 if (!sap_stats)
2797 return -ENOMEM;
2798
2799 sap_stats->reset_stats = reset;
2800 IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);
2801
2802 rc = ipa2_get_wlan_stats(sap_stats);
2803 if (rc) {
2804 kfree(sap_stats);
2805 return rc;
2806 } else if (reset) {
2807 kfree(sap_stats);
2808 return 0;
2809 }
2810
2811 if (sap_stats->stats_valid) {
2812 data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
2813 data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
2814 data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
2815 data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
2816 data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
2817 data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
2818 data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
2819 data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
2820 }
2821
2822 IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
2823 (unsigned long int) data->ipv4_rx_packets,
2824 (unsigned long int) data->ipv6_rx_packets,
2825 (unsigned long int) data->ipv4_rx_bytes,
2826 (unsigned long int) data->ipv6_rx_bytes);
2827 IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
2828 (unsigned long int) data->ipv4_tx_packets,
2829 (unsigned long int) data->ipv6_tx_packets,
2830 (unsigned long int) data->ipv4_tx_bytes,
2831 (unsigned long int) data->ipv6_tx_bytes);
2832
2833 kfree(sap_stats);
2834 return rc;
2835}
2836
2837int rmnet_ipa_query_tethering_stats_modem(
2838 struct wan_ioctl_query_tether_stats *data,
2839 bool reset
2840)
Amir Levy9659e592016-10-27 18:08:27 +03002841{
2842 struct ipa_get_data_stats_req_msg_v01 *req;
2843 struct ipa_get_data_stats_resp_msg_v01 *resp;
2844 int pipe_len, rc;
2845
2846 req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
2847 GFP_KERNEL);
2848 if (!req) {
2849 IPAWANERR("failed to allocate memory for stats message\n");
2850 return -ENOMEM;
2851 }
2852 resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
2853 GFP_KERNEL);
2854 if (!resp) {
2855 IPAWANERR("failed to allocate memory for stats message\n");
2856 kfree(req);
2857 return -ENOMEM;
2858 }
2859 memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
2860 memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
2861
2862 req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
2863 if (reset) {
2864 req->reset_stats_valid = true;
2865 req->reset_stats = true;
2866 IPAWANERR("reset the pipe stats\n");
2867 } else {
2868 /* print tethered-client enum */
2869 IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
2870 }
2871
2872 rc = ipa_qmi_get_data_stats(req, resp);
2873 if (rc) {
2874 IPAWANERR("can't get ipa_qmi_get_data_stats\n");
2875 kfree(req);
2876 kfree(resp);
2877 return rc;
Mohammed Javid2cee34a2017-06-14 12:40:34 +05302878 } else if (data == NULL) {
2879 kfree(req);
2880 kfree(resp);
2881 return 0;
Amir Levy9659e592016-10-27 18:08:27 +03002882 }
2883
2884 if (resp->dl_dst_pipe_stats_list_valid) {
2885 for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
2886 pipe_len++) {
2887 IPAWANDBG("Check entry(%d) dl_dst_pipe(%d)\n",
2888 pipe_len, resp->dl_dst_pipe_stats_list
2889 [pipe_len].pipe_index);
2890 IPAWANDBG("dl_p_v4(%lu)v6(%lu) dl_b_v4(%lu)v6(%lu)\n",
2891 (unsigned long int) resp->
2892 dl_dst_pipe_stats_list[pipe_len].
2893 num_ipv4_packets,
2894 (unsigned long int) resp->
2895 dl_dst_pipe_stats_list[pipe_len].
2896 num_ipv6_packets,
2897 (unsigned long int) resp->
2898 dl_dst_pipe_stats_list[pipe_len].
2899 num_ipv4_bytes,
2900 (unsigned long int) resp->
2901 dl_dst_pipe_stats_list[pipe_len].
2902 num_ipv6_bytes);
2903 if (ipa_get_client_uplink(resp->
2904 dl_dst_pipe_stats_list[pipe_len].
2905 pipe_index) == false) {
2906 if (data->ipa_client == ipa_get_client(resp->
2907 dl_dst_pipe_stats_list[pipe_len].
2908 pipe_index)) {
2909 /* update the DL stats */
2910 data->ipv4_rx_packets += resp->
2911 dl_dst_pipe_stats_list[pipe_len].
2912 num_ipv4_packets;
2913 data->ipv6_rx_packets += resp->
2914 dl_dst_pipe_stats_list[pipe_len].
2915 num_ipv6_packets;
2916 data->ipv4_rx_bytes += resp->
2917 dl_dst_pipe_stats_list[pipe_len].
2918 num_ipv4_bytes;
2919 data->ipv6_rx_bytes += resp->
2920 dl_dst_pipe_stats_list[pipe_len].
2921 num_ipv6_bytes;
2922 }
2923 }
2924 }
2925 }
2926 IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
2927 (unsigned long int) data->ipv4_rx_packets,
2928 (unsigned long int) data->ipv6_rx_packets,
2929 (unsigned long int) data->ipv4_rx_bytes,
2930 (unsigned long int) data->ipv6_rx_bytes);
2931
2932 if (resp->ul_src_pipe_stats_list_valid) {
2933 for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
2934 pipe_len++) {
2935 IPAWANDBG("Check entry(%d) ul_dst_pipe(%d)\n",
2936 pipe_len,
2937 resp->ul_src_pipe_stats_list[pipe_len].
2938 pipe_index);
2939 IPAWANDBG("ul_p_v4(%lu)v6(%lu)ul_b_v4(%lu)v6(%lu)\n",
2940 (unsigned long int) resp->
2941 ul_src_pipe_stats_list[pipe_len].
2942 num_ipv4_packets,
2943 (unsigned long int) resp->
2944 ul_src_pipe_stats_list[pipe_len].
2945 num_ipv6_packets,
2946 (unsigned long int) resp->
2947 ul_src_pipe_stats_list[pipe_len].
2948 num_ipv4_bytes,
2949 (unsigned long int) resp->
2950 ul_src_pipe_stats_list[pipe_len].
2951 num_ipv6_bytes);
2952 if (ipa_get_client_uplink(resp->
2953 ul_src_pipe_stats_list[pipe_len].
2954 pipe_index) == true) {
2955 if (data->ipa_client == ipa_get_client(resp->
2956 ul_src_pipe_stats_list[pipe_len].
2957 pipe_index)) {
2958 /* update the DL stats */
2959 data->ipv4_tx_packets += resp->
2960 ul_src_pipe_stats_list[pipe_len].
2961 num_ipv4_packets;
2962 data->ipv6_tx_packets += resp->
2963 ul_src_pipe_stats_list[pipe_len].
2964 num_ipv6_packets;
2965 data->ipv4_tx_bytes += resp->
2966 ul_src_pipe_stats_list[pipe_len].
2967 num_ipv4_bytes;
2968 data->ipv6_tx_bytes += resp->
2969 ul_src_pipe_stats_list[pipe_len].
2970 num_ipv6_bytes;
2971 }
2972 }
2973 }
2974 }
2975 IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
2976 (unsigned long int) data->ipv4_tx_packets,
2977 (unsigned long int) data->ipv6_tx_packets,
2978 (unsigned long int) data->ipv4_tx_bytes,
2979 (unsigned long int) data->ipv6_tx_bytes);
2980 kfree(req);
2981 kfree(resp);
2982 return 0;
2983}
2984
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302985int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
2986 bool reset)
2987{
2988 enum ipa_upstream_type upstream_type;
2989 int rc = 0;
2990
Mohammed Javidbba17b32017-09-26 12:51:14 +05302991 /* prevent string buffer overflows */
2992 data->upstreamIface[IFNAMSIZ-1] = '\0';
2993 data->tetherIface[IFNAMSIZ-1] = '\0';
2994
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302995 /* get IPA backhaul type */
2996 upstream_type = find_upstream_type(data->upstreamIface);
2997
2998 if (upstream_type == IPA_UPSTEAM_MAX) {
2999 IPAWANERR("upstreamIface %s not supported\n",
3000 data->upstreamIface);
3001 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
3002 IPAWANDBG_LOW(" query wifi-backhaul stats\n");
3003 rc = rmnet_ipa_query_tethering_stats_wifi(
3004 data, false);
3005 if (rc) {
3006 IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
3007 return rc;
3008 }
3009 } else {
3010 IPAWANDBG_LOW(" query modem-backhaul stats\n");
3011 rc = rmnet_ipa_query_tethering_stats_modem(
3012 data, false);
3013 if (rc) {
3014 IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
3015 return rc;
3016 }
3017 }
3018 return rc;
3019}
3020
Skylar Chang09e0e252017-03-20 14:51:29 -07003021int rmnet_ipa_query_tethering_stats_all(
3022 struct wan_ioctl_query_tether_stats_all *data)
3023{
3024 struct wan_ioctl_query_tether_stats tether_stats;
3025 enum ipa_upstream_type upstream_type;
3026 int rc = 0;
3027
3028 memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
Mohammed Javidbba17b32017-09-26 12:51:14 +05303029
3030 /* prevent string buffer overflows */
3031 data->upstreamIface[IFNAMSIZ-1] = '\0';
3032
Skylar Chang09e0e252017-03-20 14:51:29 -07003033 /* get IPA backhaul type */
3034 upstream_type = find_upstream_type(data->upstreamIface);
3035
3036 if (upstream_type == IPA_UPSTEAM_MAX) {
3037 IPAWANERR(" Wrong upstreamIface name %s\n",
3038 data->upstreamIface);
3039 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
3040 IPAWANDBG_LOW(" query wifi-backhaul stats\n");
3041 rc = rmnet_ipa_query_tethering_stats_wifi(
3042 &tether_stats, data->reset_stats);
3043 if (rc) {
3044 IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
3045 return rc;
3046 }
3047 data->tx_bytes = tether_stats.ipv4_tx_bytes
3048 + tether_stats.ipv6_tx_bytes;
3049 data->rx_bytes = tether_stats.ipv4_rx_bytes
3050 + tether_stats.ipv6_rx_bytes;
3051 } else {
3052 IPAWANDBG_LOW(" query modem-backhaul stats\n");
3053 tether_stats.ipa_client = data->ipa_client;
3054 rc = rmnet_ipa_query_tethering_stats_modem(
3055 &tether_stats, data->reset_stats);
3056 if (rc) {
3057 IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
3058 return rc;
3059 }
3060 data->tx_bytes = tether_stats.ipv4_tx_bytes
3061 + tether_stats.ipv6_tx_bytes;
3062 data->rx_bytes = tether_stats.ipv4_rx_bytes
3063 + tether_stats.ipv6_rx_bytes;
3064 }
3065 return rc;
3066}
3067
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303068int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
3069{
3070 enum ipa_upstream_type upstream_type;
Mohammed Javid2cee34a2017-06-14 12:40:34 +05303071 struct wan_ioctl_query_tether_stats tether_stats;
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303072 int rc = 0;
3073
Mohammed Javid2cee34a2017-06-14 12:40:34 +05303074 memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
3075
Mohammed Javidbba17b32017-09-26 12:51:14 +05303076 /* prevent string buffer overflows */
3077 data->upstreamIface[IFNAMSIZ-1] = '\0';
3078
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303079 /* get IPA backhaul type */
3080 upstream_type = find_upstream_type(data->upstreamIface);
3081
3082 if (upstream_type == IPA_UPSTEAM_MAX) {
3083 IPAWANERR("upstream iface %s not supported\n",
3084 data->upstreamIface);
3085 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
3086 IPAWANDBG(" reset wifi-backhaul stats\n");
3087 rc = rmnet_ipa_query_tethering_stats_wifi(
3088 NULL, true);
3089 if (rc) {
3090 IPAWANERR("reset WLAN stats failed\n");
3091 return rc;
3092 }
3093 } else {
3094 IPAWANDBG(" reset modem-backhaul stats\n");
3095 rc = rmnet_ipa_query_tethering_stats_modem(
Mohammed Javid2cee34a2017-06-14 12:40:34 +05303096 &tether_stats, true);
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303097 if (rc) {
3098 IPAWANERR("reset MODEM stats failed\n");
3099 return rc;
3100 }
3101 }
3102 return rc;
3103}
3104
3105
Amir Levy9659e592016-10-27 18:08:27 +03003106/**
3107 * ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
3108 * @mux_id - The MUX ID on which the quota has been reached
3109 *
3110 * This function broadcasts a Netlink event using the kobject of the
3111 * rmnet_ipa interface in order to alert the user space that the quota
3112 * on the specific interface which matches the mux_id has been reached.
3113 *
3114 */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303115void ipa_broadcast_quota_reach_ind(u32 mux_id,
3116 enum ipa_upstream_type upstream_type)
Amir Levy9659e592016-10-27 18:08:27 +03003117{
3118 char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
3119 char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
3120 char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
3121 char *envp[IPA_UEVENT_NUM_EVNP] = {
3122 alert_msg, iface_name_l, iface_name_m, NULL };
3123 int res;
3124 int index;
3125
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303126 /* check upstream_type*/
3127 if (upstream_type == IPA_UPSTEAM_MAX) {
3128 IPAWANERR("upstreamIface type %d not supported\n",
3129 upstream_type);
Amir Levy9659e592016-10-27 18:08:27 +03003130 return;
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303131 } else if (upstream_type == IPA_UPSTEAM_MODEM) {
3132 index = find_mux_channel_index(mux_id);
3133 if (index == MAX_NUM_OF_MUX_CHANNEL) {
3134 IPAWANERR("%u is an mux ID\n", mux_id);
3135 return;
3136 }
Amir Levy9659e592016-10-27 18:08:27 +03003137 }
3138
3139 res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
3140 "ALERT_NAME=%s", "quotaReachedAlert");
3141 if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
3142 IPAWANERR("message too long (%d)", res);
3143 return;
3144 }
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303145
Amir Levy9659e592016-10-27 18:08:27 +03003146 /* posting msg for L-release for CNE */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303147 if (upstream_type == IPA_UPSTEAM_MODEM) {
Amir Levy9659e592016-10-27 18:08:27 +03003148 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303149 "UPSTREAM=%s", mux_channel[index].vchannel_name);
3150 } else {
3151 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
3152 "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
3153 }
Amir Levy9659e592016-10-27 18:08:27 +03003154 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
3155 IPAWANERR("message too long (%d)", res);
3156 return;
3157 }
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303158
Amir Levy9659e592016-10-27 18:08:27 +03003159 /* posting msg for M-release for CNE */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303160 if (upstream_type == IPA_UPSTEAM_MODEM) {
Amir Levy9659e592016-10-27 18:08:27 +03003161 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303162 "INTERFACE=%s", mux_channel[index].vchannel_name);
3163 } else {
3164 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
3165 "INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
3166 }
Amir Levy9659e592016-10-27 18:08:27 +03003167 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
3168 IPAWANERR("message too long (%d)", res);
3169 return;
3170 }
3171
3172 IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
3173 alert_msg, iface_name_l, iface_name_m);
3174 kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp);
Skylar Chang09e0e252017-03-20 14:51:29 -07003175
3176 rmnet_ipa_send_quota_reach_ind();
Amir Levy9659e592016-10-27 18:08:27 +03003177}
3178
3179/**
3180 * ipa_q6_handshake_complete() - Perform operations once Q6 is up
3181 * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
3182 *
3183 * This function is invoked once the handshake between the IPA AP driver
3184 * and IPA Q6 driver is complete. At this point, it is possible to perform
3185 * operations which can't be performed until IPA Q6 driver is up.
3186 *
3187 */
3188void ipa_q6_handshake_complete(bool ssr_bootup)
3189{
3190 /* It is required to recover the network stats after SSR recovery */
3191 if (ssr_bootup) {
3192 /*
3193 * In case the uC is required to be loaded by the Modem,
3194 * the proxy vote will be removed only when uC loading is
3195 * complete and indication is received by the AP. After SSR,
3196 * uC is already loaded. Therefore, proxy vote can be removed
3197 * once Modem init is complete.
3198 */
3199 ipa2_proxy_clk_unvote();
3200
Skylar Chang09e0e252017-03-20 14:51:29 -07003201 /* send SSR power-up notification to IPACM */
3202 rmnet_ipa_send_ssr_notification(true);
3203
Amir Levy9659e592016-10-27 18:08:27 +03003204 /*
3205 * It is required to recover the network stats after
3206 * SSR recovery
3207 */
3208 rmnet_ipa_get_network_stats_and_update();
3209
3210 /* Enable holb monitoring on Q6 pipes. */
3211 ipa_q6_monitor_holb_mitigation(true);
3212 }
3213}
3214
3215static int __init ipa_wwan_init(void)
3216{
3217 atomic_set(&is_initialized, 0);
3218 atomic_set(&is_ssr, 0);
3219
3220 mutex_init(&ipa_to_apps_pipe_handle_guard);
Skylar Chang8438ba52017-03-15 21:27:35 -07003221 mutex_init(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03003222 ipa_to_apps_hdl = -1;
3223
3224 ipa_qmi_init();
3225
3226 /* Register for Modem SSR */
3227 subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM,
3228 &ssr_notifier);
3229 if (!IS_ERR(subsys_notify_handle))
3230 return platform_driver_register(&rmnet_ipa_driver);
3231 else
3232 return (int)PTR_ERR(subsys_notify_handle);
3233}
3234
3235static void __exit ipa_wwan_cleanup(void)
3236{
3237 int ret;
3238
3239 ipa_qmi_cleanup();
3240 mutex_destroy(&ipa_to_apps_pipe_handle_guard);
Skylar Chang8438ba52017-03-15 21:27:35 -07003241 mutex_destroy(&add_mux_channel_lock);
Amir Levy9659e592016-10-27 18:08:27 +03003242 ret = subsys_notif_unregister_notifier(subsys_notify_handle,
3243 &ssr_notifier);
3244 if (ret)
3245 IPAWANERR(
3246 "Error subsys_notif_unregister_notifier system %s, ret=%d\n",
3247 SUBSYS_MODEM, ret);
3248 platform_driver_unregister(&rmnet_ipa_driver);
3249}
3250
3251static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type)
3252{
3253 if (!buff)
3254 IPAWANERR("Null buffer.\n");
3255 kfree(buff);
3256}
3257
3258static void ipa_rmnet_rx_cb(void *priv)
3259{
3260 struct net_device *dev = priv;
3261 struct wwan_private *wwan_ptr;
3262
3263 IPAWANDBG("\n");
3264
3265 if (dev != ipa_netdevs[0]) {
3266 IPAWANERR("Not matching with netdev\n");
3267 return;
3268 }
3269
3270 wwan_ptr = netdev_priv(dev);
3271 napi_schedule(&(wwan_ptr->napi));
3272}
3273
3274static int ipa_rmnet_poll(struct napi_struct *napi, int budget)
3275{
3276 int rcvd_pkts = 0;
3277
3278 rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT);
3279 IPAWANDBG("rcvd packets: %d\n", rcvd_pkts);
3280 return rcvd_pkts;
3281}
3282
/* module entry/exit points and metadata */
late_initcall(ipa_wwan_init);
module_exit(ipa_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
MODULE_LICENSE("GPL v2");