/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * WWAN Transport Network Driver.
 */

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/pkt_sched.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
#include "ipa_qmi_service.h"
#include <linux/rmnet_ipa_fd_ioctl.h>
#include <linux/ipa.h>
#include <uapi/linux/net_map.h>
#include <uapi/linux/msm_rmnet.h>
#include <net/rmnet_config.h>

#include "ipa_trace.h"

#define WWAN_METADATA_SHFT 24
#define WWAN_METADATA_MASK 0xFF000000
#define WWAN_DATA_LEN 2000
#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
#define HEADROOM_FOR_QMAP 8 /* for mux header */
#define TAILROOM 0 /* for padding by mux layer */
#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
#define UL_FILTER_RULE_HANDLE_START 69
#define DEFAULT_OUTSTANDING_HIGH_CTL 96
#define DEFAULT_OUTSTANDING_HIGH 64
#define DEFAULT_OUTSTANDING_LOW 32

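/*
 * How the outstanding-packet watermarks above are used (see
 * ipa_wwan_xmit() and apps_ipa_tx_complete_notify() below): the Tx
 * queue is stopped once DEFAULT_OUTSTANDING_HIGH packets are awaiting
 * a TX-complete from IPA, QMAP control commands are still let through
 * up to DEFAULT_OUTSTANDING_HIGH_CTL, and the queue is woken again
 * once the count drains below DEFAULT_OUTSTANDING_LOW.
 */
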
#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"

#define IPA_WWAN_DEVICE_COUNT (1)

#define IPA_WWAN_RX_SOFTIRQ_THRESH 16

#define INVALID_MUX_ID 0xFF
#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */

#define NAPI_WEIGHT 60

static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl;
static struct rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
static int num_q6_rule, old_num_q6_rule;
static int rmnet_index;
static bool egress_set, a7_ul_flt_set;
static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue */
static atomic_t is_initialized;
static atomic_t is_ssr;
static void *subsys_notify_handle;

u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* pipe handles obtained from IPA */
static struct mutex ipa_to_apps_pipe_handle_guard;
static int wwan_add_ul_flt_rule_to_ipa(void);
static int wwan_del_ul_flt_rule_to_ipa(void);
static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type);
static void ipa_rmnet_rx_cb(void *priv);
static int ipa_rmnet_poll(struct napi_struct *napi, int budget);

static void wake_tx_queue(struct work_struct *work);
static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue);

static void tethering_stats_poll_queue(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
		tethering_stats_poll_queue);

enum wwan_device_status {
	WWAN_DEVICE_INACTIVE = 0,
	WWAN_DEVICE_ACTIVE = 1
};

struct ipa_rmnet_plat_drv_res {
	bool ipa_rmnet_ssr;
	bool ipa_loaduC;
	bool ipa_advertise_sg_support;
	bool ipa_napi_enable;
};

static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
/**
 * struct wwan_private - WWAN private data
 * @net: network interface struct implemented by this driver
 * @stats: iface statistics
 * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
 * @outstanding_high: number of outstanding packets allowed
 * @outstanding_low: number of outstanding packets below which the stopped
 * Tx queue is woken up again
 * @ch_id: channel id
 * @lock: spinlock for mutual exclusion
 * @device_status: holds device status
 *
 * WWAN private - holds all relevant info about WWAN driver
 */
struct wwan_private {
	struct net_device *net;
	struct net_device_stats stats;
	atomic_t outstanding_pkts;
	int outstanding_high_ctl;
	int outstanding_high;
	int outstanding_low;
	uint32_t ch_id;
	spinlock_t lock;
	struct completion resource_granted_completion;
	enum wwan_device_status device_status;
	struct napi_struct napi;
};

/**
 * ipa_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
 *
 * Return codes:
 * 0: success
 * -ENOMEM: failed to allocate memory
 * -EPERM: failed to add the tables
 */
static int ipa_setup_a7_qmap_hdr(void)
{
	struct ipa_ioc_add_hdr *hdr;
	struct ipa_hdr_add *hdr_entry;
	u32 pyld_sz;
	int ret;

	/* install the basic exception header */
	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
		sizeof(struct ipa_hdr_add);
	hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!hdr) {
		IPAWANERR("fail to alloc exception hdr\n");
		return -ENOMEM;
	}
	hdr->num_hdrs = 1;
	hdr->commit = 1;
	hdr_entry = &hdr->hdr[0];

	strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
		IPA_RESOURCE_NAME_MAX);
	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */

	if (ipa2_add_hdr(hdr)) {
		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}

	if (hdr_entry->status) {
		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}
	qmap_hdr_hdl = hdr_entry->hdr_hdl;

	ret = 0;
bail:
	kfree(hdr);
	return ret;
}

static void ipa_del_a7_qmap_hdr(void)
{
	struct ipa_ioc_del_hdr *del_hdr;
	struct ipa_hdr_del *hdl_entry;
	u32 pyld_sz;
	int ret;

	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
		sizeof(struct ipa_hdr_del);
	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!del_hdr) {
		IPAWANERR("fail to alloc exception hdr_del\n");
		return;
	}

	del_hdr->commit = 1;
	del_hdr->num_hdls = 1;
	hdl_entry = &del_hdr->hdl[0];
	hdl_entry->hdl = qmap_hdr_hdl;

	ret = ipa2_del_hdr(del_hdr);
	if (ret || hdl_entry->status)
		IPAWANERR("ipa2_del_hdr failed\n");
	else
		IPAWANDBG("hdrs deletion done\n");

	qmap_hdr_hdl = 0;
	kfree(del_hdr);
}

static void ipa_del_qmap_hdr(uint32_t hdr_hdl)
{
	struct ipa_ioc_del_hdr *del_hdr;
	struct ipa_hdr_del *hdl_entry;
	u32 pyld_sz;
	int ret;

	if (hdr_hdl == 0) {
		IPAWANERR("Invalid hdr_hdl provided\n");
		return;
	}

	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
		sizeof(struct ipa_hdr_del);
	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!del_hdr) {
		IPAWANERR("fail to alloc exception hdr_del\n");
		return;
	}

	del_hdr->commit = 1;
	del_hdr->num_hdls = 1;
	hdl_entry = &del_hdr->hdl[0];
	hdl_entry->hdl = hdr_hdl;

	ret = ipa2_del_hdr(del_hdr);
	if (ret || hdl_entry->status)
		IPAWANERR("ipa2_del_hdr failed\n");
	else
		IPAWANDBG("header deletion done\n");

	qmap_hdr_hdl = 0;
	kfree(del_hdr);
}

static void ipa_del_mux_qmap_hdrs(void)
{
	int index;

	for (index = 0; index < rmnet_index; index++) {
		ipa_del_qmap_hdr(mux_channel[index].hdr_hdl);
		mux_channel[index].hdr_hdl = 0;
	}
}

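/*
 * For orientation, a sketch of the 4-byte QMAP header that the entries
 * added below describe (layout per the public QMAP format description,
 * summarized here rather than taken from this file):
 *
 *	byte 0: C/D flag plus reserved and pad-length bits
 *	byte 1: mux_id - logical channel, patched into hdr[1] below
 *	bytes 2-3: packet length
 */
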
static int ipa_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
{
	struct ipa_ioc_add_hdr *hdr;
	struct ipa_hdr_add *hdr_entry;
	char hdr_name[IPA_RESOURCE_NAME_MAX];
	u32 pyld_sz;
	int ret;

	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
		sizeof(struct ipa_hdr_add);
	hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!hdr) {
		IPAWANERR("fail to alloc exception hdr\n");
		return -ENOMEM;
	}
	hdr->num_hdrs = 1;
	hdr->commit = 1;
	hdr_entry = &hdr->hdr[0];

	snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		 A2_MUX_HDR_NAME_V4_PREF,
		 mux_id);
	strlcpy(hdr_entry->name, hdr_name,
		IPA_RESOURCE_NAME_MAX);

	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
	hdr_entry->hdr[1] = (uint8_t) mux_id;
	IPAWANDBG("header (%s) with mux-id: (%d)\n",
		hdr_name,
		hdr_entry->hdr[1]);
	if (ipa2_add_hdr(hdr)) {
		IPAWANERR("fail to add IPA_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}

	if (hdr_entry->status) {
		IPAWANERR("fail to add IPA_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}

	ret = 0;
	*hdr_hdl = hdr_entry->hdr_hdl;
bail:
	kfree(hdr);
	return ret;
}

/**
 * ipa_setup_dflt_wan_rt_tables() - Setup default wan routing tables
 *
 * Return codes:
 * 0: success
 * -ENOMEM: failed to allocate memory
 * -EPERM: failed to add the tables
 */
static int ipa_setup_dflt_wan_rt_tables(void)
{
	struct ipa_ioc_add_rt_rule *rt_rule;
	struct ipa_rt_rule_add *rt_rule_entry;

	rt_rule =
		kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
			sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
	if (!rt_rule) {
		IPAWANERR("fail to alloc mem\n");
		return -ENOMEM;
	}
	/* setup a default v4 route to point to Apps */
	rt_rule->num_rules = 1;
	rt_rule->commit = 1;
	rt_rule->ip = IPA_IP_v4;
	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
		IPA_RESOURCE_NAME_MAX);

	rt_rule_entry = &rt_rule->rules[0];
	rt_rule_entry->at_rear = 1;
	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
	rt_rule_entry->rule.hdr_hdl = qmap_hdr_hdl;

	if (ipa2_add_rt_rule(rt_rule)) {
		IPAWANERR("fail to add dflt_wan v4 rule\n");
		kfree(rt_rule);
		return -EPERM;
	}

	IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
	dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;

	/* setup a default v6 route to point to Apps */
	rt_rule->ip = IPA_IP_v6;
	if (ipa2_add_rt_rule(rt_rule)) {
		IPAWANERR("fail to add dflt_wan v6 rule\n");
		kfree(rt_rule);
		return -EPERM;
	}
	IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
	dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;

	kfree(rt_rule);
	return 0;
}

static void ipa_del_dflt_wan_rt_tables(void)
{
	struct ipa_ioc_del_rt_rule *rt_rule;
	struct ipa_rt_rule_del *rt_rule_entry;
	int len;

	len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
		sizeof(struct ipa_rt_rule_del);
	rt_rule = kzalloc(len, GFP_KERNEL);
	if (!rt_rule) {
		IPAWANERR("unable to allocate memory for del route rule\n");
		return;
	}

	rt_rule->commit = 1;
	rt_rule->num_hdls = 1;
	rt_rule->ip = IPA_IP_v4;

	rt_rule_entry = &rt_rule->hdl[0];
	rt_rule_entry->status = -1;
	rt_rule_entry->hdl = dflt_v4_wan_rt_hdl;

	IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
		rt_rule_entry->hdl, IPA_IP_v4);
	if (ipa2_del_rt_rule(rt_rule) ||
			(rt_rule_entry->status)) {
		IPAWANERR("Routing rule deletion failed!\n");
	}

	rt_rule->ip = IPA_IP_v6;
	rt_rule_entry->hdl = dflt_v6_wan_rt_hdl;
	IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
		rt_rule_entry->hdl, IPA_IP_v6);
	if (ipa2_del_rt_rule(rt_rule) ||
			(rt_rule_entry->status)) {
		IPAWANERR("Routing rule deletion failed!\n");
	}

	kfree(rt_rule);
}

int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
		*rule_req, uint32_t *rule_hdl)
{
	int i, j;

	if (rule_req->filter_spec_list_valid == true) {
		num_q6_rule = rule_req->filter_spec_list_len;
		IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
	} else {
		num_q6_rule = 0;
		IPAWANERR("got no UL rules from modem\n");
		return -EINVAL;
	}

	/* copy UL filter rules from Modem */
	for (i = 0; i < num_q6_rule; i++) {
		/* check if the rules exceed the cache */
		if (i == MAX_NUM_Q6_RULE) {
			IPAWANERR("Reaching (%d) max cache ",
				MAX_NUM_Q6_RULE);
			IPAWANERR(" however total (%d)\n",
				num_q6_rule);
			goto failure;
		}
		/* construct UL_filter_rule handle for the QMI use-case */
		ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl =
			UL_FILTER_RULE_HANDLE_START + i;
		rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
		ipa_qmi_ctx->q6_ul_filter_rule[i].ip =
			rule_req->filter_spec_list[i].ip_type;
		ipa_qmi_ctx->q6_ul_filter_rule[i].action =
			rule_req->filter_spec_list[i].filter_action;
		if (rule_req->filter_spec_list[i].is_routing_table_index_valid
			== true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
			rule_req->filter_spec_list[i].route_table_index;
		if (rule_req->filter_spec_list[i].is_mux_id_valid == true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].mux_id =
			rule_req->filter_spec_list[i].mux_id;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
			rule_req->filter_spec_list[i].filter_rule.
			rule_eq_bitmap;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			tos_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
			rule_req->filter_spec_list[i].filter_rule.tos_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			protocol_eq_present = rule_req->filter_spec_list[i].
			filter_rule.protocol_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
			rule_req->filter_spec_list[i].filter_rule.
			protocol_eq;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_range_16;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_low = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].range_low;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_high = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].range_high;
		}
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].offset = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].mask = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].value = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].value;
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
			rule_req->filter_spec_list[i].filter_rule.tc_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
			rule_req->filter_spec_list[i].filter_rule.tc_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			flow_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
			rule_req->filter_spec_list[i].filter_rule.flow_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_meq_32 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].
			eq_attrib.num_ihl_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].mask = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].value = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].value;
		}
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_128;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_128; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].offset;
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].mask,
				rule_req->filter_spec_list[i].
				filter_rule.offset_meq_128[j].mask, 16);
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].value, rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].value, 16);
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32_present = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.offset = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.mask = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.mask;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
			value = rule_req->filter_spec_list[i].filter_rule.
			metadata_meq32.value;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ipv4_frag_eq_present = rule_req->filter_spec_list[i].
			filter_rule.ipv4_frag_eq_present;
	}

	if (rule_req->xlat_filter_indices_list_valid) {
		if (rule_req->xlat_filter_indices_list_len > num_q6_rule) {
			IPAWANERR("Number of xlat indices is not valid: %d\n",
				rule_req->xlat_filter_indices_list_len);
			goto failure;
		}
		IPAWANDBG("Receive %d XLAT indices: ",
			rule_req->xlat_filter_indices_list_len);
		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
			IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
		IPAWANDBG("\n");

		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
			if (rule_req->xlat_filter_indices_list[i]
				>= num_q6_rule) {
				IPAWANERR("Xlat rule idx is wrong: %d\n",
					rule_req->xlat_filter_indices_list[i]);
				goto failure;
			} else {
				ipa_qmi_ctx->q6_ul_filter_rule
				[rule_req->xlat_filter_indices_list[i]]
				.is_xlat_rule = 1;
				IPAWANDBG("Rule %d is xlat rule\n",
					rule_req->xlat_filter_indices_list[i]);
			}
		}
	}
	goto success;

failure:
	num_q6_rule = 0;
	memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
		sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
	return -EINVAL;

success:
	return 0;
}

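/*
 * The Q6 (modem) UL filter rules cached above are pushed to hardware in
 * two steps below: ipa2_add_flt_rule() programs each cached rule into
 * the A7 filter tables, then qmi_filter_notify_send() reports the
 * resulting rule indices back to the modem. Note the separate
 * num_v4_rule/num_v6_rule counters - Q6 expects each rule's index
 * within its own IP family, not its position in the combined list.
 */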
static int wwan_add_ul_flt_rule_to_ipa(void)
{
	u32 pyld_sz;
	int i, retval = 0;
	int num_v4_rule = 0, num_v6_rule = 0;
	struct ipa_ioc_add_flt_rule *param;
	struct ipa_flt_rule_add flt_rule_entry;
	struct ipa_fltr_installed_notif_req_msg_v01 *req;

	if (ipa_qmi_ctx == NULL) {
		IPAWANERR("ipa_qmi_ctx is NULL!\n");
		return -EFAULT;
	}

	pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
		sizeof(struct ipa_flt_rule_add);
	param = kzalloc(pyld_sz, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	req = kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
		GFP_KERNEL);
	if (!req) {
		kfree(param);
		return -ENOMEM;
	}

	param->commit = 1;
	param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
	param->global = false;
	param->num_rules = (uint8_t)1;

	mutex_lock(&ipa_qmi_lock);
	for (i = 0; i < num_q6_rule; i++) {
		param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
		flt_rule_entry.at_rear = true;
		flt_rule_entry.rule.action =
			ipa_qmi_ctx->q6_ul_filter_rule[i].action;
		flt_rule_entry.rule.rt_tbl_idx
			= ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
		flt_rule_entry.rule.retain_hdr = true;

		/* debug rt-hdl */
		IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
			i, flt_rule_entry.rule.rt_tbl_idx);
		flt_rule_entry.rule.eq_attrib_type = true;
		memcpy(&(flt_rule_entry.rule.eq_attrib),
			&ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
			sizeof(struct ipa_ipfltri_rule_eq));
		memcpy(&(param->rules[0]), &flt_rule_entry,
			sizeof(struct ipa_flt_rule_add));
		if (ipa2_add_flt_rule(param)) {
			retval = -EFAULT;
			IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
		} else {
			/* store the rule handle */
			ipa_qmi_ctx->q6_ul_filter_rule_hdl[i] =
				param->rules[0].flt_rule_hdl;
		}
	}
	mutex_unlock(&ipa_qmi_lock);

	/* send ipa_fltr_installed_notif_req_msg_v01 to Q6 */
	req->source_pipe_index =
		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
	req->install_status = QMI_RESULT_SUCCESS_V01;
	req->filter_index_list_len = num_q6_rule;
	mutex_lock(&ipa_qmi_lock);
	for (i = 0; i < num_q6_rule; i++) {
		if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) {
			req->filter_index_list[i].filter_index = num_v4_rule;
			num_v4_rule++;
		} else {
			req->filter_index_list[i].filter_index = num_v6_rule;
			num_v6_rule++;
		}
		req->filter_index_list[i].filter_handle =
			ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
	}
	mutex_unlock(&ipa_qmi_lock);
	if (qmi_filter_notify_send(req)) {
		IPAWANDBG("add filter rule index on A7-RX failed\n");
		retval = -EFAULT;
	}
	old_num_q6_rule = num_q6_rule;
	IPAWANDBG("add (%d) filter rule index on A7-RX\n",
		old_num_q6_rule);
	kfree(param);
	kfree(req);
	return retval;
}

static int wwan_del_ul_flt_rule_to_ipa(void)
{
	u32 pyld_sz;
	int i, retval = 0;
	struct ipa_ioc_del_flt_rule *param;
	struct ipa_flt_rule_del flt_rule_entry;

	pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
		sizeof(struct ipa_flt_rule_del);
	param = kzalloc(pyld_sz, GFP_KERNEL);
	if (!param) {
		IPAWANERR("kzalloc failed\n");
		return -ENOMEM;
	}

	param->commit = 1;
	param->num_hdls = (uint8_t) 1;

	for (i = 0; i < old_num_q6_rule; i++) {
		param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
		flt_rule_entry.hdl = ipa_qmi_ctx->q6_ul_filter_rule_hdl[i];
		/* debug rt-hdl */
		IPAWANDBG("delete-IPA rule index(%d)\n", i);
		memcpy(&(param->hdl[0]), &flt_rule_entry,
			sizeof(struct ipa_flt_rule_del));
		if (ipa2_del_flt_rule(param)) {
			IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
			kfree(param);
			return -EFAULT;
		}
	}

	/* clear UL filter-rule add-indication */
	a7_ul_flt_set = false;
	old_num_q6_rule = 0;

	kfree(param);
	return retval;
}

static int find_mux_channel_index(uint32_t mux_id)
{
	int i;

	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
		if (mux_id == mux_channel[i].mux_id)
			return i;
	}
	return MAX_NUM_OF_MUX_CHANNEL;
}

static int find_vchannel_name_index(const char *vchannel_name)
{
	int i;

	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
		if (strcmp(mux_channel[i].vchannel_name, vchannel_name) == 0)
			return i;
	}
	return MAX_NUM_OF_MUX_CHANNEL;
}

static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
{
	int i;

	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
		if (strcmp(mux_channel[i].vchannel_name,
			upstreamIface) == 0)
			return IPA_UPSTEAM_MODEM;
	}

	if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
		return IPA_UPSTEAM_WLAN;
	else
		return IPA_UPSTEAM_MAX;
}

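/*
 * A note on the Tx/Rx properties registered below: on the Rx (UL) side
 * IPA matches packets by metadata, with the QMAP mux_id carried in the
 * top byte of the 32-bit metadata word - hence the
 * mux_id << WWAN_METADATA_SHFT value and the WWAN_METADATA_MASK
 * (0xFF000000) mask used for both the IPv4 and IPv6 properties.
 */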
static int wwan_register_to_ipa(int index)
{
	struct ipa_tx_intf tx_properties = {0};
	struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
	struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
	struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
	struct ipa_rx_intf rx_properties = {0};
	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
	struct ipa_ext_intf ext_properties = {0};
	struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
	u32 pyld_sz;
	int ret = 0, i;

	IPAWANDBG("index(%d) device[%s]:\n", index,
		mux_channel[index].vchannel_name);
	if (!mux_channel[index].mux_hdr_set) {
		ret = ipa_add_qmap_hdr(mux_channel[index].mux_id,
			&mux_channel[index].hdr_hdl);
		if (ret) {
			IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
			return ret;
		}
		mux_channel[index].mux_hdr_set = true;
	}
	tx_properties.prop = tx_ioc_properties;
	tx_ipv4_property = &tx_properties.prop[0];
	tx_ipv4_property->ip = IPA_IP_v4;
	tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
	snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		A2_MUX_HDR_NAME_V4_PREF,
		mux_channel[index].mux_id);
	tx_ipv6_property = &tx_properties.prop[1];
	tx_ipv6_property->ip = IPA_IP_v6;
	tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
	/* no need to use A2_MUX_HDR_NAME_V6_PREF, same header */
	snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		A2_MUX_HDR_NAME_V4_PREF,
		mux_channel[index].mux_id);
	tx_properties.num_props = 2;

	rx_properties.prop = rx_ioc_properties;
	rx_ipv4_property = &rx_properties.prop[0];
	rx_ipv4_property->ip = IPA_IP_v4;
	rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
	rx_ipv4_property->attrib.meta_data =
		mux_channel[index].mux_id << WWAN_METADATA_SHFT;
	rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
	rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
	rx_ipv6_property = &rx_properties.prop[1];
	rx_ipv6_property->ip = IPA_IP_v6;
	rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
	rx_ipv6_property->attrib.meta_data =
		mux_channel[index].mux_id << WWAN_METADATA_SHFT;
	rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
	rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
	rx_properties.num_props = 2;

	pyld_sz = num_q6_rule *
		sizeof(struct ipa_ioc_ext_intf_prop);
	ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
	if (!ext_ioc_properties) {
		IPAWANERR("failed to allocate memory\n");
		return -ENOMEM;
	}

	ext_properties.prop = ext_ioc_properties;
	ext_properties.excp_pipe_valid = true;
	ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
	ext_properties.num_props = num_q6_rule;
	for (i = 0; i < num_q6_rule; i++) {
		memcpy(&(ext_properties.prop[i]),
			&(ipa_qmi_ctx->q6_ul_filter_rule[i]),
			sizeof(struct ipa_ioc_ext_intf_prop));
		ext_properties.prop[i].mux_id = mux_channel[index].mux_id;
		IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
			ext_properties.prop[i].ip,
			ext_properties.prop[i].rt_tbl_idx);
		IPAWANDBG("action: %d mux:%d\n",
			ext_properties.prop[i].action,
			ext_properties.prop[i].mux_id);
	}
	ret = ipa2_register_intf_ext(mux_channel[index].
		vchannel_name, &tx_properties,
		&rx_properties, &ext_properties);
	if (ret) {
		IPAWANERR("[%s]:ipa2_register_intf failed %d\n",
			mux_channel[index].vchannel_name, ret);
		goto fail;
	}
	mux_channel[index].ul_flt_reg = true;
fail:
	kfree(ext_ioc_properties);
	return ret;
}

static void ipa_cleanup_deregister_intf(void)
{
	int i;
	int ret;

	for (i = 0; i < rmnet_index; i++) {
		if (mux_channel[i].ul_flt_reg) {
			ret = ipa2_deregister_intf(
				mux_channel[i].vchannel_name);
			if (ret < 0) {
				IPAWANERR("de-register device %s(%d) failed\n",
					mux_channel[i].vchannel_name,
					i);
				return;
			}
			IPAWANDBG("de-register device %s(%d) success\n",
				mux_channel[i].vchannel_name,
				i);
		}
		mux_channel[i].ul_flt_reg = false;
	}
}

int wwan_update_mux_channel_prop(void)
{
	int ret = 0, i;

	/* install UL filter rules */
	if (egress_set) {
		if (ipa_qmi_ctx &&
			ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
			IPAWANDBG("setup UL filter rules\n");
			if (a7_ul_flt_set) {
				IPAWANDBG("del previous UL filter rules\n");
				/* delete rule handles */
				ret = wwan_del_ul_flt_rule_to_ipa();
				if (ret) {
					IPAWANERR("failed to del old rules\n");
					return -EINVAL;
				}
				IPAWANDBG("deleted old UL rules\n");
			}
			ret = wwan_add_ul_flt_rule_to_ipa();
		}
		if (ret)
			IPAWANERR("failed to install UL rules\n");
		else
			a7_ul_flt_set = true;
	}
	/* update Tx/Rx/Ext property */
	IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
	if (rmnet_index == 0) {
		IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
		return ret;
	}

	ipa_cleanup_deregister_intf();

	for (i = 0; i < rmnet_index; i++) {
		ret = wwan_register_to_ipa(i);
		if (ret < 0) {
			IPAWANERR("failed to re-register %s, mux %d, index %d\n",
				mux_channel[i].vchannel_name,
				mux_channel[i].mux_id,
				i);
			return -ENODEV;
		}
		IPAWANERR("dev(%s) has registered to IPA\n",
			mux_channel[i].vchannel_name);
		mux_channel[i].ul_flt_reg = true;
	}
	return ret;
}

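/*
 * Compatibility shim: older kernels spell completion re-initialisation
 * as INIT_COMPLETION(x), newer ones provide reinit_completion(&x). Map
 * the new name onto the old macro when only the old one exists, so the
 * calls below build on both.
 */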
#ifdef INIT_COMPLETION
#define reinit_completion(x) INIT_COMPLETION(*(x))
#endif /* INIT_COMPLETION */

static int __ipa_wwan_open(struct net_device *dev)
{
	struct wwan_private *wwan_ptr = netdev_priv(dev);

	IPAWANDBG("[%s] __wwan_open()\n", dev->name);
	if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
		reinit_completion(&wwan_ptr->resource_granted_completion);
	wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;

	if (ipa_rmnet_res.ipa_napi_enable)
		napi_enable(&(wwan_ptr->napi));
	return 0;
}

/**
 * ipa_wwan_open() - Opens the wwan network interface. Opens logical
 * channel on A2 MUX driver and starts the network stack queue
 *
 * @dev: network device
 *
 * Return codes:
 * 0: success
 * -ENODEV: Error while opening logical channel on A2 MUX driver
 */
static int ipa_wwan_open(struct net_device *dev)
{
	int rc = 0;

	IPAWANDBG("[%s] wwan_open()\n", dev->name);
	rc = __ipa_wwan_open(dev);
	if (rc == 0)
		netif_start_queue(dev);
	return rc;
}

static int __ipa_wwan_close(struct net_device *dev)
{
	struct wwan_private *wwan_ptr = netdev_priv(dev);
	int rc = 0;

	if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
		wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
		/* do not close wwan port once up, this causes
		 * remote side to hang if tried to open again
		 */
		reinit_completion(&wwan_ptr->resource_granted_completion);
		if (ipa_rmnet_res.ipa_napi_enable)
			napi_disable(&(wwan_ptr->napi));
		rc = ipa2_deregister_intf(dev->name);
		if (rc) {
			IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n",
				dev->name, rc);
			return rc;
		}
		return rc;
	} else {
		return -EBADF;
	}
}

/**
 * ipa_wwan_stop() - Stops the wwan network interface. Closes
 * logical channel on A2 MUX driver and stops the network stack
 * queue
 *
 * @dev: network device
 *
 * Return codes:
 * 0: success
 * -ENODEV: Error while opening logical channel on A2 MUX driver
 */
static int ipa_wwan_stop(struct net_device *dev)
{
	IPAWANDBG("[%s] ipa_wwan_stop()\n", dev->name);
	__ipa_wwan_close(dev);
	netif_stop_queue(dev);
	return 0;
}

static int ipa_wwan_change_mtu(struct net_device *dev, int new_mtu)
{
	if (0 > new_mtu || WWAN_DATA_LEN < new_mtu)
		return -EINVAL;
	IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
		dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;
	return 0;
}

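/*
 * Tx path note: per the standard ndo_start_xmit contract, returning
 * NETDEV_TX_BUSY below makes the core requeue the same skb and retry
 * later, so the skb must not be freed on that path; on the drop paths
 * the skb is consumed here before NETDEV_TX_OK (or an error) is
 * returned.
 */
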
/**
 * ipa_wwan_xmit() - Transmits an skb.
 *
 * @skb: skb to be transmitted
 * @dev: network device
 *
 * Return codes:
 * 0: success
 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
 * later
 * -EFAULT: Error while transmitting the skb
 */
static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int ret = 0;
	bool qmap_check;
	struct wwan_private *wwan_ptr = netdev_priv(dev);
	struct ipa_tx_meta meta;

	if (skb->protocol != htons(ETH_P_MAP)) {
		IPAWANDBG("SW filtering out non-QMAP packet received from %s\n",
			current->comm);
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	qmap_check = RMNET_MAP_GET_CD_BIT(skb);
	if (netif_queue_stopped(dev)) {
		if (qmap_check &&
			atomic_read(&wwan_ptr->outstanding_pkts) <
					wwan_ptr->outstanding_high_ctl) {
			pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
			goto send;
		} else {
			pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
			return NETDEV_TX_BUSY;
		}
	}

	/* checking High WM hit */
	if (atomic_read(&wwan_ptr->outstanding_pkts) >=
		wwan_ptr->outstanding_high) {
		if (!qmap_check) {
			IPAWANDBG("pending(%d)/(%d)- stop(%d), qmap_chk(%d)\n",
				atomic_read(&wwan_ptr->outstanding_pkts),
				wwan_ptr->outstanding_high,
				netif_queue_stopped(dev),
				qmap_check);
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;
		}
	}

send:
	/* IPA_RM checking start */
	ret = ipa_rm_inactivity_timer_request_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret == -EINPROGRESS) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	if (ret) {
		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
			dev->name, ret);
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return -EFAULT;
	}
	/* IPA_RM checking end */

	if (qmap_check) {
		memset(&meta, 0, sizeof(meta));
		meta.pkt_init_dst_ep_valid = true;
		meta.pkt_init_dst_ep_remote = true;
		ret = ipa2_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
	} else {
		ret = ipa2_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
	}

	if (ret) {
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	atomic_inc(&wwan_ptr->outstanding_pkts);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	ret = NETDEV_TX_OK;
out:
	ipa_rm_inactivity_timer_release_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
	return ret;
}

static void ipa_wwan_tx_timeout(struct net_device *dev)
{
	IPAWANERR("[%s] ipa_wwan_tx_timeout(), data stall in UL\n", dev->name);
}

/**
 * apps_ipa_tx_complete_notify() - Tx complete notify
 *
 * @priv: driver context
 * @evt: event type
 * @data: data provided with event
 *
 * Check that the packet is the one we sent and release it.
 * This function will be called in deferred context in IPA wq.
 */
static void apps_ipa_tx_complete_notify(void *priv,
		enum ipa_dp_evt_type evt,
		unsigned long data)
{
	struct sk_buff *skb = (struct sk_buff *)data;
	struct net_device *dev = (struct net_device *)priv;
	struct wwan_private *wwan_ptr;

	if (dev != ipa_netdevs[0]) {
		IPAWANDBG("Received pre-SSR packet completion\n");
		dev_kfree_skb_any(skb);
		return;
	}

	if (evt != IPA_WRITE_DONE) {
		IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return;
	}

	wwan_ptr = netdev_priv(dev);
	atomic_dec(&wwan_ptr->outstanding_pkts);
	__netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
	if (!atomic_read(&is_ssr) &&
		netif_queue_stopped(wwan_ptr->net) &&
		atomic_read(&wwan_ptr->outstanding_pkts) <
					(wwan_ptr->outstanding_low)) {
		IPAWANDBG("Outstanding low (%d) - wake up queue\n",
				wwan_ptr->outstanding_low);
		netif_wake_queue(wwan_ptr->net);
	}
	__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
	dev_kfree_skb_any(skb);
	ipa_rm_inactivity_timer_release_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
}

/**
 * apps_ipa_packet_receive_notify() - Rx notify
 *
 * @priv: driver context
 * @evt: event type
 * @data: data provided with event
 *
 * IPA will pass a packet to the Linux network stack with skb->data
 */
static void apps_ipa_packet_receive_notify(void *priv,
		enum ipa_dp_evt_type evt,
		unsigned long data)
{
	struct net_device *dev = (struct net_device *)priv;

	if (evt == IPA_RECEIVE) {
		struct sk_buff *skb = (struct sk_buff *)data;
		int result;
		unsigned int packet_len = skb->len;

		IPAWANDBG("Rx packet was received\n");
		skb->dev = ipa_netdevs[0];
		skb->protocol = htons(ETH_P_MAP);

		if (ipa_rmnet_res.ipa_napi_enable) {
			trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets);
			result = netif_receive_skb(skb);
		} else {
			if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
					== 0) {
				trace_rmnet_ipa_netifni(dev->stats.rx_packets);
				result = netif_rx_ni(skb);
			} else {
				trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
				result = netif_rx(skb);
			}
		}

		if (result) {
			pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
				__func__, __LINE__);
			dev->stats.rx_dropped++;
		}
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += packet_len;
	} else if (evt == IPA_CLIENT_START_POLL)
		ipa_rmnet_rx_cb(priv);
	else if (evt == IPA_CLIENT_COMP_NAPI) {
		struct wwan_private *wwan_ptr = netdev_priv(dev);

		if (ipa_rmnet_res.ipa_napi_enable)
			napi_complete(&(wwan_ptr->napi));
	} else
		IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);

}

static int handle_ingress_format(struct net_device *dev,
			struct rmnet_ioctl_extended_s *in)
{
	int ret = 0;
	struct rmnet_phys_ep_conf_s *ep_cfg;

	IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
		ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en =
			IPA_ENABLE_CS_OFFLOAD_DL;

	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
		IPAWANERR("get AGG size %d count %d\n",
			in->u.ingress_format.agg_size,
			in->u.ingress_format.agg_count);

		ret = ipa_disable_apps_wan_cons_deaggr(
			in->u.ingress_format.agg_size,
			in->u.ingress_format.agg_count);

		if (!ret) {
			ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit =
				in->u.ingress_format.agg_size;
			ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
				in->u.ingress_format.agg_count;

			if (ipa_rmnet_res.ipa_napi_enable) {
				ipa_to_apps_ep_cfg.recycle_enabled = true;
				ep_cfg = (struct rmnet_phys_ep_conf_s *)
					rcu_dereference(dev->rx_handler_data);
				ep_cfg->recycle = ipa_recycle_wan_skb;
				pr_info("Wan Recycle Enabled\n");
			}
		}
	}

	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;

	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding =
		true;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
	ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;

	ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
	ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify;
	ipa_to_apps_ep_cfg.priv = dev;

	ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
	if (ipa_to_apps_ep_cfg.napi_enabled)
		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
	else
		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;

	mutex_lock(&ipa_to_apps_pipe_handle_guard);
	if (atomic_read(&is_ssr)) {
		IPAWANDBG("In SSR sequence/recovery\n");
		mutex_unlock(&ipa_to_apps_pipe_handle_guard);
		return -EFAULT;
	}
	ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
	mutex_unlock(&ipa_to_apps_pipe_handle_guard);

	if (ret)
		IPAWANERR("failed to configure ingress\n");

	return ret;
}

/**
 * ipa_wwan_ioctl() - I/O control for wwan network driver.
 *
 * @dev: network device
 * @ifr: ignored
 * @cmd: cmd to be executed. can be one of the following:
 * IPA_WWAN_IOCTL_OPEN - Open the network interface
 * IPA_WWAN_IOCTL_CLOSE - Close the network interface
 *
 * Return codes:
 * 0: success
 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
 * later
 * -EFAULT: Error while transmitting the skb
 */
static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int rc = 0;
	int mru = 1000, epid = 1, mux_index, len;
	struct ipa_msg_meta msg_meta;
	struct ipa_wan_msg *wan_msg = NULL;
	struct rmnet_ioctl_extended_s extend_ioctl_data;
	struct rmnet_ioctl_data_s ioctl_data;

	IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
	switch (cmd) {
	/* Set Ethernet protocol */
	case RMNET_IOCTL_SET_LLP_ETHERNET:
		break;
	/* Set RAWIP protocol */
	case RMNET_IOCTL_SET_LLP_IP:
		break;
	/* Get link protocol */
	case RMNET_IOCTL_GET_LLP:
		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
			sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	/* Set QoS header enabled */
	case RMNET_IOCTL_SET_QOS_ENABLE:
		return -EINVAL;
	/* Set QoS header disabled */
	case RMNET_IOCTL_SET_QOS_DISABLE:
		break;
	/* Get QoS header state */
	case RMNET_IOCTL_GET_QOS:
		ioctl_data.u.operation_mode = RMNET_MODE_NONE;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
			sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	/* Get operation mode */
	case RMNET_IOCTL_GET_OPMODE:
		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
			sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	/* Open transport port */
	case RMNET_IOCTL_OPEN:
		break;
	/* Close transport port */
	case RMNET_IOCTL_CLOSE:
		break;
	/* Flow enable */
	case RMNET_IOCTL_FLOW_ENABLE:
		IPAWANDBG("Received flow enable\n");
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
			sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		ipa_flow_control(IPA_CLIENT_USB_PROD, true,
			ioctl_data.u.tcm_handle);
		break;
	/* Flow disable */
	case RMNET_IOCTL_FLOW_DISABLE:
		IPAWANDBG("Received flow disable\n");
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
			sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		ipa_flow_control(IPA_CLIENT_USB_PROD, false,
			ioctl_data.u.tcm_handle);
		break;
	/* Set flow handle */
	case RMNET_IOCTL_FLOW_SET_HNDL:
		break;

	/* Extended IOCTLs */
	case RMNET_IOCTL_EXTENDED:
		IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
		if (copy_from_user(&extend_ioctl_data,
			(u8 *)ifr->ifr_ifru.ifru_data,
			sizeof(struct rmnet_ioctl_extended_s))) {
			IPAWANERR("failed to copy extended ioctl data\n");
			rc = -EFAULT;
			break;
		}
		switch (extend_ioctl_data.extended_ioctl) {
		/* Get features */
		case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
			IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
			extend_ioctl_data.u.data =
				(RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
				RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
				RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* Set MRU */
		case RMNET_IOCTL_SET_MRU:
			mru = extend_ioctl_data.u.data;
			IPAWANDBG("get MRU size %d\n",
				extend_ioctl_data.u.data);
			break;
		/* Get MRU */
		case RMNET_IOCTL_GET_MRU:
			extend_ioctl_data.u.data = mru;
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* GET SG support */
		case RMNET_IOCTL_GET_SG_SUPPORT:
			extend_ioctl_data.u.data =
				ipa_rmnet_res.ipa_advertise_sg_support;
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* Get endpoint ID */
		case RMNET_IOCTL_GET_EPID:
			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
			extend_ioctl_data.u.data = epid;
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			if (copy_from_user(&extend_ioctl_data,
				(u8 *)ifr->ifr_ifru.ifru_data,
				sizeof(struct rmnet_ioctl_extended_s))) {
				IPAWANERR("copy extended ioctl data failed\n");
				rc = -EFAULT;
				break;
			}
			IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
				extend_ioctl_data.u.data);
			break;
		/* Endpoint pair */
		case RMNET_IOCTL_GET_EP_PAIR:
			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
			ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
			ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			if (copy_from_user(&extend_ioctl_data,
				(u8 *)ifr->ifr_ifru.ifru_data,
				sizeof(struct rmnet_ioctl_extended_s))) {
				IPAWANERR("copy extended ioctl data failed\n");
				rc = -EFAULT;
				break;
			}
			IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
			break;
		/* Get driver name */
		case RMNET_IOCTL_GET_DRIVER_NAME:
			memcpy(&extend_ioctl_data.u.if_name,
				ipa_netdevs[0]->name, IFNAMSIZ);
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* Add MUX ID */
		case RMNET_IOCTL_ADD_MUX_CHANNEL:
			mux_index = find_mux_channel_index(
				extend_ioctl_data.u.rmnet_mux_val.mux_id);
			if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
				IPAWANDBG("already setup mux(%d)\n",
					extend_ioctl_data.u.
					rmnet_mux_val.mux_id);
				return rc;
			}
			if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
				IPAWANERR("Exceed mux_channel limit(%d)\n",
					rmnet_index);
				return -EFAULT;
			}
			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
				extend_ioctl_data.u.rmnet_mux_val.mux_id,
				extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
			/* cache the mux name and id */
			mux_channel[rmnet_index].mux_id =
				extend_ioctl_data.u.rmnet_mux_val.mux_id;
			memcpy(mux_channel[rmnet_index].vchannel_name,
				extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
				sizeof(mux_channel[rmnet_index].vchannel_name));
			IPAWANDBG("cache device[%s:%d] in IPA_wan[%d]\n",
				mux_channel[rmnet_index].vchannel_name,
				mux_channel[rmnet_index].mux_id,
				rmnet_index);
			/* check if UL filter rules have already arrived */
			if (num_q6_rule != 0) {
				IPAWANERR("dev(%s) register to IPA\n",
					extend_ioctl_data.u.rmnet_mux_val.
					vchannel_name);
				rc = wwan_register_to_ipa(rmnet_index);
				if (rc < 0) {
					IPAWANERR("device %s reg IPA failed\n",
						extend_ioctl_data.u.
						rmnet_mux_val.vchannel_name);
					return -ENODEV;
				}
				mux_channel[rmnet_index].mux_channel_set = true;
				mux_channel[rmnet_index].ul_flt_reg = true;
			} else {
				IPAWANDBG("dev(%s) hasn't registered to IPA\n",
					extend_ioctl_data.u.
					rmnet_mux_val.vchannel_name);
				mux_channel[rmnet_index].mux_channel_set = true;
				mux_channel[rmnet_index].ul_flt_reg = false;
			}
			rmnet_index++;
			break;
		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
			IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
			if ((extend_ioctl_data.u.data) &
					RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
				apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
				apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
					cs_offload_en =
					IPA_ENABLE_CS_OFFLOAD_UL;
				apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
					cs_metadata_hdr_offset = 1;
			} else {
				apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
			}
			if ((extend_ioctl_data.u.data) &
					RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
				apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
					IPA_ENABLE_AGGR;
			else
				apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
					IPA_BYPASS_AGGR;
			apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
				hdr_ofst_metadata_valid = 1;
			/* modem wants the offset at 0 */
			apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
			apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst =
					IPA_CLIENT_APPS_LAN_WAN_PROD;
			apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC;

			apps_to_ipa_ep_cfg.client =
				IPA_CLIENT_APPS_LAN_WAN_PROD;
			apps_to_ipa_ep_cfg.notify =
				apps_ipa_tx_complete_notify;
			apps_to_ipa_ep_cfg.desc_fifo_sz =
				IPA_SYS_TX_DATA_DESC_FIFO_SZ;
			apps_to_ipa_ep_cfg.priv = dev;

			rc = ipa2_setup_sys_pipe(&apps_to_ipa_ep_cfg,
				&apps_to_ipa_hdl);
			if (rc)
				IPAWANERR("failed to config egress endpoint\n");

			if (num_q6_rule != 0) {
				/* already got Q6 UL filter rules */
				if (ipa_qmi_ctx &&
					ipa_qmi_ctx->modem_cfg_emb_pipe_flt
					== false)
					rc = wwan_add_ul_flt_rule_to_ipa();
				else
					rc = 0;
				egress_set = true;
				if (rc)
					IPAWANERR("install UL rules failed\n");
				else
					a7_ul_flt_set = true;
			} else {
				/* wait for Q6 UL filter rules */
				egress_set = true;
				IPAWANDBG("no UL-rules, egress_set(%d)\n",
					egress_set);
			}
			break;
		case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
			rc = handle_ingress_format(dev, &extend_ioctl_data);
			break;
		case RMNET_IOCTL_SET_XLAT_DEV_INFO:
			wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
				GFP_KERNEL);
			if (!wan_msg) {
				IPAWANERR("Failed to allocate memory.\n");
				return -ENOMEM;
			}
			len = sizeof(wan_msg->upstream_ifname) >
				sizeof(extend_ioctl_data.u.if_name) ?
				sizeof(extend_ioctl_data.u.if_name) :
				sizeof(wan_msg->upstream_ifname);
			strlcpy(wan_msg->upstream_ifname,
				extend_ioctl_data.u.if_name, len);
			memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
			msg_meta.msg_type = WAN_XLAT_CONNECT;
			msg_meta.msg_len = sizeof(struct ipa_wan_msg);
			rc = ipa2_send_msg(&msg_meta, wan_msg,
				ipa_wwan_msg_free_cb);
			if (rc) {
				IPAWANERR("Failed to send XLAT_CONNECT msg\n");
				kfree(wan_msg);
			}
			break;
		/* Get agg count */
		case RMNET_IOCTL_GET_AGGREGATION_COUNT:
			break;
		/* Set agg count */
		case RMNET_IOCTL_SET_AGGREGATION_COUNT:
			break;
		/* Get agg size */
		case RMNET_IOCTL_GET_AGGREGATION_SIZE:
			break;
		/* Set agg size */
		case RMNET_IOCTL_SET_AGGREGATION_SIZE:
			break;
		/* Do flow control */
		case RMNET_IOCTL_FLOW_CONTROL:
			break;
		/* For legacy use */
		case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
			break;
		/* Get HW/SW map */
		case RMNET_IOCTL_GET_HWSW_MAP:
			break;
		/* Set RX Headroom */
		case RMNET_IOCTL_SET_RX_HEADROOM:
			break;
		default:
			IPAWANERR("[%s] unsupported extended cmd[%d]",
				dev->name,
				extend_ioctl_data.extended_ioctl);
			rc = -EINVAL;
		}
		break;
	default:
		IPAWANERR("[%s] unsupported cmd[%d]",
			dev->name, cmd);
		rc = -EINVAL;
	}
	return rc;
}

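/*
 * For illustration only - a minimal userspace sketch of driving the
 * extended-ioctl path above (assumes the msm_rmnet.h UAPI definitions
 * and an AF_INET control socket; error handling elided, and this
 * snippet is not part of the driver itself):
 *
 *	struct rmnet_ioctl_extended_s ext = {0};
 *	struct ifreq ifr = {0};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "rmnet_ipa0", IFNAMSIZ - 1);
 *	ext.extended_ioctl = RMNET_IOCTL_GET_EPID;
 *	ifr.ifr_ifru.ifru_data = &ext;
 *	ioctl(fd, RMNET_IOCTL_EXTENDED, &ifr);
 *
 * On success, ext.u.data then holds the endpoint ID answered above.
 */
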
static const struct net_device_ops ipa_wwan_ops_ip = {
	.ndo_open = ipa_wwan_open,
	.ndo_stop = ipa_wwan_stop,
	.ndo_start_xmit = ipa_wwan_xmit,
	.ndo_tx_timeout = ipa_wwan_tx_timeout,
	.ndo_do_ioctl = ipa_wwan_ioctl,
	.ndo_change_mtu = ipa_wwan_change_mtu,
	.ndo_set_mac_address = 0,
	.ndo_validate_addr = 0,
};

/**
 * ipa_wwan_setup() - Setups the wwan network driver.
 *
 * @dev: network device
 *
 * Return codes:
 * None
 */
static void ipa_wwan_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipa_wwan_ops_ip;
	ether_setup(dev);
	/* set this after calling ether_setup */
	dev->header_ops = 0;  /* No header */
	dev->type = ARPHRD_RAWIP;
	dev->hard_header_len = 0;
	dev->mtu = WWAN_DATA_LEN;
	dev->addr_len = 0;
	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
	dev->needed_headroom = HEADROOM_FOR_QMAP;
	dev->needed_tailroom = TAILROOM;
	dev->watchdog_timeo = 1000;
}

/* IPA_RM related functions start */
static void q6_prod_rm_request_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(q6_con_rm_request, q6_prod_rm_request_resource);
static void q6_prod_rm_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(q6_con_rm_release, q6_prod_rm_release_resource);

static void q6_prod_rm_request_resource(struct work_struct *work)
{
	int ret = 0;

	ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (ret < 0 && ret != -EINPROGRESS) {
		IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
			ret);
		return;
	}
}

static int q6_rm_request_resource(void)
{
	queue_delayed_work(ipa_rm_q6_workqueue,
		&q6_con_rm_request, 0);
	return 0;
}

1758static void q6_prod_rm_release_resource(struct work_struct *work)
1759{
1760 int ret = 0;
1761
1762 ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
1763 if (ret < 0 && ret != -EINPROGRESS) {
1764 IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
1765 ret);
1766 return;
1767 }
1768}
1769
1770
1771static int q6_rm_release_resource(void)
1772{
1773 queue_delayed_work(ipa_rm_q6_workqueue,
1774 &q6_con_rm_release, 0);
1775 return 0;
1776}
1777
1778
1779static void q6_rm_notify_cb(void *user_data,
1780 enum ipa_rm_event event,
1781 unsigned long data)
1782{
1783 switch (event) {
1784 case IPA_RM_RESOURCE_GRANTED:
1785 IPAWANDBG("%s: Q6_PROD GRANTED CB\n", __func__);
1786 break;
1787 case IPA_RM_RESOURCE_RELEASED:
1788 IPAWANDBG("%s: Q6_PROD RELEASED CB\n", __func__);
1789 break;
1790 default:
1791 return;
1792 }
1793}
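
/*
 * The Q6_CONS request/release hooks above never toggle Q6_PROD directly;
 * they bounce the operation to ipa_rm_q6_workqueue with zero delay and
 * return 0 immediately. Presumably this keeps blocking RM work out of the
 * caller's (possibly atomic) context: the actual
 * ipa_rm_request_resource()/ipa_rm_release_resource() calls then run from
 * the single-threaded "clnt_req" workqueue created in q6_initialize_rm().
 */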

static int q6_initialize_rm(void)
{
	struct ipa_rm_create_params create_params;
	struct ipa_rm_perf_profile profile;
	int result;
	int ret;

	/* Initialize IPA_RM workqueue */
	ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req");
	if (!ipa_rm_q6_workqueue)
		return -ENOMEM;

	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_Q6_PROD;
	create_params.reg_params.notify_cb = &q6_rm_notify_cb;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto create_rsrc_err1;
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_Q6_CONS;
	create_params.release_resource = &q6_rm_release_resource;
	create_params.request_resource = &q6_rm_request_resource;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto create_rsrc_err2;
	/* add dependency */
	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
		IPA_RM_RESOURCE_APPS_CONS);
	if (result)
		goto add_dpnd_err;
	/* setup performance profile */
	memset(&profile, 0, sizeof(profile));
	profile.max_supported_bandwidth_mbps = 100;
	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
		&profile);
	if (result)
		goto set_perf_err;
	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
		&profile);
	if (result)
		goto set_perf_err;
	return result;

set_perf_err:
	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
		IPA_RM_RESOURCE_APPS_CONS);
add_dpnd_err:
	/* keep the original error in 'result'; only log cleanup failures */
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, ret);
create_rsrc_err2:
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, ret);
create_rsrc_err1:
	destroy_workqueue(ipa_rm_q6_workqueue);
	return result;
}

void q6_deinitialize_rm(void)
{
	int ret;

	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
		IPA_RM_RESOURCE_APPS_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
			ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, ret);
	destroy_workqueue(ipa_rm_q6_workqueue);
}

static void wake_tx_queue(struct work_struct *work)
{
	if (ipa_netdevs[0]) {
		__netif_tx_lock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
		netif_wake_queue(ipa_netdevs[0]);
		__netif_tx_unlock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
	}
}
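
/*
 * Waking the queue under __netif_tx_lock_bh() serializes against a
 * concurrent ndo_start_xmit on tx queue 0, so the stack never observes
 * the wake racing with an in-progress transmit. This work item is
 * scheduled from ipa_rm_resource_granted() below.
 */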

/**
 * ipa_rm_resource_granted() - Called upon
 * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
 *
 * @dev: network device
 *
 * Return codes:
 * None
 */
static void ipa_rm_resource_granted(void *dev)
{
	IPAWANDBG("Resource Granted - starting queue\n");
	schedule_work(&ipa_tx_wakequeue_work);
}

/**
 * ipa_rm_notify() - Callback function for RM events. Handles
 * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
 * IPA_RM_RESOURCE_GRANTED is handled in the context of a shared
 * workqueue.
 *
 * @dev: network device
 * @event: IPA RM event
 * @data: Additional data provided by IPA RM
 *
 * Return codes:
 * None
 */
static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
	unsigned long data)
{
	struct wwan_private *wwan_ptr = netdev_priv(dev);

	pr_debug("%s: event %d\n", __func__, event);
	switch (event) {
	case IPA_RM_RESOURCE_GRANTED:
		if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
			complete_all(&wwan_ptr->resource_granted_completion);
			break;
		}
		ipa_rm_resource_granted(dev);
		break;
	case IPA_RM_RESOURCE_RELEASED:
		break;
	default:
		pr_err("%s: unknown event %d\n", __func__, event);
		break;
	}
}
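
/*
 * A GRANTED event is handled in one of two ways: while the device is
 * still WWAN_DEVICE_INACTIVE the grant answers a pending synchronous
 * request, so whoever is blocked on resource_granted_completion is
 * released; once the device is active, a grant instead means a stopped
 * tx queue may be restarted, which is deferred to the shared system
 * workqueue via ipa_tx_wakequeue_work.
 */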

/* IPA_RM related functions end */

static int ssr_notifier_cb(struct notifier_block *this,
	unsigned long code,
	void *data);

static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};

static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
	struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
	ipa_rmnet_drv_res->ipa_rmnet_ssr =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,rmnet-ipa-ssr");
	pr_info("IPA SSR support = %s\n",
		ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
	ipa_rmnet_drv_res->ipa_loaduC =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-loaduC");
	pr_info("IPA ipa-loaduC = %s\n",
		ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");

	ipa_rmnet_drv_res->ipa_advertise_sg_support =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-advertise-sg-support");
	pr_info("IPA SG support = %s\n",
		ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");

	ipa_rmnet_drv_res->ipa_napi_enable =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-napi-enable");
	pr_info("IPA Napi Enable = %s\n",
		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
	return 0;
}
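
/*
 * All four properties above are plain booleans, so a device-tree node
 * enabling everything might look like the following (illustrative
 * fragment only; node name and placement depend on the platform dtsi):
 *
 *	rmnet-ipa {
 *		compatible = "qcom,rmnet-ipa";
 *		qcom,rmnet-ipa-ssr;
 *		qcom,ipa-loaduC;
 *		qcom,ipa-advertise-sg-support;
 *		qcom,ipa-napi-enable;
 *	};
 *
 * Omitting a property leaves the corresponding feature disabled, since
 * of_property_read_bool() returns false for an absent property.
 */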

struct ipa_rmnet_context ipa_rmnet_ctx;

/**
 * ipa_wwan_probe() - Initializes the module and registers as a
 * network interface with the network stack
 *
 * Return codes:
 * 0: success
 * -ENOMEM: No memory available
 * -EFAULT: Internal error
 * -ENODEV: IPA driver not loaded
 */
static int ipa_wwan_probe(struct platform_device *pdev)
{
	int ret, i;
	struct net_device *dev;
	struct wwan_private *wwan_ptr;
	struct ipa_rm_create_params ipa_rm_params;	/* IPA_RM */
	struct ipa_rm_perf_profile profile;		/* IPA_RM */

	pr_info("rmnet_ipa started initialization\n");

	if (!ipa2_is_ready()) {
		IPAWANERR("IPA driver not loaded\n");
		return -ENODEV;
	}

	ret = get_ipa_rmnet_dts_configuration(pdev, &ipa_rmnet_res);
	ipa_rmnet_ctx.ipa_rmnet_ssr = ipa_rmnet_res.ipa_rmnet_ssr;

	ret = ipa_init_q6_smem();
	if (ret) {
		IPAWANERR("ipa_init_q6_smem failed!\n");
		return ret;
	}

	/* initialize tx/rx endpoint setup */
	memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
	memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));

	/* initialize ex property setup */
	num_q6_rule = 0;
	old_num_q6_rule = 0;
	rmnet_index = 0;
	egress_set = false;
	a7_ul_flt_set = false;
	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
		memset(&mux_channel[i], 0, sizeof(struct rmnet_mux_val));

	/* start A7 QMI service/client */
	if (ipa_rmnet_res.ipa_loaduC)
		/* Android platform loads uC */
		ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
	else
		/* uC is not loaded on the LE platform */
		ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);

	/* construct default WAN RT tbl for IPACM */
	ret = ipa_setup_a7_qmap_hdr();
	if (ret)
		goto setup_a7_qmap_hdr_err;
	ret = ipa_setup_dflt_wan_rt_tables();
	if (ret)
		goto setup_dflt_wan_rt_tables_err;

	if (!atomic_read(&is_ssr)) {
		/* Start transport-driver fd ioctl for ipacm for first init */
		ret = wan_ioctl_init();
		if (ret)
			goto wan_ioctl_init_err;
	} else {
		/* Enable sending QMI messages after SSR */
		wan_ioctl_enable_qmi_messages();
	}

	/* initialize wan-driver netdev */
	dev = alloc_netdev(sizeof(struct wwan_private),
		IPA_WWAN_DEV_NAME,
		NET_NAME_UNKNOWN,
		ipa_wwan_setup);
	if (!dev) {
		IPAWANERR("no memory for netdev\n");
		ret = -ENOMEM;
		goto alloc_netdev_err;
	}
	ipa_netdevs[0] = dev;
	wwan_ptr = netdev_priv(dev);
	memset(wwan_ptr, 0, sizeof(*wwan_ptr));
	IPAWANDBG("wwan_ptr (private) = %p", wwan_ptr);
	wwan_ptr->net = dev;
	wwan_ptr->outstanding_high_ctl = DEFAULT_OUTSTANDING_HIGH_CTL;
	wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
	wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW;
	atomic_set(&wwan_ptr->outstanding_pkts, 0);
	spin_lock_init(&wwan_ptr->lock);
	init_completion(&wwan_ptr->resource_granted_completion);

	if (!atomic_read(&is_ssr)) {
		/* IPA_RM configuration starts */
		ret = q6_initialize_rm();
		if (ret) {
			IPAWANERR("%s: q6_initialize_rm failed, ret: %d\n",
				__func__, ret);
			goto q6_init_err;
		}
	}

	memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
	ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
	ipa_rm_params.reg_params.user_data = dev;
	ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
	ret = ipa_rm_create_resource(&ipa_rm_params);
	if (ret) {
		pr_err("%s: unable to create resource %d in IPA RM\n",
			__func__, IPA_RM_RESOURCE_WWAN_0_PROD);
		goto create_rsrc_err;
	}
	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_INACTIVITY_TIMER);
	if (ret) {
		pr_err("%s: ipa rm timer init failed %d on resource %d\n",
			__func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
		goto timer_init_err;
	}
	/* add dependency */
	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret)
		goto add_dpnd_err;
	/* setup performance profile */
	memset(&profile, 0, sizeof(profile));
	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
		&profile);
	if (ret)
		goto set_perf_err;
	/* IPA_RM configuration ends */

	/* Enable SG support in netdevice. */
	if (ipa_rmnet_res.ipa_advertise_sg_support)
		dev->hw_features |= NETIF_F_SG;

	/* Enable NAPI support in netdevice. */
	if (ipa_rmnet_res.ipa_napi_enable) {
		netif_napi_add(dev, &(wwan_ptr->napi),
			ipa_rmnet_poll, NAPI_WEIGHT);
	}

	ret = register_netdev(dev);
	if (ret) {
		IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
			0, ret);
		goto set_perf_err;
	}

	IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n",
		ipa_netdevs[0]->name);
	if (ret) {
		IPAWANERR("default configuration failed rc=%d\n",
			ret);
		goto config_err;
	}
	atomic_set(&is_initialized, 1);
	if (!atomic_read(&is_ssr)) {
		/* offline charging mode */
		ipa2_proxy_clk_unvote();
	}
	atomic_set(&is_ssr, 0);

	pr_info("rmnet_ipa completed initialization\n");
	return 0;
config_err:
	if (ipa_rmnet_res.ipa_napi_enable)
		netif_napi_del(&(wwan_ptr->napi));
	unregister_netdev(ipa_netdevs[0]);
set_perf_err:
	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
			ret);
add_dpnd_err:
	ret = ipa_rm_inactivity_timer_destroy(
		IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
	if (ret)
		IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, ret);
timer_init_err:
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, ret);
create_rsrc_err:
	q6_deinitialize_rm();
q6_init_err:
	free_netdev(ipa_netdevs[0]);
	ipa_netdevs[0] = NULL;
alloc_netdev_err:
	wan_ioctl_deinit();
wan_ioctl_init_err:
	ipa_del_dflt_wan_rt_tables();
setup_dflt_wan_rt_tables_err:
	ipa_del_a7_qmap_hdr();
setup_a7_qmap_hdr_err:
	ipa_qmi_service_exit();
	atomic_set(&is_ssr, 0);
	return ret;
}

static int ipa_wwan_remove(struct platform_device *pdev)
{
	int ret;
	struct wwan_private *wwan_ptr;

	wwan_ptr = netdev_priv(ipa_netdevs[0]);

	pr_info("rmnet_ipa started deinitialization\n");
	mutex_lock(&ipa_to_apps_pipe_handle_guard);
	ret = ipa2_teardown_sys_pipe(ipa_to_apps_hdl);
	if (ret < 0)
		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
	else
		ipa_to_apps_hdl = -1;
	if (ipa_rmnet_res.ipa_napi_enable)
		netif_napi_del(&(wwan_ptr->napi));
	mutex_unlock(&ipa_to_apps_pipe_handle_guard);
	unregister_netdev(ipa_netdevs[0]);
	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
			ret);
	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR(
		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	cancel_work_sync(&ipa_tx_wakequeue_work);
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	free_netdev(ipa_netdevs[0]);
	ipa_netdevs[0] = NULL;
	/* No need to remove wwan_ioctl during SSR */
	if (!atomic_read(&is_ssr))
		wan_ioctl_deinit();
	ipa_del_dflt_wan_rt_tables();
	ipa_del_a7_qmap_hdr();
	ipa_del_mux_qmap_hdrs();
	if (ipa_qmi_ctx && ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false)
		wwan_del_ul_flt_rule_to_ipa();
	ipa_cleanup_deregister_intf();
	atomic_set(&is_initialized, 0);
	pr_info("rmnet_ipa completed deinitialization\n");
	return 0;
}
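
/*
 * Teardown note: the IPA->APPS pipe is torn down (and ipa_to_apps_hdl
 * reset to -1) while ipa_to_apps_pipe_handle_guard is held, so any path
 * that uses the handle under the same mutex sees either a live pipe or
 * the -1 sentinel, never a stale handle. wan_ioctl state is deliberately
 * kept across SSR; only a non-SSR remove deinitializes it.
 */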

/**
 * rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
 * @dev: pointer to device
 *
 * This callback is invoked by the runtime_pm framework when an AP suspend
 * operation is invoked, usually by pressing a suspend button.
 *
 * Returns -EAGAIN to the runtime_pm framework in case there are pending
 * packets in the Tx queue. This postpones the suspend operation until all
 * pending packets have been transmitted.
 *
 * In case there are no packets to send, releases the WWAN0_PROD entity.
 * As an outcome, the number of IPA active clients is decremented
 * until IPA clocks can be gated.
 */
static int rmnet_ipa_ap_suspend(struct device *dev)
{
	struct net_device *netdev = ipa_netdevs[0];
	struct wwan_private *wwan_ptr = netdev_priv(netdev);

	IPAWANDBG("Enter...\n");
	/* Do not allow A7 to suspend in case there are outstanding packets */
	if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
		IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
		return -EAGAIN;
	}

	/* Make sure that there is no Tx operation ongoing */
	netif_tx_lock_bh(netdev);
	ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	netif_tx_unlock_bh(netdev);
	IPAWANDBG("Exit\n");

	return 0;
}
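
/*
 * Despite the "runtime_pm" wording in the comments above, these callbacks
 * are wired into dev_pm_ops as suspend_noirq/resume_noirq (see
 * rmnet_ipa_pm_ops below), i.e. they run during system suspend. Returning
 * -EAGAIN from the noirq phase aborts the suspend attempt, which is how
 * outstanding Tx packets veto AP sleep here.
 */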

/**
 * rmnet_ipa_ap_resume() - resume callback for runtime_pm
 * @dev: pointer to device
 *
 * This callback is invoked by the runtime_pm framework when an AP resume
 * operation is invoked.
 *
 * Enables the network interface queue and returns success to the
 * runtime_pm framework.
 */
static int rmnet_ipa_ap_resume(struct device *dev)
{
	struct net_device *netdev = ipa_netdevs[0];

	IPAWANDBG("Enter...\n");
	netif_wake_queue(netdev);
	IPAWANDBG("Exit\n");

	return 0;
}

static void ipa_stop_polling_stats(void)
{
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	ipa_rmnet_ctx.polling_interval = 0;
}

static const struct of_device_id rmnet_ipa_dt_match[] = {
	{.compatible = "qcom,rmnet-ipa"},
	{},
};
MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);

static const struct dev_pm_ops rmnet_ipa_pm_ops = {
	.suspend_noirq = rmnet_ipa_ap_suspend,
	.resume_noirq = rmnet_ipa_ap_resume,
};

static struct platform_driver rmnet_ipa_driver = {
	.driver = {
		.name = "rmnet_ipa",
		.owner = THIS_MODULE,
		.pm = &rmnet_ipa_pm_ops,
		.of_match_table = rmnet_ipa_dt_match,
	},
	.probe = ipa_wwan_probe,
	.remove = ipa_wwan_remove,
};
static int ssr_notifier_cb(struct notifier_block *this,
	unsigned long code,
	void *data)
{
	if (ipa_rmnet_ctx.ipa_rmnet_ssr) {
		if (code == SUBSYS_BEFORE_SHUTDOWN) {
			pr_info("IPA received MPSS BEFORE_SHUTDOWN\n");
			atomic_set(&is_ssr, 1);
			ipa_q6_pre_shutdown_cleanup();
			if (ipa_netdevs[0])
				netif_stop_queue(ipa_netdevs[0]);
			ipa_qmi_stop_workqueues();
			wan_ioctl_stop_qmi_messages();
			ipa_stop_polling_stats();
			if (atomic_read(&is_initialized))
				platform_driver_unregister(&rmnet_ipa_driver);
			pr_info("IPA BEFORE_SHUTDOWN handling is complete\n");
			return NOTIFY_DONE;
		}
		if (code == SUBSYS_AFTER_SHUTDOWN) {
			pr_info("IPA received MPSS AFTER_SHUTDOWN\n");
			if (atomic_read(&is_ssr))
				ipa_q6_post_shutdown_cleanup();
			pr_info("IPA AFTER_SHUTDOWN handling is complete\n");
			return NOTIFY_DONE;
		}
		if (code == SUBSYS_AFTER_POWERUP) {
			pr_info("IPA received MPSS AFTER_POWERUP\n");
			if (!atomic_read(&is_initialized)
				&& atomic_read(&is_ssr))
				platform_driver_register(&rmnet_ipa_driver);
			pr_info("IPA AFTER_POWERUP handling is complete\n");
			return NOTIFY_DONE;
		}
		if (code == SUBSYS_BEFORE_POWERUP) {
			pr_info("IPA received MPSS BEFORE_POWERUP\n");
			if (atomic_read(&is_ssr))
				/* clean up cached QMI msg/handlers */
				ipa_qmi_service_exit();
			ipa2_proxy_clk_vote();
			pr_info("IPA BEFORE_POWERUP handling is complete\n");
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_DONE;
}
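
/*
 * SSR lifecycle as driven by the notifier above: BEFORE_SHUTDOWN quiesces
 * everything (marks is_ssr, stops the tx queue, QMI traffic and stats
 * polling) and unregisters the platform driver, which runs
 * ipa_wwan_remove(); AFTER_SHUTDOWN completes Q6 cleanup; BEFORE_POWERUP
 * drops cached QMI state and takes the proxy clock vote; AFTER_POWERUP
 * re-registers the driver so ipa_wwan_probe() runs again with is_ssr set.
 */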

/**
 * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa2_send_msg
 * @buff: pointer to buffer containing the message
 * @len: message length
 * @type: message type
 *
 * This function is invoked when ipa2_send_msg is complete (provided as a
 * free function pointer along with the message).
 */
static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAWANERR("Null buffer\n");
		return;
	}

	if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
		type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
		IPAWANERR("Wrong type given. buff %p type %d\n",
			buff, type);
	}
	kfree(buff);
}

/**
 * rmnet_ipa_get_stats_and_update() - Gets pipe stats from Modem
 * @reset: whether to reset the stats on the Modem after reading them
 *
 * This function queries the IPA Modem driver for the pipe stats
 * via QMI, and updates the user space IPA entity.
 */
static void rmnet_ipa_get_stats_and_update(bool reset)
{
	struct ipa_get_data_stats_req_msg_v01 req;
	struct ipa_get_data_stats_resp_msg_v01 *resp;
	struct ipa_msg_meta msg_meta;
	int rc;

	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
		GFP_KERNEL);
	if (!resp) {
		IPAWANERR("Can't allocate memory for stats message\n");
		return;
	}

	memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));

	req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
	if (reset) {
		req.reset_stats_valid = true;
		req.reset_stats = true;
		IPAWANERR("Get the latest pipe-stats and reset it\n");
	}

	rc = ipa_qmi_get_data_stats(&req, resp);
	if (rc) {
		IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc);
		kfree(resp);
		return;
	}

	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
	msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
	rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
	if (rc) {
		IPAWANERR("ipa2_send_msg failed: %d\n", rc);
		kfree(resp);
		return;
	}
}

/**
 * tethering_stats_poll_queue() - Stats polling function
 * @work: work entry
 *
 * This function is scheduled periodically (per the interval) in
 * order to poll the IPA Modem driver for the pipe stats.
 */
static void tethering_stats_poll_queue(struct work_struct *work)
{
	rmnet_ipa_get_stats_and_update(false);

	/* Schedule again only if there's an active polling interval */
	if (ipa_rmnet_ctx.polling_interval != 0)
		schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
			msecs_to_jiffies(ipa_rmnet_ctx.polling_interval*1000));
}

/**
 * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
 *
 * This function retrieves the data usage (used quota) from the IPA Modem
 * driver via QMI, and updates the user space IPA entity.
 */
static void rmnet_ipa_get_network_stats_and_update(void)
{
	struct ipa_get_apn_data_stats_req_msg_v01 req;
	struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
	struct ipa_msg_meta msg_meta;
	int rc;

	resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
		GFP_KERNEL);
	if (!resp) {
		IPAWANERR("Can't allocate memory for network stats message\n");
		return;
	}

	memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
	memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));

	req.mux_id_list_valid = true;
	req.mux_id_list_len = 1;
	req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id;

	rc = ipa_qmi_get_network_stats(&req, resp);
	if (rc) {
		IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc);
		kfree(resp);
		return;
	}

	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
	msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
	rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
	if (rc) {
		IPAWANERR("ipa2_send_msg failed: %d\n", rc);
		kfree(resp);
		return;
	}
}

/**
 * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler
 * @data: IOCTL data
 *
 * This function handles WAN_IOC_POLL_TETHERING_STATS.
 * In case the polling interval received is 0, polling stops (a poll
 * already in progress is allowed to finish); the network stats are then
 * fetched and the IPA user space entity is updated.
 *
 * Return codes:
 * 0: Success
 */
int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
{
	ipa_rmnet_ctx.polling_interval = data->polling_interval_secs;

	cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);

	if (ipa_rmnet_ctx.polling_interval == 0) {
		ipa_qmi_stop_data_qouta();
		rmnet_ipa_get_network_stats_and_update();
		rmnet_ipa_get_stats_and_update(true);
		return 0;
	}

	schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
	return 0;
}

/**
 * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler
 * @data: IOCTL data
 *
 * This function handles WAN_IOC_SET_DATA_QUOTA on the modem interface.
 * It translates the given interface name to the Modem MUX ID and
 * sends the quota request to the IPA Modem driver via QMI.
 *
 * Return codes:
 * 0: Success
 * -EFAULT: Invalid interface name provided
 * other: See ipa_qmi_set_data_quota
 */
static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data)
{
	u32 mux_id;
	int index;
	struct ipa_set_data_usage_quota_req_msg_v01 req;

	/* stop quota */
	if (!data->set_quota)
		ipa_qmi_stop_data_qouta();

	index = find_vchannel_name_index(data->interface_name);
	IPAWANERR("iface name %s, quota %lu\n",
		data->interface_name,
		(unsigned long int) data->quota_mbytes);

	if (index == MAX_NUM_OF_MUX_CHANNEL) {
		IPAWANERR("%s is an invalid iface name\n",
			data->interface_name);
		return -EFAULT;
	}

	mux_id = mux_channel[index].mux_id;

	ipa_rmnet_ctx.metered_mux_id = mux_id;

	memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
	req.apn_quota_list_valid = true;
	req.apn_quota_list_len = 1;
	req.apn_quota_list[0].mux_id = mux_id;
	req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;

	return ipa_qmi_set_data_quota(&req);
}
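
/*
 * The modem-side quota is expressed per MUX ID (i.e. per APN): the QMI
 * request above carries a single-entry apn_quota_list naming the MUX
 * channel that matched the interface name. The same MUX ID is cached in
 * ipa_rmnet_ctx.metered_mux_id so that the periodic stats query and the
 * quota-reached uevent can refer back to the metered interface.
 */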

static int rmnet_ipa_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
{
	struct ipa_set_wifi_quota wifi_quota;
	int rc = 0;

	memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
	wifi_quota.set_quota = data->set_quota;
	wifi_quota.quota_bytes = data->quota_mbytes;
	IPAWANDBG("iface name %s, quota %lu\n",
		data->interface_name,
		(unsigned long int) data->quota_mbytes);

	rc = ipa2_set_wlan_quota(&wifi_quota);
	/* check if wlan-fw takes this quota-set */
	if (!wifi_quota.set_valid)
		rc = -EFAULT;
	return rc;
}

/**
 * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
 * @data: IOCTL data
 *
 * This function handles WAN_IOC_SET_DATA_QUOTA.
 * It determines the upstream (backhaul) type from the given interface
 * name and delegates the quota request to the WLAN or Modem handler.
 *
 * Return codes:
 * 0: Success
 * -EFAULT: Invalid interface name provided
 * other: See ipa_qmi_set_data_quota
 */
int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
{
	enum ipa_upstream_type upstream_type;
	int rc = 0;

	/* get IPA backhaul type */
	upstream_type = find_upstream_type(data->interface_name);

	if (upstream_type == IPA_UPSTEAM_MAX) {
		IPAWANERR("upstream iface %s not supported\n",
			data->interface_name);
	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
		rc = rmnet_ipa_set_data_quota_wifi(data);
		if (rc) {
			IPAWANERR("set quota on wifi failed\n");
			return rc;
		}
	} else {
		rc = rmnet_ipa_set_data_quota_modem(data);
		if (rc) {
			IPAWANERR("set quota on modem failed\n");
			return rc;
		}
	}
	return rc;
}

/**
 * rmnet_ipa_set_tether_client_pipe() - Tether client pipe setting handler
 * @data: IOCTL data
 *
 * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE.
 * It records (or resets) the mapping between the given tethered IPA
 * client and its UL source / DL destination pipe lists.
 *
 * Return codes:
 * 0: Success
 * -EFAULT: Invalid src/dst pipes provided
 */
int rmnet_ipa_set_tether_client_pipe(
	struct wan_ioctl_set_tether_client_pipe *data)
{
	int number, i;

	/* error checking if ul_src_pipe_len valid or not */
	if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
		data->ul_src_pipe_len < 0) {
		IPAWANERR("UL src pipes %d exceeding max %d\n",
			data->ul_src_pipe_len,
			QMI_IPA_MAX_PIPES_V01);
		return -EFAULT;
	}
	/* error checking if dl_dst_pipe_len valid or not */
	if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
		data->dl_dst_pipe_len < 0) {
		IPAWANERR("DL dst pipes %d exceeding max %d\n",
			data->dl_dst_pipe_len,
			QMI_IPA_MAX_PIPES_V01);
		return -EFAULT;
	}

	IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
		data->ipa_client,
		data->ul_src_pipe_len,
		data->dl_dst_pipe_len,
		data->reset_client);
	number = data->ul_src_pipe_len;
	for (i = 0; i < number; i++) {
		IPAWANDBG("UL index-%d pipe %d\n", i,
			data->ul_src_pipe_list[i]);
		if (data->reset_client)
			ipa_set_client(data->ul_src_pipe_list[i],
				0, false);
		else
			ipa_set_client(data->ul_src_pipe_list[i],
				data->ipa_client, true);
	}
	number = data->dl_dst_pipe_len;
	for (i = 0; i < number; i++) {
		IPAWANDBG("DL index-%d pipe %d\n", i,
			data->dl_dst_pipe_list[i]);
		if (data->reset_client)
			ipa_set_client(data->dl_dst_pipe_list[i],
				0, false);
		else
			ipa_set_client(data->dl_dst_pipe_list[i],
				data->ipa_client, false);
	}
	return 0;
}

static int rmnet_ipa_query_tethering_stats_wifi(
	struct wan_ioctl_query_tether_stats *data, bool reset)
{
	struct ipa_get_wdi_sap_stats *sap_stats;
	int rc;

	sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
		GFP_KERNEL);
	if (!sap_stats)
		return -ENOMEM;

	sap_stats->reset_stats = reset;
	IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);

	rc = ipa2_get_wlan_stats(sap_stats);
	if (rc) {
		kfree(sap_stats);
		return rc;
	} else if (reset) {
		kfree(sap_stats);
		return 0;
	}

	if (sap_stats->stats_valid) {
		data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
		data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
		data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
		data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
		data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
		data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
		data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
		data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
	}

	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
		(unsigned long int) data->ipv4_rx_packets,
		(unsigned long int) data->ipv6_rx_packets,
		(unsigned long int) data->ipv4_rx_bytes,
		(unsigned long int) data->ipv6_rx_bytes);
	IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
		(unsigned long int) data->ipv4_tx_packets,
		(unsigned long int) data->ipv6_tx_packets,
		(unsigned long int) data->ipv4_tx_bytes,
		(unsigned long int) data->ipv6_tx_bytes);

	kfree(sap_stats);
	return rc;
}

int rmnet_ipa_query_tethering_stats_modem(
	struct wan_ioctl_query_tether_stats *data,
	bool reset
)
{
	struct ipa_get_data_stats_req_msg_v01 *req;
	struct ipa_get_data_stats_resp_msg_v01 *resp;
	int pipe_len, rc;

	req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
		GFP_KERNEL);
	if (!req) {
		IPAWANERR("failed to allocate memory for stats message\n");
		return -ENOMEM;
	}
	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
		GFP_KERNEL);
	if (!resp) {
		IPAWANERR("failed to allocate memory for stats message\n");
		kfree(req);
		return -ENOMEM;
	}
	memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));

	req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
	if (reset) {
		req->reset_stats_valid = true;
		req->reset_stats = true;
		IPAWANERR("reset the pipe stats\n");
	} else {
		/* print tethered-client enum */
		IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
	}

	rc = ipa_qmi_get_data_stats(req, resp);
	if (rc) {
		IPAWANERR("can't get ipa_qmi_get_data_stats\n");
		kfree(req);
		kfree(resp);
		return rc;
	} else if (reset) {
		kfree(req);
		kfree(resp);
		return 0;
	}

	if (resp->dl_dst_pipe_stats_list_valid) {
		for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
			pipe_len++) {
			IPAWANDBG("Check entry(%d) dl_dst_pipe(%d)\n",
				pipe_len, resp->dl_dst_pipe_stats_list
				[pipe_len].pipe_index);
			IPAWANDBG("dl_p_v4(%lu)v6(%lu) dl_b_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv4_packets,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv6_packets,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv4_bytes,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv6_bytes);
			if (ipa_get_client_uplink(resp->
				dl_dst_pipe_stats_list[pipe_len].
				pipe_index) == false) {
				if (data->ipa_client == ipa_get_client(resp->
					dl_dst_pipe_stats_list[pipe_len].
					pipe_index)) {
					/* update the DL stats */
					data->ipv4_rx_packets += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv4_packets;
					data->ipv6_rx_packets += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv6_packets;
					data->ipv4_rx_bytes += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv4_bytes;
					data->ipv6_rx_bytes += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv6_bytes;
				}
			}
		}
	}
	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
		(unsigned long int) data->ipv4_rx_packets,
		(unsigned long int) data->ipv6_rx_packets,
		(unsigned long int) data->ipv4_rx_bytes,
		(unsigned long int) data->ipv6_rx_bytes);

	if (resp->ul_src_pipe_stats_list_valid) {
		for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
			pipe_len++) {
			IPAWANDBG("Check entry(%d) ul_src_pipe(%d)\n",
				pipe_len,
				resp->ul_src_pipe_stats_list[pipe_len].
				pipe_index);
			IPAWANDBG("ul_p_v4(%lu)v6(%lu)ul_b_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv4_packets,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv6_packets,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv4_bytes,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv6_bytes);
			if (ipa_get_client_uplink(resp->
				ul_src_pipe_stats_list[pipe_len].
				pipe_index) == true) {
				if (data->ipa_client == ipa_get_client(resp->
					ul_src_pipe_stats_list[pipe_len].
					pipe_index)) {
					/* update the UL stats */
					data->ipv4_tx_packets += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv4_packets;
					data->ipv6_tx_packets += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv6_packets;
					data->ipv4_tx_bytes += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv4_bytes;
					data->ipv6_tx_bytes += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv6_bytes;
				}
			}
		}
	}
	IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
		(unsigned long int) data->ipv4_tx_packets,
		(unsigned long int) data->ipv6_tx_packets,
		(unsigned long int) data->ipv4_tx_bytes,
		(unsigned long int) data->ipv6_tx_bytes);
	kfree(req);
	kfree(resp);
	return 0;
}

int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
	bool reset)
{
	enum ipa_upstream_type upstream_type;
	int rc = 0;

	/* get IPA backhaul type */
	upstream_type = find_upstream_type(data->upstreamIface);

	if (upstream_type == IPA_UPSTEAM_MAX) {
		IPAWANERR("upstreamIface %s not supported\n",
			data->upstreamIface);
	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
		IPAWANDBG_LOW(" query wifi-backhaul stats\n");
		rc = rmnet_ipa_query_tethering_stats_wifi(
			data, false);
		if (rc) {
			IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
			return rc;
		}
	} else {
		IPAWANDBG_LOW(" query modem-backhaul stats\n");
		rc = rmnet_ipa_query_tethering_stats_modem(
			data, false);
		if (rc) {
			IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
			return rc;
		}
	}
	return rc;
}

int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
{
	enum ipa_upstream_type upstream_type;
	int rc = 0;

	/* get IPA backhaul type */
	upstream_type = find_upstream_type(data->upstreamIface);

	if (upstream_type == IPA_UPSTEAM_MAX) {
		IPAWANERR("upstream iface %s not supported\n",
			data->upstreamIface);
	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
		IPAWANDBG(" reset wifi-backhaul stats\n");
		rc = rmnet_ipa_query_tethering_stats_wifi(
			NULL, true);
		if (rc) {
			IPAWANERR("reset WLAN stats failed\n");
			return rc;
		}
	} else {
		IPAWANDBG(" reset modem-backhaul stats\n");
		rc = rmnet_ipa_query_tethering_stats_modem(
			NULL, true);
		if (rc) {
			IPAWANERR("reset MODEM stats failed\n");
			return rc;
		}
	}
	return rc;
}

/**
 * ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on quota
 * @mux_id: The MUX ID on which the quota has been reached
 * @upstream_type: The upstream (backhaul) type, Modem or WLAN
 *
 * This function broadcasts a Netlink event using the kobject of the
 * rmnet_ipa interface in order to alert the user space that the quota
 * on the specific interface which matches the mux_id has been reached.
 *
 */
void ipa_broadcast_quota_reach_ind(u32 mux_id,
	enum ipa_upstream_type upstream_type)
{
	char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
	char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
	char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
	char *envp[IPA_UEVENT_NUM_EVNP] = {
		alert_msg, iface_name_l, iface_name_m, NULL };
	int res;
	int index;

	/* check upstream_type */
	if (upstream_type == IPA_UPSTEAM_MAX) {
		IPAWANERR("upstreamIface type %d not supported\n",
			upstream_type);
		return;
	} else if (upstream_type == IPA_UPSTEAM_MODEM) {
		index = find_mux_channel_index(mux_id);
		if (index == MAX_NUM_OF_MUX_CHANNEL) {
			IPAWANERR("%u is an invalid mux ID\n", mux_id);
			return;
		}
	}

	res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
		"ALERT_NAME=%s", "quotaReachedAlert");
	if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
		IPAWANERR("message too long (%d)", res);
		return;
	}

	/* posting msg for L-release for CNE */
	if (upstream_type == IPA_UPSTEAM_MODEM) {
		res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
			"UPSTREAM=%s", mux_channel[index].vchannel_name);
	} else {
		res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
			"UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
	}
	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
		IPAWANERR("message too long (%d)", res);
		return;
	}

	/* posting msg for M-release for CNE */
	if (upstream_type == IPA_UPSTEAM_MODEM) {
		res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
			"INTERFACE=%s", mux_channel[index].vchannel_name);
	} else {
		res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
			"INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
	}
	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
		IPAWANERR("message too long (%d)", res);
		return;
	}

	IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
		alert_msg, iface_name_l, iface_name_m);
	kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp);
}
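
/*
 * The resulting uevent carries three environment strings. For a modem
 * backhaul whose MUX channel is named, say, "rmnet_data0" (hypothetical
 * example), user space would see:
 *
 *	ALERT_NAME=quotaReachedAlert
 *	UPSTREAM=rmnet_data0	(consumed by L-release CNE)
 *	INTERFACE=rmnet_data0	(consumed by M-release CNE)
 *
 * For a WLAN backhaul both UPSTREAM and INTERFACE are set to
 * IPA_UPSTEAM_WLAN_IFACE_NAME.
 */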

/**
 * ipa_q6_handshake_complete() - Perform operations once Q6 is up
 * @ssr_bootup: Indicates whether this is a cold boot-up or post-SSR.
 *
 * This function is invoked once the handshake between the IPA AP driver
 * and IPA Q6 driver is complete. At this point, it is possible to perform
 * operations which can't be performed until the IPA Q6 driver is up.
 *
 */
void ipa_q6_handshake_complete(bool ssr_bootup)
{
	if (ssr_bootup) {
		/*
		 * In case the uC is required to be loaded by the Modem,
		 * the proxy vote will be removed only when uC loading is
		 * complete and the indication is received by the AP. After
		 * SSR, uC is already loaded. Therefore, the proxy vote can
		 * be removed once Modem init is complete.
		 */
		ipa2_proxy_clk_unvote();

		/*
		 * It is required to recover the network stats after
		 * SSR recovery
		 */
		rmnet_ipa_get_network_stats_and_update();

		/* Enable HOLB monitoring on Q6 pipes. */
		ipa_q6_monitor_holb_mitigation(true);
	}
}

static int __init ipa_wwan_init(void)
{
	atomic_set(&is_initialized, 0);
	atomic_set(&is_ssr, 0);

	mutex_init(&ipa_to_apps_pipe_handle_guard);
	ipa_to_apps_hdl = -1;

	ipa_qmi_init();

	/* Register for Modem SSR */
	subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM,
		&ssr_notifier);
	if (!IS_ERR(subsys_notify_handle))
		return platform_driver_register(&rmnet_ipa_driver);
	else
		return (int)PTR_ERR(subsys_notify_handle);
}

static void __exit ipa_wwan_cleanup(void)
{
	int ret;

	ipa_qmi_cleanup();
	mutex_destroy(&ipa_to_apps_pipe_handle_guard);
	ret = subsys_notif_unregister_notifier(subsys_notify_handle,
		&ssr_notifier);
	if (ret)
		IPAWANERR(
		"Error subsys_notif_unregister_notifier system %s, ret=%d\n",
		SUBSYS_MODEM, ret);
	platform_driver_unregister(&rmnet_ipa_driver);
}

static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff)
		IPAWANERR("Null buffer.\n");
	kfree(buff);
}

static void ipa_rmnet_rx_cb(void *priv)
{
	struct net_device *dev = priv;
	struct wwan_private *wwan_ptr;

	IPAWANDBG("\n");

	if (dev != ipa_netdevs[0]) {
		IPAWANERR("Not matching with netdev\n");
		return;
	}

	wwan_ptr = netdev_priv(dev);
	napi_schedule(&(wwan_ptr->napi));
}

static int ipa_rmnet_poll(struct napi_struct *napi, int budget)
{
	int rcvd_pkts = 0;

	rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT);
	IPAWANDBG("rcvd packets: %d\n", rcvd_pkts);
	return rcvd_pkts;
}

late_initcall(ipa_wwan_init);
module_exit(ipa_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
MODULE_LICENSE("GPL v2");