blob: 0af9387ae7e6ca52375e89f2afc4e6d68a978f9f [file] [log] [blame]
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05301/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
Amir Levy9659e592016-10-27 18:08:27 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * WWAN Transport Network Driver.
15 */
16
17#include <linux/completion.h>
18#include <linux/errno.h>
19#include <linux/if_arp.h>
20#include <linux/interrupt.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
25#include <linux/of_device.h>
26#include <linux/string.h>
27#include <linux/skbuff.h>
28#include <linux/version.h>
29#include <linux/workqueue.h>
30#include <net/pkt_sched.h>
31#include <soc/qcom/subsystem_restart.h>
32#include <soc/qcom/subsystem_notif.h>
33#include "ipa_qmi_service.h"
34#include <linux/rmnet_ipa_fd_ioctl.h>
35#include <linux/ipa.h>
36#include <uapi/linux/net_map.h>
Gidon Studinski3021a6f2016-11-10 12:48:48 +020037#include <uapi/linux/msm_rmnet.h>
38#include <net/rmnet_config.h>
Amir Levy9659e592016-10-27 18:08:27 +030039
40#include "ipa_trace.h"
41
42#define WWAN_METADATA_SHFT 24
43#define WWAN_METADATA_MASK 0xFF000000
44#define WWAN_DATA_LEN 2000
45#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
46#define HEADROOM_FOR_QMAP 8 /* for mux header */
47#define TAILROOM 0 /* for padding by mux layer */
48#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
49#define UL_FILTER_RULE_HANDLE_START 69
50#define DEFAULT_OUTSTANDING_HIGH_CTL 96
51#define DEFAULT_OUTSTANDING_HIGH 64
52#define DEFAULT_OUTSTANDING_LOW 32
53
54#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +053055#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
56
Amir Levy9659e592016-10-27 18:08:27 +030057#define IPA_WWAN_DEVICE_COUNT (1)
58
59#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
60
61#define INVALID_MUX_ID 0xFF
62#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
63#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
64#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
65
66#define NAPI_WEIGHT 60
Sunil Paidimarri226cf032016-10-14 13:33:08 -070067#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024
Amir Levy9659e592016-10-27 18:08:27 +030068
69static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
70static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
71static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl;
72static struct rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
73static int num_q6_rule, old_num_q6_rule;
74static int rmnet_index;
75static bool egress_set, a7_ul_flt_set;
76static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/
77static atomic_t is_initialized;
78static atomic_t is_ssr;
79static void *subsys_notify_handle;
80
81u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */
82static struct mutex ipa_to_apps_pipe_handle_guard;
83static int wwan_add_ul_flt_rule_to_ipa(void);
84static int wwan_del_ul_flt_rule_to_ipa(void);
85static void ipa_wwan_msg_free_cb(void*, u32, u32);
86static void ipa_rmnet_rx_cb(void *priv);
87static int ipa_rmnet_poll(struct napi_struct *napi, int budget);
88
89static void wake_tx_queue(struct work_struct *work);
90static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue);
91
92static void tethering_stats_poll_queue(struct work_struct *work);
93static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
94 tethering_stats_poll_queue);
95
96enum wwan_device_status {
97 WWAN_DEVICE_INACTIVE = 0,
98 WWAN_DEVICE_ACTIVE = 1
99};
100
101struct ipa_rmnet_plat_drv_res {
102 bool ipa_rmnet_ssr;
103 bool ipa_loaduC;
104 bool ipa_advertise_sg_support;
105 bool ipa_napi_enable;
Sunil Paidimarri226cf032016-10-14 13:33:08 -0700106 u32 wan_rx_desc_size;
Amir Levy9659e592016-10-27 18:08:27 +0300107};
108
109static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
110/**
111 * struct wwan_private - WWAN private data
112 * @net: network interface struct implemented by this driver
113 * @stats: iface statistics
114 * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
115 * @outstanding_high: number of outstanding packets allowed
116 * @outstanding_low: number of outstanding packets which shall cause
117 * @ch_id: channel id
118 * @lock: spinlock for mutual exclusion
119 * @device_status: holds device status
120 *
121 * WWAN private - holds all relevant info about WWAN driver
122 */
123struct wwan_private {
124 struct net_device *net;
125 struct net_device_stats stats;
126 atomic_t outstanding_pkts;
127 int outstanding_high_ctl;
128 int outstanding_high;
129 int outstanding_low;
130 uint32_t ch_id;
131 spinlock_t lock;
132 struct completion resource_granted_completion;
133 enum wwan_device_status device_status;
134 struct napi_struct napi;
135};
136
137/**
138* ipa_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
139*
140* Return codes:
141* 0: success
142* -ENOMEM: failed to allocate memory
143* -EPERM: failed to add the tables
144*/
145static int ipa_setup_a7_qmap_hdr(void)
146{
147 struct ipa_ioc_add_hdr *hdr;
148 struct ipa_hdr_add *hdr_entry;
149 u32 pyld_sz;
150 int ret;
151
152 /* install the basic exception header */
153 pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
154 sizeof(struct ipa_hdr_add);
155 hdr = kzalloc(pyld_sz, GFP_KERNEL);
156 if (!hdr) {
157 IPAWANERR("fail to alloc exception hdr\n");
158 return -ENOMEM;
159 }
160 hdr->num_hdrs = 1;
161 hdr->commit = 1;
162 hdr_entry = &hdr->hdr[0];
163
164 strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
165 IPA_RESOURCE_NAME_MAX);
166 hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
167
168 if (ipa2_add_hdr(hdr)) {
169 IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
170 ret = -EPERM;
171 goto bail;
172 }
173
174 if (hdr_entry->status) {
175 IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
176 ret = -EPERM;
177 goto bail;
178 }
179 qmap_hdr_hdl = hdr_entry->hdr_hdl;
180
181 ret = 0;
182bail:
183 kfree(hdr);
184 return ret;
185}
186
187static void ipa_del_a7_qmap_hdr(void)
188{
189 struct ipa_ioc_del_hdr *del_hdr;
190 struct ipa_hdr_del *hdl_entry;
191 u32 pyld_sz;
192 int ret;
193
194 pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
195 sizeof(struct ipa_hdr_del);
196 del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
197 if (!del_hdr) {
198 IPAWANERR("fail to alloc exception hdr_del\n");
199 return;
200 }
201
202 del_hdr->commit = 1;
203 del_hdr->num_hdls = 1;
204 hdl_entry = &del_hdr->hdl[0];
205 hdl_entry->hdl = qmap_hdr_hdl;
206
207 ret = ipa2_del_hdr(del_hdr);
208 if (ret || hdl_entry->status)
209 IPAWANERR("ipa2_del_hdr failed\n");
210 else
211 IPAWANDBG("hdrs deletion done\n");
212
213 qmap_hdr_hdl = 0;
214 kfree(del_hdr);
215}
216
217static void ipa_del_qmap_hdr(uint32_t hdr_hdl)
218{
219 struct ipa_ioc_del_hdr *del_hdr;
220 struct ipa_hdr_del *hdl_entry;
221 u32 pyld_sz;
222 int ret;
223
224 if (hdr_hdl == 0) {
225 IPAWANERR("Invalid hdr_hdl provided\n");
226 return;
227 }
228
229 pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
230 sizeof(struct ipa_hdr_del);
231 del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
232 if (!del_hdr) {
233 IPAWANERR("fail to alloc exception hdr_del\n");
234 return;
235 }
236
237 del_hdr->commit = 1;
238 del_hdr->num_hdls = 1;
239 hdl_entry = &del_hdr->hdl[0];
240 hdl_entry->hdl = hdr_hdl;
241
242 ret = ipa2_del_hdr(del_hdr);
243 if (ret || hdl_entry->status)
244 IPAWANERR("ipa2_del_hdr failed\n");
245 else
246 IPAWANDBG("header deletion done\n");
247
248 qmap_hdr_hdl = 0;
249 kfree(del_hdr);
250}
251
252static void ipa_del_mux_qmap_hdrs(void)
253{
254 int index;
255
256 for (index = 0; index < rmnet_index; index++) {
257 ipa_del_qmap_hdr(mux_channel[index].hdr_hdl);
258 mux_channel[index].hdr_hdl = 0;
259 }
260}
261
262static int ipa_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
263{
264 struct ipa_ioc_add_hdr *hdr;
265 struct ipa_hdr_add *hdr_entry;
266 char hdr_name[IPA_RESOURCE_NAME_MAX];
267 u32 pyld_sz;
268 int ret;
269
270 pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
271 sizeof(struct ipa_hdr_add);
272 hdr = kzalloc(pyld_sz, GFP_KERNEL);
273 if (!hdr) {
274 IPAWANERR("fail to alloc exception hdr\n");
275 return -ENOMEM;
276 }
277 hdr->num_hdrs = 1;
278 hdr->commit = 1;
279 hdr_entry = &hdr->hdr[0];
280
281 snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
282 A2_MUX_HDR_NAME_V4_PREF,
283 mux_id);
284 strlcpy(hdr_entry->name, hdr_name,
285 IPA_RESOURCE_NAME_MAX);
286
287 hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
288 hdr_entry->hdr[1] = (uint8_t) mux_id;
289 IPAWANDBG("header (%s) with mux-id: (%d)\n",
290 hdr_name,
291 hdr_entry->hdr[1]);
292 if (ipa2_add_hdr(hdr)) {
293 IPAWANERR("fail to add IPA_QMAP hdr\n");
294 ret = -EPERM;
295 goto bail;
296 }
297
298 if (hdr_entry->status) {
299 IPAWANERR("fail to add IPA_QMAP hdr\n");
300 ret = -EPERM;
301 goto bail;
302 }
303
304 ret = 0;
305 *hdr_hdl = hdr_entry->hdr_hdl;
306bail:
307 kfree(hdr);
308 return ret;
309}
310
311/**
312* ipa_setup_dflt_wan_rt_tables() - Setup default wan routing tables
313*
314* Return codes:
315* 0: success
316* -ENOMEM: failed to allocate memory
317* -EPERM: failed to add the tables
318*/
319static int ipa_setup_dflt_wan_rt_tables(void)
320{
321 struct ipa_ioc_add_rt_rule *rt_rule;
322 struct ipa_rt_rule_add *rt_rule_entry;
323
324 rt_rule =
325 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
326 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
327 if (!rt_rule) {
328 IPAWANERR("fail to alloc mem\n");
329 return -ENOMEM;
330 }
331 /* setup a default v4 route to point to Apps */
332 rt_rule->num_rules = 1;
333 rt_rule->commit = 1;
334 rt_rule->ip = IPA_IP_v4;
335 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
336 IPA_RESOURCE_NAME_MAX);
337
338 rt_rule_entry = &rt_rule->rules[0];
339 rt_rule_entry->at_rear = 1;
340 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
341 rt_rule_entry->rule.hdr_hdl = qmap_hdr_hdl;
342
343 if (ipa2_add_rt_rule(rt_rule)) {
344 IPAWANERR("fail to add dflt_wan v4 rule\n");
345 kfree(rt_rule);
346 return -EPERM;
347 }
348
349 IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
350 dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
351
352 /* setup a default v6 route to point to A5 */
353 rt_rule->ip = IPA_IP_v6;
354 if (ipa2_add_rt_rule(rt_rule)) {
355 IPAWANERR("fail to add dflt_wan v6 rule\n");
356 kfree(rt_rule);
357 return -EPERM;
358 }
359 IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
360 dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
361
362 kfree(rt_rule);
363 return 0;
364}
365
366static void ipa_del_dflt_wan_rt_tables(void)
367{
368 struct ipa_ioc_del_rt_rule *rt_rule;
369 struct ipa_rt_rule_del *rt_rule_entry;
370 int len;
371
372 len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
373 sizeof(struct ipa_rt_rule_del);
374 rt_rule = kzalloc(len, GFP_KERNEL);
375 if (!rt_rule) {
376 IPAWANERR("unable to allocate memory for del route rule\n");
377 return;
378 }
379
380 memset(rt_rule, 0, len);
381 rt_rule->commit = 1;
382 rt_rule->num_hdls = 1;
383 rt_rule->ip = IPA_IP_v4;
384
385 rt_rule_entry = &rt_rule->hdl[0];
386 rt_rule_entry->status = -1;
387 rt_rule_entry->hdl = dflt_v4_wan_rt_hdl;
388
389 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
390 rt_rule_entry->hdl, IPA_IP_v4);
391 if (ipa2_del_rt_rule(rt_rule) ||
392 (rt_rule_entry->status)) {
393 IPAWANERR("Routing rule deletion failed!\n");
394 }
395
396 rt_rule->ip = IPA_IP_v6;
397 rt_rule_entry->hdl = dflt_v6_wan_rt_hdl;
398 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
399 rt_rule_entry->hdl, IPA_IP_v6);
400 if (ipa2_del_rt_rule(rt_rule) ||
401 (rt_rule_entry->status)) {
402 IPAWANERR("Routing rule deletion failed!\n");
403 }
404
405 kfree(rt_rule);
406}
407
/**
 * copy_ul_filter_rule_to_ipa() - cache modem UL filter rules locally
 * @rule_req: QMI install-filter request received from the modem
 * @rule_hdl: output array; entry i receives the handle synthesized for
 *            rule i (UL_FILTER_RULE_HANDLE_START + i)
 *
 * Copies every filter spec in @rule_req into the global
 * ipa_qmi_ctx->q6_ul_filter_rule[] cache, translating each QMI field
 * into the corresponding local eq_attrib equation field. Rules
 * referenced by the optional XLAT index list are flagged is_xlat_rule.
 *
 * NOTE(review): no lock is taken here while writing the shared
 * q6_ul_filter_rule[] cache — presumably the caller serializes against
 * wwan_add/del_ul_flt_rule_to_ipa(); confirm at the call sites.
 *
 * Return: 0 on success; -EINVAL when the request carries no rules,
 * overflows the cache, or contains out-of-range XLAT indices (the
 * cache is wiped and num_q6_rule reset on failure).
 */
int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
		*rule_req, uint32_t *rule_hdl)
{
	int i, j;

	if (rule_req->filter_spec_list_valid == true) {
		num_q6_rule = rule_req->filter_spec_list_len;
		IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
	} else {
		num_q6_rule = 0;
		IPAWANERR("got no UL rules from modem\n");
		return -EINVAL;
	}

	/* copy UL filter rules from Modem*/
	for (i = 0; i < num_q6_rule; i++) {
		/* check if rules overside the cache*/
		if (i == MAX_NUM_Q6_RULE) {
			IPAWANERR("Reaching (%d) max cache ",
				MAX_NUM_Q6_RULE);
			IPAWANERR(" however total (%d)\n",
				num_q6_rule);
			goto failure;
		}
		/* construct UL_filter_rule handler QMI use-cas */
		ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl =
			UL_FILTER_RULE_HANDLE_START + i;
		rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
		ipa_qmi_ctx->q6_ul_filter_rule[i].ip =
			rule_req->filter_spec_list[i].ip_type;
		ipa_qmi_ctx->q6_ul_filter_rule[i].action =
			rule_req->filter_spec_list[i].filter_action;
		if (rule_req->filter_spec_list[i].is_routing_table_index_valid
			== true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
			rule_req->filter_spec_list[i].route_table_index;
		if (rule_req->filter_spec_list[i].is_mux_id_valid == true)
			ipa_qmi_ctx->q6_ul_filter_rule[i].mux_id =
			rule_req->filter_spec_list[i].mux_id;
		/* flatten the QMI filter_rule into local eq_attrib:
		 * scalar equations first, then the variable-length
		 * equation arrays below
		 */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
			rule_req->filter_spec_list[i].filter_rule.
			rule_eq_bitmap;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			tos_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
			rule_req->filter_spec_list[i].filter_rule.tos_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			protocol_eq_present = rule_req->filter_spec_list[i].
			filter_rule.protocol_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
			rule_req->filter_spec_list[i].filter_rule.
			protocol_eq;

		/* 16-bit IHL-offset range equations */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_range_16;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_low = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].range_low;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_high = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_range_16[j].range_high;
		}
		/* 32-bit offset mask-equal equations */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].offset = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].mask = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].value = rule_req->filter_spec_list[i].
				filter_rule.offset_meq_32[j].value;
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
			rule_req->filter_spec_list[i].filter_rule.tc_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
			rule_req->filter_spec_list[i].filter_rule.tc_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
			rule_req->filter_spec_list[i].filter_rule.
			flow_eq_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
			rule_req->filter_spec_list[i].filter_rule.flow_eq;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_16.value;

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32_present = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.offset = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.value = rule_req->filter_spec_list[i].
			filter_rule.ihl_offset_eq_32.value;

		/* 32-bit IHL-offset mask-equal equations */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_meq_32 = rule_req->filter_spec_list[i].
			filter_rule.num_ihl_offset_meq_32;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].
			eq_attrib.num_ihl_offset_meq_32; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].offset;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].mask = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].mask;
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].value = rule_req->
				filter_spec_list[i].filter_rule.
				ihl_offset_meq_32[j].value;
		}
		/* 128-bit offset mask-equal equations (16-byte memcpy) */
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
			rule_req->filter_spec_list[i].filter_rule.
			num_offset_meq_128;
		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_128; j++) {
			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].offset = rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].offset;
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].mask,
				rule_req->filter_spec_list[i].
				filter_rule.offset_meq_128[j].mask, 16);
			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].value, rule_req->
				filter_spec_list[i].filter_rule.
				offset_meq_128[j].value, 16);
		}

		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32_present = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32_present;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.offset = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.offset;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.mask = rule_req->filter_spec_list[i].
			filter_rule.metadata_meq32.mask;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
			value = rule_req->filter_spec_list[i].filter_rule.
			metadata_meq32.value;
		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ipv4_frag_eq_present = rule_req->filter_spec_list[i].
			filter_rule.ipv4_frag_eq_present;
	}

	/* optional list of rule indices that are XLAT rules */
	if (rule_req->xlat_filter_indices_list_valid) {
		if (rule_req->xlat_filter_indices_list_len > num_q6_rule) {
			IPAWANERR("Number of xlat indices is not valid: %d\n",
					rule_req->xlat_filter_indices_list_len);
			goto failure;
		}
		IPAWANDBG("Receive %d XLAT indices: ",
				rule_req->xlat_filter_indices_list_len);
		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
			IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
		IPAWANDBG("\n");

		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
			if (rule_req->xlat_filter_indices_list[i]
				>= num_q6_rule) {
				IPAWANERR("Xlat rule idx is wrong: %d\n",
					rule_req->xlat_filter_indices_list[i]);
				goto failure;
			} else {
				ipa_qmi_ctx->q6_ul_filter_rule
				[rule_req->xlat_filter_indices_list[i]]
				.is_xlat_rule = 1;
				IPAWANDBG("Rule %d is xlat rule\n",
					rule_req->xlat_filter_indices_list[i]);
			}
		}
	}
	goto success;

failure:
	/* any error invalidates the whole cache */
	num_q6_rule = 0;
	memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
		sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
	return -EINVAL;

success:
	return 0;
}
617
618static int wwan_add_ul_flt_rule_to_ipa(void)
619{
620 u32 pyld_sz;
621 int i, retval = 0;
622 int num_v4_rule = 0, num_v6_rule = 0;
623 struct ipa_ioc_add_flt_rule *param;
624 struct ipa_flt_rule_add flt_rule_entry;
625 struct ipa_fltr_installed_notif_req_msg_v01 *req;
626
627 if (ipa_qmi_ctx == NULL) {
628 IPAWANERR("ipa_qmi_ctx is NULL!\n");
629 return -EFAULT;
630 }
631
632 pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
633 sizeof(struct ipa_flt_rule_add);
634 param = kzalloc(pyld_sz, GFP_KERNEL);
635 if (!param)
636 return -ENOMEM;
637
638 req = (struct ipa_fltr_installed_notif_req_msg_v01 *)
639 kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
640 GFP_KERNEL);
641 if (!req) {
642 kfree(param);
643 return -ENOMEM;
644 }
645
646 param->commit = 1;
647 param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
648 param->global = false;
649 param->num_rules = (uint8_t)1;
650
651 mutex_lock(&ipa_qmi_lock);
652 for (i = 0; i < num_q6_rule; i++) {
653 param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
654 memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
655 flt_rule_entry.at_rear = true;
656 flt_rule_entry.rule.action =
657 ipa_qmi_ctx->q6_ul_filter_rule[i].action;
658 flt_rule_entry.rule.rt_tbl_idx
659 = ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
660 flt_rule_entry.rule.retain_hdr = true;
661
662 /* debug rt-hdl*/
663 IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
664 i, flt_rule_entry.rule.rt_tbl_idx);
665 flt_rule_entry.rule.eq_attrib_type = true;
666 memcpy(&(flt_rule_entry.rule.eq_attrib),
667 &ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
668 sizeof(struct ipa_ipfltri_rule_eq));
669 memcpy(&(param->rules[0]), &flt_rule_entry,
670 sizeof(struct ipa_flt_rule_add));
671 if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
672 retval = -EFAULT;
673 IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
674 } else {
675 /* store the rule handler */
676 ipa_qmi_ctx->q6_ul_filter_rule_hdl[i] =
677 param->rules[0].flt_rule_hdl;
678 }
679 }
680 mutex_unlock(&ipa_qmi_lock);
681
682 /* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
683 req->source_pipe_index =
684 ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
685 req->install_status = QMI_RESULT_SUCCESS_V01;
686 req->filter_index_list_len = num_q6_rule;
687 mutex_lock(&ipa_qmi_lock);
688 for (i = 0; i < num_q6_rule; i++) {
689 if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) {
690 req->filter_index_list[i].filter_index = num_v4_rule;
691 num_v4_rule++;
692 } else {
693 req->filter_index_list[i].filter_index = num_v6_rule;
694 num_v6_rule++;
695 }
696 req->filter_index_list[i].filter_handle =
697 ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
698 }
699 mutex_unlock(&ipa_qmi_lock);
700 if (qmi_filter_notify_send(req)) {
701 IPAWANDBG("add filter rule index on A7-RX failed\n");
702 retval = -EFAULT;
703 }
704 old_num_q6_rule = num_q6_rule;
705 IPAWANDBG("add (%d) filter rule index on A7-RX\n",
706 old_num_q6_rule);
707 kfree(param);
708 kfree(req);
709 return retval;
710}
711
712static int wwan_del_ul_flt_rule_to_ipa(void)
713{
714 u32 pyld_sz;
715 int i, retval = 0;
716 struct ipa_ioc_del_flt_rule *param;
717 struct ipa_flt_rule_del flt_rule_entry;
718
719 pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
720 sizeof(struct ipa_flt_rule_del);
721 param = kzalloc(pyld_sz, GFP_KERNEL);
722 if (!param) {
723 IPAWANERR("kzalloc failed\n");
724 return -ENOMEM;
725 }
726
727 param->commit = 1;
728 param->num_hdls = (uint8_t) 1;
729
730 for (i = 0; i < old_num_q6_rule; i++) {
731 param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
732 memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
733 flt_rule_entry.hdl = ipa_qmi_ctx->q6_ul_filter_rule_hdl[i];
734 /* debug rt-hdl*/
735 IPAWANDBG("delete-IPA rule index(%d)\n", i);
736 memcpy(&(param->hdl[0]), &flt_rule_entry,
737 sizeof(struct ipa_flt_rule_del));
738 if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
739 IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
740 kfree(param);
741 return -EFAULT;
742 }
743 }
744
745 /* set UL filter-rule add-indication */
746 a7_ul_flt_set = false;
747 old_num_q6_rule = 0;
748
749 kfree(param);
750 return retval;
751}
752
753static int find_mux_channel_index(uint32_t mux_id)
754{
755 int i;
756
757 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
758 if (mux_id == mux_channel[i].mux_id)
759 return i;
760 }
761 return MAX_NUM_OF_MUX_CHANNEL;
762}
763
764static int find_vchannel_name_index(const char *vchannel_name)
765{
766 int i;
767
768 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
769 if (strcmp(mux_channel[i].vchannel_name, vchannel_name == 0))
770 return i;
771 }
772 return MAX_NUM_OF_MUX_CHANNEL;
773}
774
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +0530775static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
776{
777 int i;
778
779 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
780 if (strcmp(mux_channel[i].vchannel_name,
781 upstreamIface) == 0)
782 return IPA_UPSTEAM_MODEM;
783 }
784
785 if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
786 return IPA_UPSTEAM_WLAN;
787 else
788 return IPA_UPSTEAM_MAX;
789}
790
Amir Levy9659e592016-10-27 18:08:27 +0300791static int wwan_register_to_ipa(int index)
792{
793 struct ipa_tx_intf tx_properties = {0};
794 struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
795 struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
796 struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
797 struct ipa_rx_intf rx_properties = {0};
798 struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
799 struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
800 struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
801 struct ipa_ext_intf ext_properties = {0};
802 struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
803 u32 pyld_sz;
804 int ret = 0, i;
805
806 IPAWANDBG("index(%d) device[%s]:\n", index,
807 mux_channel[index].vchannel_name);
808 if (!mux_channel[index].mux_hdr_set) {
809 ret = ipa_add_qmap_hdr(mux_channel[index].mux_id,
810 &mux_channel[index].hdr_hdl);
811 if (ret) {
812 IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
813 return ret;
814 }
815 mux_channel[index].mux_hdr_set = true;
816 }
817 tx_properties.prop = tx_ioc_properties;
818 tx_ipv4_property = &tx_properties.prop[0];
819 tx_ipv4_property->ip = IPA_IP_v4;
820 tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
821 snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
822 A2_MUX_HDR_NAME_V4_PREF,
823 mux_channel[index].mux_id);
824 tx_ipv6_property = &tx_properties.prop[1];
825 tx_ipv6_property->ip = IPA_IP_v6;
826 tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
827 /* no need use A2_MUX_HDR_NAME_V6_PREF, same header */
828 snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
829 A2_MUX_HDR_NAME_V4_PREF,
830 mux_channel[index].mux_id);
831 tx_properties.num_props = 2;
832
833 rx_properties.prop = rx_ioc_properties;
834 rx_ipv4_property = &rx_properties.prop[0];
835 rx_ipv4_property->ip = IPA_IP_v4;
836 rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
837 rx_ipv4_property->attrib.meta_data =
838 mux_channel[index].mux_id << WWAN_METADATA_SHFT;
839 rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
840 rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
841 rx_ipv6_property = &rx_properties.prop[1];
842 rx_ipv6_property->ip = IPA_IP_v6;
843 rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
844 rx_ipv6_property->attrib.meta_data =
845 mux_channel[index].mux_id << WWAN_METADATA_SHFT;
846 rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
847 rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
848 rx_properties.num_props = 2;
849
850 pyld_sz = num_q6_rule *
851 sizeof(struct ipa_ioc_ext_intf_prop);
852 ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
853 if (!ext_ioc_properties) {
854 IPAWANERR("Error allocate memory\n");
855 return -ENOMEM;
856 }
857
858 ext_properties.prop = ext_ioc_properties;
859 ext_properties.excp_pipe_valid = true;
860 ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
861 ext_properties.num_props = num_q6_rule;
862 for (i = 0; i < num_q6_rule; i++) {
863 memcpy(&(ext_properties.prop[i]),
864 &(ipa_qmi_ctx->q6_ul_filter_rule[i]),
865 sizeof(struct ipa_ioc_ext_intf_prop));
866 ext_properties.prop[i].mux_id = mux_channel[index].mux_id;
867 IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
868 ext_properties.prop[i].ip,
869 ext_properties.prop[i].rt_tbl_idx);
870 IPAWANDBG("action: %d mux:%d\n",
871 ext_properties.prop[i].action,
872 ext_properties.prop[i].mux_id);
873 }
874 ret = ipa2_register_intf_ext(mux_channel[index].
875 vchannel_name, &tx_properties,
876 &rx_properties, &ext_properties);
877 if (ret) {
878 IPAWANERR("[%s]:ipa2_register_intf failed %d\n",
879 mux_channel[index].vchannel_name, ret);
880 goto fail;
881 }
882 mux_channel[index].ul_flt_reg = true;
883fail:
884 kfree(ext_ioc_properties);
885 return ret;
886}
887
888static void ipa_cleanup_deregister_intf(void)
889{
890 int i;
891 int ret;
892
893 for (i = 0; i < rmnet_index; i++) {
894 if (mux_channel[i].ul_flt_reg) {
895 ret = ipa2_deregister_intf(
896 mux_channel[i].vchannel_name);
897 if (ret < 0) {
898 IPAWANERR("de-register device %s(%d) failed\n",
899 mux_channel[i].vchannel_name,
900 i);
901 return;
902 }
903 IPAWANDBG("de-register device %s(%d) success\n",
904 mux_channel[i].vchannel_name,
905 i);
906 }
907 mux_channel[i].ul_flt_reg = false;
908 }
909}
910
/**
 * wwan_update_mux_channel_prop() - reinstall UL filter rules and
 * re-register every mux channel's Tx/Rx/Ext properties with IPA
 *
 * When the egress pipe is configured and the modem does not filter on
 * its embedded pipe, the old A7 UL rules (if any) are deleted and the
 * cached Q6 rules reinstalled. All registered interfaces are then
 * deregistered and re-registered so their Ext properties pick up the
 * fresh rules.
 *
 * NOTE(review): when modem_cfg_emb_pipe_flt is true the inner block is
 * skipped, ret stays 0, and a7_ul_flt_set is still set to true —
 * confirm this is intended.
 *
 * Return: 0 on success, -EINVAL on rule-deletion failure, -ENODEV on
 * re-registration failure, or the last rule-install status.
 */
int wwan_update_mux_channel_prop(void)
{
	int ret = 0, i;
	/* install UL filter rules */
	if (egress_set) {
		if (ipa_qmi_ctx &&
			ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
			IPAWANDBG("setup UL filter rules\n");
			if (a7_ul_flt_set) {
				IPAWANDBG("del previous UL filter rules\n");
				/* delete rule hdlers */
				ret = wwan_del_ul_flt_rule_to_ipa();
				if (ret) {
					IPAWANERR("failed to del old rules\n");
					return -EINVAL;
				}
				IPAWANDBG("deleted old UL rules\n");
			}
			ret = wwan_add_ul_flt_rule_to_ipa();
		}
		if (ret)
			IPAWANERR("failed to install UL rules\n");
		else
			a7_ul_flt_set = true;
	}
	/* update Tx/Rx/Ext property */
	IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
	if (rmnet_index == 0) {
		IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
		return ret;
	}

	ipa_cleanup_deregister_intf();

	/* re-register every channel so Ext props reflect new rules */
	for (i = 0; i < rmnet_index; i++) {
		ret = wwan_register_to_ipa(i);
		if (ret < 0) {
			IPAWANERR("failed to re-regist %s, mux %d, index %d\n",
				mux_channel[i].vchannel_name,
				mux_channel[i].mux_id,
				i);
			return -ENODEV;
		}
		IPAWANERR("dev(%s) has registered to IPA\n",
			mux_channel[i].vchannel_name);
		mux_channel[i].ul_flt_reg = true;
	}
	return ret;
}
960
/* Compatibility shim: older kernels expose INIT_COMPLETION() instead of
 * reinit_completion(); map the new name onto the old API so this file
 * builds on both.
 */
#ifdef INIT_COMPLETION
#define reinit_completion(x) INIT_COMPLETION(*(x))
#endif /* INIT_COMPLETION */
964
965static int __ipa_wwan_open(struct net_device *dev)
966{
967 struct wwan_private *wwan_ptr = netdev_priv(dev);
968
969 IPAWANDBG("[%s] __wwan_open()\n", dev->name);
970 if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
971 reinit_completion(&wwan_ptr->resource_granted_completion);
972 wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
973
974 if (ipa_rmnet_res.ipa_napi_enable)
975 napi_enable(&(wwan_ptr->napi));
976 return 0;
977}
978
979/**
980 * wwan_open() - Opens the wwan network interface. Opens logical
981 * channel on A2 MUX driver and starts the network stack queue
982 *
983 * @dev: network device
984 *
985 * Return codes:
986 * 0: success
987 * -ENODEV: Error while opening logical channel on A2 MUX driver
988 */
989static int ipa_wwan_open(struct net_device *dev)
990{
991 int rc = 0;
992
993 IPAWANDBG("[%s] wwan_open()\n", dev->name);
994 rc = __ipa_wwan_open(dev);
995 if (rc == 0)
996 netif_start_queue(dev);
997 return rc;
998}
999
1000static int __ipa_wwan_close(struct net_device *dev)
1001{
1002 struct wwan_private *wwan_ptr = netdev_priv(dev);
1003 int rc = 0;
1004
1005 if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
1006 wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
1007 /* do not close wwan port once up, this causes
1008 * remote side to hang if tried to open again
1009 */
1010 reinit_completion(&wwan_ptr->resource_granted_completion);
1011 if (ipa_rmnet_res.ipa_napi_enable)
1012 napi_disable(&(wwan_ptr->napi));
1013 rc = ipa2_deregister_intf(dev->name);
1014 if (rc) {
1015 IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n",
1016 dev->name, rc);
1017 return rc;
1018 }
1019 return rc;
1020 } else {
1021 return -EBADF;
1022 }
1023}
1024
1025/**
1026 * ipa_wwan_stop() - Stops the wwan network interface. Closes
1027 * logical channel on A2 MUX driver and stops the network stack
1028 * queue
1029 *
1030 * @dev: network device
1031 *
1032 * Return codes:
1033 * 0: success
1034 * -ENODEV: Error while opening logical channel on A2 MUX driver
1035 */
1036static int ipa_wwan_stop(struct net_device *dev)
1037{
1038 IPAWANDBG("[%s] ipa_wwan_stop()\n", dev->name);
1039 __ipa_wwan_close(dev);
1040 netif_stop_queue(dev);
1041 return 0;
1042}
1043
1044static int ipa_wwan_change_mtu(struct net_device *dev, int new_mtu)
1045{
1046 if (0 > new_mtu || WWAN_DATA_LEN < new_mtu)
1047 return -EINVAL;
1048 IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
1049 dev->name, dev->mtu, new_mtu);
1050 dev->mtu = new_mtu;
1051 return 0;
1052}
1053
1054/**
1055 * ipa_wwan_xmit() - Transmits an skb.
1056 *
1057 * @skb: skb to be transmitted
1058 * @dev: network device
1059 *
1060 * Return codes:
1061 * 0: success
1062 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
1063 * later
1064 * -EFAULT: Error while transmitting the skb
1065 */
static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int ret = 0;
	bool qmap_check;
	struct wwan_private *wwan_ptr = netdev_priv(dev);
	struct ipa_tx_meta meta;

	/* Only QMAP-framed packets are accepted; anything else is dropped
	 * (counted in tx_dropped) and reported as handled to the stack.
	 */
	if (skb->protocol != htons(ETH_P_MAP)) {
		IPAWANDBG
		("SW filtering out none QMAP packet received from %s",
		current->comm);
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* CD bit set means this is a QMAP command (control) packet */
	qmap_check = RMNET_MAP_GET_CD_BIT(skb);
	if (netif_queue_stopped(dev)) {
		/* While stopped, still let control packets through up to the
		 * higher control-watermark; data packets get TX_BUSY.
		 */
		if (qmap_check &&
			atomic_read(&wwan_ptr->outstanding_pkts) <
					wwan_ptr->outstanding_high_ctl) {
			pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
			goto send;
		} else {
			pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
			return NETDEV_TX_BUSY;
		}
	}

	/* checking High WM hit */
	if (atomic_read(&wwan_ptr->outstanding_pkts) >=
		wwan_ptr->outstanding_high) {
		if (!qmap_check) {
			/* High watermark reached by a data packet: stop the
			 * queue; completion callback wakes it at the low WM.
			 */
			IPAWANDBG("pending(%d)/(%d)- stop(%d), qmap_chk(%d)\n",
				atomic_read(&wwan_ptr->outstanding_pkts),
				wwan_ptr->outstanding_high,
				netif_queue_stopped(dev),
				qmap_check);
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;
		}
	}

send:
	/* IPA_RM checking start */
	/* -EINPROGRESS means the resource grant is pending; the RM grant
	 * callback will wake the queue, so ask the stack to retry.
	 */
	ret = ipa_rm_inactivity_timer_request_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret == -EINPROGRESS) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	if (ret) {
		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
		       dev->name, ret);
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return -EFAULT;
	}
	/* IPA_RM checking end */

	/* Control packets are steered to Q6 with packet-init metadata;
	 * data packets go through the regular APPS LAN/WAN producer.
	 */
	if (qmap_check) {
		memset(&meta, 0, sizeof(meta));
		meta.pkt_init_dst_ep_valid = true;
		meta.pkt_init_dst_ep_remote = true;
		ret = ipa2_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
	} else {
		ret = ipa2_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
	}

	if (ret) {
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Outstanding count is decremented in apps_ipa_tx_complete_notify() */
	atomic_inc(&wwan_ptr->outstanding_pkts);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	ret = NETDEV_TX_OK;
out:
	/* Release pairs with the request above; the inactivity timer keeps
	 * the resource up between closely spaced transmissions.
	 */
	ipa_rm_inactivity_timer_release_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
	return ret;
}
1149
/* ndo_tx_timeout handler: log the UL data stall; no recovery is attempted */
static void ipa_wwan_tx_timeout(struct net_device *dev)
{
	IPAWANERR("[%s] ipa_wwan_tx_timeout(), data stall in UL\n", dev->name);
}
1154
1155/**
1156 * apps_ipa_tx_complete_notify() - Rx notify
1157 *
1158 * @priv: driver context
1159 * @evt: event type
1160 * @data: data provided with event
1161 *
1162 * Check that the packet is the one we sent and release it
1163 * This function will be called in defered context in IPA wq.
1164 */
static void apps_ipa_tx_complete_notify(void *priv,
		enum ipa_dp_evt_type evt,
		unsigned long data)
{
	struct sk_buff *skb = (struct sk_buff *)data;
	struct net_device *dev = (struct net_device *)priv;
	struct wwan_private *wwan_ptr;

	/* Completion for a netdev other than the current one: the skb was
	 * queued before an SSR; just free it without touching counters.
	 */
	if (dev != ipa_netdevs[0]) {
		IPAWANDBG("Received pre-SSR packet completion\n");
		dev_kfree_skb_any(skb);
		return;
	}

	if (evt != IPA_WRITE_DONE) {
		IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return;
	}

	wwan_ptr = netdev_priv(dev);
	atomic_dec(&wwan_ptr->outstanding_pkts);
	/* Take the TX queue lock so the wake decision below is consistent
	 * with the xmit path's stop decision at the high watermark.
	 */
	__netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
	if (!atomic_read(&is_ssr) &&
		netif_queue_stopped(wwan_ptr->net) &&
		atomic_read(&wwan_ptr->outstanding_pkts) <
					(wwan_ptr->outstanding_low)) {
		IPAWANDBG("Outstanding low (%d) - wake up queue\n",
				wwan_ptr->outstanding_low);
		netif_wake_queue(wwan_ptr->net);
	}
	__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
	dev_kfree_skb_any(skb);
	/* Balance the RM resource request taken in ipa_wwan_xmit() */
	ipa_rm_inactivity_timer_release_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
}
1202
1203/**
1204 * apps_ipa_packet_receive_notify() - Rx notify
1205 *
1206 * @priv: driver context
1207 * @evt: event type
1208 * @data: data provided with event
1209 *
1210 * IPA will pass a packet to the Linux network stack with skb->data
1211 */
static void apps_ipa_packet_receive_notify(void *priv,
		enum ipa_dp_evt_type evt,
		unsigned long data)
{
	struct net_device *dev = (struct net_device *)priv;

	if (evt == IPA_RECEIVE) {
		struct sk_buff *skb = (struct sk_buff *)data;
		int result;
		/* Cache the length before handing the skb to the stack,
		 * which may free or modify it.
		 */
		unsigned int packet_len = skb->len;

		IPAWANDBG("Rx packet was received");
		skb->dev = ipa_netdevs[0];
		skb->protocol = htons(ETH_P_MAP);

		if (ipa_rmnet_res.ipa_napi_enable) {
			/* NAPI context: deliver directly */
			trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets);
			result = netif_receive_skb(skb);
		} else {
			/* Non-NAPI: every IPA_WWAN_RX_SOFTIRQ_THRESH-th
			 * packet goes via netif_rx_ni() to yield and let
			 * softirqs run, the rest via netif_rx().
			 */
			if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
					== 0) {
				trace_rmnet_ipa_netifni(dev->stats.rx_packets);
				result = netif_rx_ni(skb);
			} else {
				trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
				result = netif_rx(skb);
			}
		}

		if (result)	{
			pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
							   __func__, __LINE__);
			dev->stats.rx_dropped++;
		}
		/* NOTE(review): rx_packets/rx_bytes are bumped even when the
		 * stack rejected the skb (rx_dropped also incremented above).
		 */
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += packet_len;
	} else if (evt == IPA_CLIENT_START_POLL)
		ipa_rmnet_rx_cb(priv);
	else if (evt == IPA_CLIENT_COMP_NAPI) {
		struct wwan_private *wwan_ptr = netdev_priv(dev);

		if (ipa_rmnet_res.ipa_napi_enable)
			napi_complete(&(wwan_ptr->napi));
	} else
		IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);

}
1259
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001260static int handle_ingress_format(struct net_device *dev,
1261 struct rmnet_ioctl_extended_s *in)
1262{
1263 int ret = 0;
1264 struct rmnet_phys_ep_conf_s *ep_cfg;
1265
1266 IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
1267 if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
1268 ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en =
1269 IPA_ENABLE_CS_OFFLOAD_DL;
1270
1271 if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
1272 IPAWANERR("get AGG size %d count %d\n",
1273 in->u.ingress_format.agg_size,
1274 in->u.ingress_format.agg_count);
1275
1276 ret = ipa_disable_apps_wan_cons_deaggr(
1277 in->u.ingress_format.agg_size,
1278 in->u.ingress_format.agg_count);
1279
1280 if (!ret) {
1281 ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit =
1282 in->u.ingress_format.agg_size;
1283 ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
1284 in->u.ingress_format.agg_count;
1285
1286 if (ipa_rmnet_res.ipa_napi_enable) {
1287 ipa_to_apps_ep_cfg.recycle_enabled = true;
1288 ep_cfg = (struct rmnet_phys_ep_conf_s *)
1289 rcu_dereference(dev->rx_handler_data);
1290 ep_cfg->recycle = ipa_recycle_wan_skb;
1291 pr_info("Wan Recycle Enabled\n");
1292 }
1293 }
1294 }
1295
1296 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
1297 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
1298 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
1299 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
1300 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
1301
1302 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
1303 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
1304 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding =
1305 true;
1306 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
1307 ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
1308 ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
1309
1310 ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
1311 ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify;
1312 ipa_to_apps_ep_cfg.priv = dev;
1313
1314 ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
Sunil Paidimarri226cf032016-10-14 13:33:08 -07001315 ipa_to_apps_ep_cfg.desc_fifo_sz =
1316 ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001317
1318 mutex_lock(&ipa_to_apps_pipe_handle_guard);
1319 if (atomic_read(&is_ssr)) {
1320 IPAWANDBG("In SSR sequence/recovery\n");
1321 mutex_unlock(&ipa_to_apps_pipe_handle_guard);
1322 return -EFAULT;
1323 }
1324 ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
1325 mutex_unlock(&ipa_to_apps_pipe_handle_guard);
1326
1327 if (ret)
1328 IPAWANERR("failed to configure ingress\n");
1329
1330 return ret;
1331}
1332
Amir Levy9659e592016-10-27 18:08:27 +03001333/**
1334 * ipa_wwan_ioctl() - I/O control for wwan network driver.
1335 *
1336 * @dev: network device
1337 * @ifr: ignored
1338 * @cmd: cmd to be excecuded. can be one of the following:
1339 * IPA_WWAN_IOCTL_OPEN - Open the network interface
1340 * IPA_WWAN_IOCTL_CLOSE - Close the network interface
1341 *
1342 * Return codes:
1343 * 0: success
1344 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
1345 * later
1346 * -EFAULT: Error while transmitting the skb
1347 */
1348static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1349{
1350 int rc = 0;
1351 int mru = 1000, epid = 1, mux_index, len;
1352 struct ipa_msg_meta msg_meta;
1353 struct ipa_wan_msg *wan_msg = NULL;
1354 struct rmnet_ioctl_extended_s extend_ioctl_data;
1355 struct rmnet_ioctl_data_s ioctl_data;
1356
1357 IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
1358 switch (cmd) {
1359 /* Set Ethernet protocol */
1360 case RMNET_IOCTL_SET_LLP_ETHERNET:
1361 break;
1362 /* Set RAWIP protocol */
1363 case RMNET_IOCTL_SET_LLP_IP:
1364 break;
1365 /* Get link protocol */
1366 case RMNET_IOCTL_GET_LLP:
1367 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
1368 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1369 sizeof(struct rmnet_ioctl_data_s)))
1370 rc = -EFAULT;
1371 break;
1372 /* Set QoS header enabled */
1373 case RMNET_IOCTL_SET_QOS_ENABLE:
1374 return -EINVAL;
1375 /* Set QoS header disabled */
1376 case RMNET_IOCTL_SET_QOS_DISABLE:
1377 break;
1378 /* Get QoS header state */
1379 case RMNET_IOCTL_GET_QOS:
1380 ioctl_data.u.operation_mode = RMNET_MODE_NONE;
1381 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1382 sizeof(struct rmnet_ioctl_data_s)))
1383 rc = -EFAULT;
1384 break;
1385 /* Get operation mode */
1386 case RMNET_IOCTL_GET_OPMODE:
1387 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
1388 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1389 sizeof(struct rmnet_ioctl_data_s)))
1390 rc = -EFAULT;
1391 break;
1392 /* Open transport port */
1393 case RMNET_IOCTL_OPEN:
1394 break;
1395 /* Close transport port */
1396 case RMNET_IOCTL_CLOSE:
1397 break;
1398 /* Flow enable */
1399 case RMNET_IOCTL_FLOW_ENABLE:
1400 IPAWANDBG("Received flow enable\n");
1401 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
1402 sizeof(struct rmnet_ioctl_data_s))) {
1403 rc = -EFAULT;
1404 break;
1405 }
1406 ipa_flow_control(IPA_CLIENT_USB_PROD, true,
1407 ioctl_data.u.tcm_handle);
1408 break;
1409 /* Flow disable */
1410 case RMNET_IOCTL_FLOW_DISABLE:
1411 IPAWANDBG("Received flow disable\n");
1412 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
1413 sizeof(struct rmnet_ioctl_data_s))) {
1414 rc = -EFAULT;
1415 break;
1416 }
1417 ipa_flow_control(IPA_CLIENT_USB_PROD, false,
1418 ioctl_data.u.tcm_handle);
1419 break;
1420 /* Set flow handle */
1421 case RMNET_IOCTL_FLOW_SET_HNDL:
1422 break;
1423
1424 /* Extended IOCTLs */
1425 case RMNET_IOCTL_EXTENDED:
1426 IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
1427 if (copy_from_user(&extend_ioctl_data,
1428 (u8 *)ifr->ifr_ifru.ifru_data,
1429 sizeof(struct rmnet_ioctl_extended_s))) {
1430 IPAWANERR("failed to copy extended ioctl data\n");
1431 rc = -EFAULT;
1432 break;
1433 }
1434 switch (extend_ioctl_data.extended_ioctl) {
1435 /* Get features */
1436 case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
1437 IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
1438 extend_ioctl_data.u.data =
1439 (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
1440 RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
1441 RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
1442 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1443 &extend_ioctl_data,
1444 sizeof(struct rmnet_ioctl_extended_s)))
1445 rc = -EFAULT;
1446 break;
1447 /* Set MRU */
1448 case RMNET_IOCTL_SET_MRU:
1449 mru = extend_ioctl_data.u.data;
1450 IPAWANDBG("get MRU size %d\n",
1451 extend_ioctl_data.u.data);
1452 break;
1453 /* Get MRU */
1454 case RMNET_IOCTL_GET_MRU:
1455 extend_ioctl_data.u.data = mru;
1456 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1457 &extend_ioctl_data,
1458 sizeof(struct rmnet_ioctl_extended_s)))
1459 rc = -EFAULT;
1460 break;
1461 /* GET SG support */
1462 case RMNET_IOCTL_GET_SG_SUPPORT:
1463 extend_ioctl_data.u.data =
1464 ipa_rmnet_res.ipa_advertise_sg_support;
1465 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1466 &extend_ioctl_data,
1467 sizeof(struct rmnet_ioctl_extended_s)))
1468 rc = -EFAULT;
1469 break;
1470 /* Get endpoint ID */
1471 case RMNET_IOCTL_GET_EPID:
1472 IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
1473 extend_ioctl_data.u.data = epid;
1474 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1475 &extend_ioctl_data,
1476 sizeof(struct rmnet_ioctl_extended_s)))
1477 rc = -EFAULT;
1478 if (copy_from_user(&extend_ioctl_data,
1479 (u8 *)ifr->ifr_ifru.ifru_data,
1480 sizeof(struct rmnet_ioctl_extended_s))) {
1481 IPAWANERR("copy extended ioctl data failed\n");
1482 rc = -EFAULT;
1483 break;
1484 }
1485 IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
1486 extend_ioctl_data.u.data);
1487 break;
1488 /* Endpoint pair */
1489 case RMNET_IOCTL_GET_EP_PAIR:
1490 IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
1491 extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
1492 ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
1493 extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
1494 ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
1495 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1496 &extend_ioctl_data,
1497 sizeof(struct rmnet_ioctl_extended_s)))
1498 rc = -EFAULT;
1499 if (copy_from_user(&extend_ioctl_data,
1500 (u8 *)ifr->ifr_ifru.ifru_data,
1501 sizeof(struct rmnet_ioctl_extended_s))) {
1502 IPAWANERR("copy extended ioctl data failed\n");
1503 rc = -EFAULT;
1504 break;
1505 }
1506 IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
1507 extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
1508 extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
1509 break;
1510 /* Get driver name */
1511 case RMNET_IOCTL_GET_DRIVER_NAME:
1512 memcpy(&extend_ioctl_data.u.if_name,
1513 ipa_netdevs[0]->name,
1514 sizeof(IFNAMSIZ));
1515 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1516 &extend_ioctl_data,
1517 sizeof(struct rmnet_ioctl_extended_s)))
1518 rc = -EFAULT;
1519 break;
1520 /* Add MUX ID */
1521 case RMNET_IOCTL_ADD_MUX_CHANNEL:
1522 mux_index = find_mux_channel_index(
1523 extend_ioctl_data.u.rmnet_mux_val.mux_id);
1524 if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
1525 IPAWANDBG("already setup mux(%d)\n",
1526 extend_ioctl_data.u.
1527 rmnet_mux_val.mux_id);
1528 return rc;
1529 }
1530 if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
1531 IPAWANERR("Exceed mux_channel limit(%d)\n",
1532 rmnet_index);
1533 return -EFAULT;
1534 }
1535 IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
1536 extend_ioctl_data.u.rmnet_mux_val.mux_id,
1537 extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
1538 /* cache the mux name and id */
1539 mux_channel[rmnet_index].mux_id =
1540 extend_ioctl_data.u.rmnet_mux_val.mux_id;
1541 memcpy(mux_channel[rmnet_index].vchannel_name,
1542 extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
1543 sizeof(mux_channel[rmnet_index].vchannel_name));
1544 IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
1545 mux_channel[rmnet_index].vchannel_name,
1546 mux_channel[rmnet_index].mux_id,
1547 rmnet_index);
1548 /* check if UL filter rules coming*/
1549 if (num_q6_rule != 0) {
1550 IPAWANERR("dev(%s) register to IPA\n",
1551 extend_ioctl_data.u.rmnet_mux_val.
1552 vchannel_name);
1553 rc = wwan_register_to_ipa(rmnet_index);
1554 if (rc < 0) {
1555 IPAWANERR("device %s reg IPA failed\n",
1556 extend_ioctl_data.u.
1557 rmnet_mux_val.vchannel_name);
1558 return -ENODEV;
1559 }
1560 mux_channel[rmnet_index].mux_channel_set = true;
1561 mux_channel[rmnet_index].ul_flt_reg = true;
1562 } else {
1563 IPAWANDBG("dev(%s) haven't registered to IPA\n",
1564 extend_ioctl_data.u.
1565 rmnet_mux_val.vchannel_name);
1566 mux_channel[rmnet_index].mux_channel_set = true;
1567 mux_channel[rmnet_index].ul_flt_reg = false;
1568 }
1569 rmnet_index++;
1570 break;
1571 case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
1572 IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
1573 if ((extend_ioctl_data.u.data) &
1574 RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
1575 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
1576 apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
1577 cs_offload_en =
1578 IPA_ENABLE_CS_OFFLOAD_UL;
1579 apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
1580 cs_metadata_hdr_offset = 1;
1581 } else {
1582 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
1583 }
1584 if ((extend_ioctl_data.u.data) &
1585 RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
1586 apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
1587 IPA_ENABLE_AGGR;
1588 else
1589 apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
1590 IPA_BYPASS_AGGR;
1591 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
1592 hdr_ofst_metadata_valid = 1;
1593 /* modem want offset at 0! */
1594 apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
1595 apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst =
1596 IPA_CLIENT_APPS_LAN_WAN_PROD;
1597 apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC;
1598
1599 apps_to_ipa_ep_cfg.client =
1600 IPA_CLIENT_APPS_LAN_WAN_PROD;
1601 apps_to_ipa_ep_cfg.notify =
1602 apps_ipa_tx_complete_notify;
1603 apps_to_ipa_ep_cfg.desc_fifo_sz =
1604 IPA_SYS_TX_DATA_DESC_FIFO_SZ;
1605 apps_to_ipa_ep_cfg.priv = dev;
1606
1607 rc = ipa2_setup_sys_pipe(&apps_to_ipa_ep_cfg,
1608 &apps_to_ipa_hdl);
1609 if (rc)
1610 IPAWANERR("failed to config egress endpoint\n");
1611
1612 if (num_q6_rule != 0) {
1613 /* already got Q6 UL filter rules*/
1614 if (ipa_qmi_ctx &&
1615 ipa_qmi_ctx->modem_cfg_emb_pipe_flt
1616 == false)
1617 rc = wwan_add_ul_flt_rule_to_ipa();
1618 else
1619 rc = 0;
1620 egress_set = true;
1621 if (rc)
1622 IPAWANERR("install UL rules failed\n");
1623 else
1624 a7_ul_flt_set = true;
1625 } else {
1626 /* wait Q6 UL filter rules*/
1627 egress_set = true;
1628 IPAWANDBG("no UL-rules, egress_set(%d)\n",
1629 egress_set);
1630 }
1631 break;
1632 case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
Gidon Studinski3021a6f2016-11-10 12:48:48 +02001633 rc = handle_ingress_format(dev, &extend_ioctl_data);
Amir Levy9659e592016-10-27 18:08:27 +03001634 break;
1635 case RMNET_IOCTL_SET_XLAT_DEV_INFO:
1636 wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
1637 GFP_KERNEL);
1638 if (!wan_msg) {
1639 IPAWANERR("Failed to allocate memory.\n");
1640 return -ENOMEM;
1641 }
1642 len = sizeof(wan_msg->upstream_ifname) >
1643 sizeof(extend_ioctl_data.u.if_name) ?
1644 sizeof(extend_ioctl_data.u.if_name) :
1645 sizeof(wan_msg->upstream_ifname);
1646 strlcpy(wan_msg->upstream_ifname,
1647 extend_ioctl_data.u.if_name, len);
1648 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
1649 msg_meta.msg_type = WAN_XLAT_CONNECT;
1650 msg_meta.msg_len = sizeof(struct ipa_wan_msg);
1651 rc = ipa2_send_msg(&msg_meta, wan_msg,
1652 ipa_wwan_msg_free_cb);
1653 if (rc) {
1654 IPAWANERR("Failed to send XLAT_CONNECT msg\n");
1655 kfree(wan_msg);
1656 }
1657 break;
1658 /* Get agg count */
1659 case RMNET_IOCTL_GET_AGGREGATION_COUNT:
1660 break;
1661 /* Set agg count */
1662 case RMNET_IOCTL_SET_AGGREGATION_COUNT:
1663 break;
1664 /* Get agg size */
1665 case RMNET_IOCTL_GET_AGGREGATION_SIZE:
1666 break;
1667 /* Set agg size */
1668 case RMNET_IOCTL_SET_AGGREGATION_SIZE:
1669 break;
1670 /* Do flow control */
1671 case RMNET_IOCTL_FLOW_CONTROL:
1672 break;
1673 /* For legacy use */
1674 case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
1675 break;
1676 /* Get HW/SW map */
1677 case RMNET_IOCTL_GET_HWSW_MAP:
1678 break;
1679 /* Set RX Headroom */
1680 case RMNET_IOCTL_SET_RX_HEADROOM:
1681 break;
1682 default:
1683 IPAWANERR("[%s] unsupported extended cmd[%d]",
1684 dev->name,
1685 extend_ioctl_data.extended_ioctl);
1686 rc = -EINVAL;
1687 }
1688 break;
1689 default:
1690 IPAWANERR("[%s] unsupported cmd[%d]",
1691 dev->name, cmd);
1692 rc = -EINVAL;
1693 }
1694 return rc;
1695}
1696
/* ndo callbacks for the rmnet_ipa netdev; raw-IP link, so no MAC-address
 * or address-validation handlers are provided.
 */
static const struct net_device_ops ipa_wwan_ops_ip = {
	.ndo_open = ipa_wwan_open,
	.ndo_stop = ipa_wwan_stop,
	.ndo_start_xmit = ipa_wwan_xmit,
	.ndo_tx_timeout = ipa_wwan_tx_timeout,
	.ndo_do_ioctl = ipa_wwan_ioctl,
	.ndo_change_mtu = ipa_wwan_change_mtu,
	.ndo_set_mac_address = 0,
	.ndo_validate_addr = 0,
};
1707
1708/**
1709 * wwan_setup() - Setups the wwan network driver.
1710 *
1711 * @dev: network device
1712 *
1713 * Return codes:
1714 * None
1715 */
1716
static void ipa_wwan_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipa_wwan_ops_ip;
	ether_setup(dev);
	/* set this after calling ether_setup */
	dev->header_ops = 0;  /* No header */
	/* raw-IP link: no L2 header, no hardware address */
	dev->type = ARPHRD_RAWIP;
	dev->hard_header_len = 0;
	dev->mtu = WWAN_DATA_LEN;
	dev->addr_len = 0;
	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
	/* reserve room for the QMAP mux header prepended on TX */
	dev->needed_headroom = HEADROOM_FOR_QMAP;
	dev->needed_tailroom = TAILROOM;
	dev->watchdog_timeo = 1000;
}
1732
1733/* IPA_RM related functions start*/
1734static void q6_prod_rm_request_resource(struct work_struct *work);
1735static DECLARE_DELAYED_WORK(q6_con_rm_request, q6_prod_rm_request_resource);
1736static void q6_prod_rm_release_resource(struct work_struct *work);
1737static DECLARE_DELAYED_WORK(q6_con_rm_release, q6_prod_rm_release_resource);
1738
1739static void q6_prod_rm_request_resource(struct work_struct *work)
1740{
1741 int ret = 0;
1742
1743 ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
1744 if (ret < 0 && ret != -EINPROGRESS) {
1745 IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
1746 ret);
1747 return;
1748 }
1749}
1750
1751static int q6_rm_request_resource(void)
1752{
1753 queue_delayed_work(ipa_rm_q6_workqueue,
1754 &q6_con_rm_request, 0);
1755 return 0;
1756}
1757
1758static void q6_prod_rm_release_resource(struct work_struct *work)
1759{
1760 int ret = 0;
1761
1762 ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
1763 if (ret < 0 && ret != -EINPROGRESS) {
1764 IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
1765 ret);
1766 return;
1767 }
1768}
1769
1770
1771static int q6_rm_release_resource(void)
1772{
1773 queue_delayed_work(ipa_rm_q6_workqueue,
1774 &q6_con_rm_release, 0);
1775 return 0;
1776}
1777
1778
1779static void q6_rm_notify_cb(void *user_data,
1780 enum ipa_rm_event event,
1781 unsigned long data)
1782{
1783 switch (event) {
1784 case IPA_RM_RESOURCE_GRANTED:
1785 IPAWANDBG("%s: Q6_PROD GRANTED CB\n", __func__);
1786 break;
1787 case IPA_RM_RESOURCE_RELEASED:
1788 IPAWANDBG("%s: Q6_PROD RELEASED CB\n", __func__);
1789 break;
1790 default:
1791 return;
1792 }
1793}
/*
 * q6_initialize_rm() - create the Q6 PROD/CONS RM resources, the Q6 RM
 * workqueue, the PROD->APPS_CONS dependency, and a 100 Mbps perf profile.
 *
 * Returns 0 on success; on failure unwinds everything created so far via
 * the goto chain below and returns the error.
 */
static int q6_initialize_rm(void)
{
	struct ipa_rm_create_params create_params;
	struct ipa_rm_perf_profile profile;
	int result;

	/* Initialize IPA_RM workqueue */
	ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req");
	if (!ipa_rm_q6_workqueue)
		return -ENOMEM;

	/* Q6 producer: grant/release notifications via q6_rm_notify_cb */
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_Q6_PROD;
	create_params.reg_params.notify_cb = &q6_rm_notify_cb;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto create_rsrc_err1;
	/* Q6 consumer: request/release deferred to the workqueue */
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_Q6_CONS;
	create_params.release_resource = &q6_rm_release_resource;
	create_params.request_resource = &q6_rm_request_resource;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto create_rsrc_err2;
	/* add dependency*/
	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
	if (result)
		goto add_dpnd_err;
	/* setup Performance profile */
	memset(&profile, 0, sizeof(profile));
	profile.max_supported_bandwidth_mbps = 100;
	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
			&profile);
	if (result)
		goto set_perf_err;
	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
			&profile);
	if (result)
		goto set_perf_err;
	return result;

/* Error unwind: undo creations in reverse order of setup */
set_perf_err:
	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
add_dpnd_err:
	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (result < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, result);
create_rsrc_err2:
	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (result < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, result);
create_rsrc_err1:
	destroy_workqueue(ipa_rm_q6_workqueue);
	return result;
}
1853
/*
 * q6_deinitialize_rm() - tear down everything q6_initialize_rm() created:
 * the PROD->APPS_CONS dependency, both Q6 RM resources, and the workqueue.
 * Failures are logged but teardown continues.
 */
void q6_deinitialize_rm(void)
{
	int ret;

	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
			IPA_RM_RESOURCE_APPS_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
			ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, ret);
	destroy_workqueue(ipa_rm_q6_workqueue);
}
1874
1875static void wake_tx_queue(struct work_struct *work)
1876{
1877 if (ipa_netdevs[0]) {
1878 __netif_tx_lock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
1879 netif_wake_queue(ipa_netdevs[0]);
1880 __netif_tx_unlock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
1881 }
1882}
1883
1884/**
1885 * ipa_rm_resource_granted() - Called upon
1886 * IPA_RM_RESOURCE_GRANTED event. Wakes up queue is was stopped.
1887 *
1888 * @work: work object supplied ny workqueue
1889 *
1890 * Return codes:
1891 * None
1892 */
static void ipa_rm_resource_granted(void *dev)
{
	IPAWANDBG("Resource Granted - starting queue\n");
	/* Queue wake-up is deferred to wake_tx_queue() in process context */
	schedule_work(&ipa_tx_wakequeue_work);
}
1898
1899/**
1900 * ipa_rm_notify() - Callback function for RM events. Handles
1901 * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
1902 * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
1903 * workqueue.
1904 *
1905 * @dev: network device
1906 * @event: IPA RM event
1907 * @data: Additional data provided by IPA RM
1908 *
1909 * Return codes:
1910 * None
1911 */
static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
			unsigned long data)
{
	struct wwan_private *wwan_ptr = netdev_priv(dev);

	pr_debug("%s: event %d\n", __func__, event);
	switch (event) {
	case IPA_RM_RESOURCE_GRANTED:
		/* While the device is inactive (closing/closed), only signal
		 * waiters on the completion instead of waking the TX queue.
		 */
		if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
			complete_all(&wwan_ptr->resource_granted_completion);
			break;
		}
		ipa_rm_resource_granted(dev);
		break;
	case IPA_RM_RESOURCE_RELEASED:
		/* nothing to do on release */
		break;
	default:
		pr_err("%s: unknown event %d\n", __func__, event);
		break;
	}
}
1933
1934/* IPA_RM related functions end*/
1935
1936static int ssr_notifier_cb(struct notifier_block *this,
1937 unsigned long code,
1938 void *data);
1939
1940static struct notifier_block ssr_notifier = {
1941 .notifier_call = ssr_notifier_cb,
1942};
1943
/*
 * get_ipa_rmnet_dts_configuration() - read driver options from device tree.
 *
 * @pdev: platform device carrying the of_node
 * @ipa_rmnet_drv_res: output; filled with SSR, loaduC, SG, NAPI flags and
 *                     the WAN RX descriptor FIFO size
 *
 * Missing boolean properties read as false; a missing wan-rx-desc-size
 * keeps the IPA_WWAN_CONS_DESC_FIFO_SZ default. Always returns 0.
 */
static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
		struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
	int result;

	/* default, overridden below if the DT property exists */
	ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
	ipa_rmnet_drv_res->ipa_rmnet_ssr =
			of_property_read_bool(pdev->dev.of_node,
			"qcom,rmnet-ipa-ssr");
	pr_info("IPA SSR support = %s\n",
		ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
	ipa_rmnet_drv_res->ipa_loaduC =
			of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-loaduC");
	pr_info("IPA ipa-loaduC = %s\n",
		ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");

	ipa_rmnet_drv_res->ipa_advertise_sg_support =
			of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-advertise-sg-support");
	pr_info("IPA SG support = %s\n",
		ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");

	ipa_rmnet_drv_res->ipa_napi_enable =
			of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-napi-enable");
	pr_info("IPA Napi Enable = %s\n",
		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");

	/* Get IPA WAN RX desc fifo size */
	result = of_property_read_u32(pdev->dev.of_node,
			"qcom,wan-rx-desc-size",
			&ipa_rmnet_drv_res->wan_rx_desc_size);
	if (result)
		pr_info("using default for wan-rx-desc-size = %u\n",
			ipa_rmnet_drv_res->wan_rx_desc_size);
	else
		IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n",
			ipa_rmnet_drv_res->wan_rx_desc_size);

	return 0;
}
1986
1987struct ipa_rmnet_context ipa_rmnet_ctx;
1988
1989/**
1990 * ipa_wwan_probe() - Initialized the module and registers as a
1991 * network interface to the network stack
1992 *
1993 * Return codes:
1994 * 0: success
1995 * -ENOMEM: No memory available
1996 * -EFAULT: Internal error
1997 * -ENODEV: IPA driver not loaded
1998 */
1999static int ipa_wwan_probe(struct platform_device *pdev)
2000{
2001 int ret, i;
2002 struct net_device *dev;
2003 struct wwan_private *wwan_ptr;
2004 struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */
2005 struct ipa_rm_perf_profile profile; /* IPA_RM */
2006
2007 pr_info("rmnet_ipa started initialization\n");
2008
2009 if (!ipa2_is_ready()) {
2010 IPAWANERR("IPA driver not loaded\n");
2011 return -ENODEV;
2012 }
2013
2014 ret = get_ipa_rmnet_dts_configuration(pdev, &ipa_rmnet_res);
2015 ipa_rmnet_ctx.ipa_rmnet_ssr = ipa_rmnet_res.ipa_rmnet_ssr;
2016
2017 ret = ipa_init_q6_smem();
2018 if (ret) {
2019 IPAWANERR("ipa_init_q6_smem failed!\n");
2020 return ret;
2021 }
2022
2023 /* initialize tx/rx enpoint setup */
2024 memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
2025 memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
2026
2027 /* initialize ex property setup */
2028 num_q6_rule = 0;
2029 old_num_q6_rule = 0;
2030 rmnet_index = 0;
2031 egress_set = false;
2032 a7_ul_flt_set = false;
2033 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
2034 memset(&mux_channel[i], 0, sizeof(struct rmnet_mux_val));
2035
2036 /* start A7 QMI service/client */
2037 if (ipa_rmnet_res.ipa_loaduC)
2038 /* Android platform loads uC */
2039 ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
2040 else
2041 /* LE platform not loads uC */
2042 ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
2043
2044 /* construct default WAN RT tbl for IPACM */
2045 ret = ipa_setup_a7_qmap_hdr();
2046 if (ret)
2047 goto setup_a7_qmap_hdr_err;
2048 ret = ipa_setup_dflt_wan_rt_tables();
2049 if (ret)
2050 goto setup_dflt_wan_rt_tables_err;
2051
2052 if (!atomic_read(&is_ssr)) {
2053 /* Start transport-driver fd ioctl for ipacm for first init */
2054 ret = wan_ioctl_init();
2055 if (ret)
2056 goto wan_ioctl_init_err;
2057 } else {
2058 /* Enable sending QMI messages after SSR */
2059 wan_ioctl_enable_qmi_messages();
2060 }
2061
2062 /* initialize wan-driver netdev */
2063 dev = alloc_netdev(sizeof(struct wwan_private),
2064 IPA_WWAN_DEV_NAME,
2065 NET_NAME_UNKNOWN,
2066 ipa_wwan_setup);
2067 if (!dev) {
2068 IPAWANERR("no memory for netdev\n");
2069 ret = -ENOMEM;
2070 goto alloc_netdev_err;
2071 }
2072 ipa_netdevs[0] = dev;
2073 wwan_ptr = netdev_priv(dev);
2074 memset(wwan_ptr, 0, sizeof(*wwan_ptr));
2075 IPAWANDBG("wwan_ptr (private) = %p", wwan_ptr);
2076 wwan_ptr->net = dev;
2077 wwan_ptr->outstanding_high_ctl = DEFAULT_OUTSTANDING_HIGH_CTL;
2078 wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
2079 wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW;
2080 atomic_set(&wwan_ptr->outstanding_pkts, 0);
2081 spin_lock_init(&wwan_ptr->lock);
2082 init_completion(&wwan_ptr->resource_granted_completion);
2083
2084 if (!atomic_read(&is_ssr)) {
2085 /* IPA_RM configuration starts */
2086 ret = q6_initialize_rm();
2087 if (ret) {
2088 IPAWANERR("%s: q6_initialize_rm failed, ret: %d\n",
2089 __func__, ret);
2090 goto q6_init_err;
2091 }
2092 }
2093
2094 memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
2095 ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
2096 ipa_rm_params.reg_params.user_data = dev;
2097 ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
2098 ret = ipa_rm_create_resource(&ipa_rm_params);
2099 if (ret) {
2100 pr_err("%s: unable to create resourse %d in IPA RM\n",
2101 __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
2102 goto create_rsrc_err;
2103 }
2104 ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
2105 IPA_RM_INACTIVITY_TIMER);
2106 if (ret) {
2107 pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
2108 __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
2109 goto timer_init_err;
2110 }
2111 /* add dependency */
2112 ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2113 IPA_RM_RESOURCE_Q6_CONS);
2114 if (ret)
2115 goto add_dpnd_err;
2116 /* setup Performance profile */
2117 memset(&profile, 0, sizeof(profile));
2118 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
2119 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
2120 &profile);
2121 if (ret)
2122 goto set_perf_err;
2123 /* IPA_RM configuration ends */
2124
2125 /* Enable SG support in netdevice. */
2126 if (ipa_rmnet_res.ipa_advertise_sg_support)
2127 dev->hw_features |= NETIF_F_SG;
2128
2129 /* Enable NAPI support in netdevice. */
2130 if (ipa_rmnet_res.ipa_napi_enable) {
2131 netif_napi_add(dev, &(wwan_ptr->napi),
2132 ipa_rmnet_poll, NAPI_WEIGHT);
2133 }
2134
2135 ret = register_netdev(dev);
2136 if (ret) {
2137 IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
2138 0, ret);
2139 goto set_perf_err;
2140 }
2141
2142 IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n",
2143 ipa_netdevs[0]->name);
2144 if (ret) {
2145 IPAWANERR("default configuration failed rc=%d\n",
2146 ret);
2147 goto config_err;
2148 }
2149 atomic_set(&is_initialized, 1);
2150 if (!atomic_read(&is_ssr)) {
2151 /* offline charging mode */
2152 ipa2_proxy_clk_unvote();
2153 }
2154 atomic_set(&is_ssr, 0);
2155
2156 pr_info("rmnet_ipa completed initialization\n");
2157 return 0;
2158config_err:
2159 if (ipa_rmnet_res.ipa_napi_enable)
2160 netif_napi_del(&(wwan_ptr->napi));
2161 unregister_netdev(ipa_netdevs[0]);
2162set_perf_err:
2163 ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2164 IPA_RM_RESOURCE_Q6_CONS);
2165 if (ret)
2166 IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
2167 IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
2168 ret);
2169add_dpnd_err:
2170 ret = ipa_rm_inactivity_timer_destroy(
2171 IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
2172 if (ret)
2173 IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
2174 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2175timer_init_err:
2176 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2177 if (ret)
2178 IPAWANERR("Error deleting resource %d, ret=%d\n",
2179 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2180create_rsrc_err:
2181 q6_deinitialize_rm();
2182q6_init_err:
2183 free_netdev(ipa_netdevs[0]);
2184 ipa_netdevs[0] = NULL;
2185alloc_netdev_err:
2186 wan_ioctl_deinit();
2187wan_ioctl_init_err:
2188 ipa_del_dflt_wan_rt_tables();
2189setup_dflt_wan_rt_tables_err:
2190 ipa_del_a7_qmap_hdr();
2191setup_a7_qmap_hdr_err:
2192 ipa_qmi_service_exit();
2193 atomic_set(&is_ssr, 0);
2194 return ret;
2195}
2196
/**
 * ipa_wwan_remove() - tear down the wwan netdevice and IPA resources
 * @pdev: platform device being removed (not used directly)
 *
 * Reverse of ipa_wwan_probe(): tears down the IPA->APPS pipe (under the
 * pipe-handle mutex), unregisters the netdevice, removes the IPA_RM
 * dependency/timer/resource, flushes pending work items and deletes the
 * default routing/header state. wan_ioctl is deliberately kept alive
 * across SSR so IPACM keeps its fd.
 *
 * Always returns 0.
 */
static int ipa_wwan_remove(struct platform_device *pdev)
{
	int ret;
	struct wwan_private *wwan_ptr;

	wwan_ptr = netdev_priv(ipa_netdevs[0]);

	pr_info("rmnet_ipa started deinitialization\n");
	/* serialize against concurrent users of ipa_to_apps_hdl */
	mutex_lock(&ipa_to_apps_pipe_handle_guard);
	ret = ipa2_teardown_sys_pipe(ipa_to_apps_hdl);
	if (ret < 0)
		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
	else
		ipa_to_apps_hdl = -1;
	if (ipa_rmnet_res.ipa_napi_enable)
		netif_napi_del(&(wwan_ptr->napi));
	mutex_unlock(&ipa_to_apps_pipe_handle_guard);
	unregister_netdev(ipa_netdevs[0]);
	/* unwind the IPA_RM objects created in probe; errors only logged */
	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
			ret);
	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR(
		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	cancel_work_sync(&ipa_tx_wakequeue_work);
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	free_netdev(ipa_netdevs[0]);
	ipa_netdevs[0] = NULL;
	/* No need to remove wwan_ioctl during SSR */
	if (!atomic_read(&is_ssr))
		wan_ioctl_deinit();
	ipa_del_dflt_wan_rt_tables();
	ipa_del_a7_qmap_hdr();
	ipa_del_mux_qmap_hdrs();
	if (ipa_qmi_ctx && ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false)
		wwan_del_ul_flt_rule_to_ipa();
	ipa_cleanup_deregister_intf();
	atomic_set(&is_initialized, 0);
	pr_info("rmnet_ipa completed deinitialization\n");
	return 0;
}
2247
2248/**
2249* rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
2250* @dev: pointer to device
2251*
2252* This callback will be invoked by the runtime_pm framework when an AP suspend
2253* operation is invoked, usually by pressing a suspend button.
2254*
2255* Returns -EAGAIN to runtime_pm framework in case there are pending packets
2256* in the Tx queue. This will postpone the suspend operation until all the
2257* pending packets will be transmitted.
2258*
2259* In case there are no packets to send, releases the WWAN0_PROD entity.
2260* As an outcome, the number of IPA active clients should be decremented
2261* until IPA clocks can be gated.
2262*/
2263static int rmnet_ipa_ap_suspend(struct device *dev)
2264{
2265 struct net_device *netdev = ipa_netdevs[0];
2266 struct wwan_private *wwan_ptr = netdev_priv(netdev);
2267
2268 IPAWANDBG("Enter...\n");
2269 /* Do not allow A7 to suspend in case there are oustanding packets */
2270 if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
2271 IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
2272 return -EAGAIN;
2273 }
2274
2275 /* Make sure that there is no Tx operation ongoing */
2276 netif_tx_lock_bh(netdev);
2277 ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2278 netif_tx_unlock_bh(netdev);
2279 IPAWANDBG("Exit\n");
2280
2281 return 0;
2282}
2283
2284/**
2285* rmnet_ipa_ap_resume() - resume callback for runtime_pm
2286* @dev: pointer to device
2287*
2288* This callback will be invoked by the runtime_pm framework when an AP resume
2289* operation is invoked.
2290*
2291* Enables the network interface queue and returns success to the
2292* runtime_pm framework.
2293*/
2294static int rmnet_ipa_ap_resume(struct device *dev)
2295{
2296 struct net_device *netdev = ipa_netdevs[0];
2297
2298 IPAWANDBG("Enter...\n");
2299 netif_wake_queue(netdev);
2300 IPAWANDBG("Exit\n");
2301
2302 return 0;
2303}
2304
/* Stop tethering-stats polling: cancel any queued poll work, then zero
 * the interval so the poll work function will not reschedule itself.
 */
static void ipa_stop_polling_stats(void)
{
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	ipa_rmnet_ctx.polling_interval = 0;
}
2310
/* Device-tree match table: binds this driver to "qcom,rmnet-ipa" nodes */
static const struct of_device_id rmnet_ipa_dt_match[] = {
	{.compatible = "qcom,rmnet-ipa"},
	{},
};
MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);
2316
/* System PM callbacks; both run in the noirq phase of suspend/resume */
static const struct dev_pm_ops rmnet_ipa_pm_ops = {
	.suspend_noirq = rmnet_ipa_ap_suspend,
	.resume_noirq = rmnet_ipa_ap_resume,
};
2321
/* Platform driver; probe/remove are also re-run across modem SSR via
 * ssr_notifier_cb() (unregister on shutdown, re-register on powerup).
 */
static struct platform_driver rmnet_ipa_driver = {
	.driver = {
		.name = "rmnet_ipa",
		.owner = THIS_MODULE,
		.pm = &rmnet_ipa_pm_ops,
		.of_match_table = rmnet_ipa_dt_match,
	},
	.probe = ipa_wwan_probe,
	.remove = ipa_wwan_remove,
};
2332
2333static int ssr_notifier_cb(struct notifier_block *this,
2334 unsigned long code,
2335 void *data)
2336{
2337 if (ipa_rmnet_ctx.ipa_rmnet_ssr) {
2338 if (code == SUBSYS_BEFORE_SHUTDOWN) {
2339 pr_info("IPA received MPSS BEFORE_SHUTDOWN\n");
2340 atomic_set(&is_ssr, 1);
2341 ipa_q6_pre_shutdown_cleanup();
2342 if (ipa_netdevs[0])
2343 netif_stop_queue(ipa_netdevs[0]);
2344 ipa_qmi_stop_workqueues();
2345 wan_ioctl_stop_qmi_messages();
2346 ipa_stop_polling_stats();
2347 if (atomic_read(&is_initialized))
2348 platform_driver_unregister(&rmnet_ipa_driver);
2349 pr_info("IPA BEFORE_SHUTDOWN handling is complete\n");
2350 return NOTIFY_DONE;
2351 }
2352 if (code == SUBSYS_AFTER_SHUTDOWN) {
2353 pr_info("IPA received MPSS AFTER_SHUTDOWN\n");
2354 if (atomic_read(&is_ssr))
2355 ipa_q6_post_shutdown_cleanup();
2356 pr_info("IPA AFTER_SHUTDOWN handling is complete\n");
2357 return NOTIFY_DONE;
2358 }
2359 if (code == SUBSYS_AFTER_POWERUP) {
2360 pr_info("IPA received MPSS AFTER_POWERUP\n");
2361 if (!atomic_read(&is_initialized)
2362 && atomic_read(&is_ssr))
2363 platform_driver_register(&rmnet_ipa_driver);
2364 pr_info("IPA AFTER_POWERUP handling is complete\n");
2365 return NOTIFY_DONE;
2366 }
2367 if (code == SUBSYS_BEFORE_POWERUP) {
2368 pr_info("IPA received MPSS BEFORE_POWERUP\n");
2369 if (atomic_read(&is_ssr))
2370 /* clean up cached QMI msg/handlers */
2371 ipa_qmi_service_exit();
2372 ipa2_proxy_clk_vote();
2373 pr_info("IPA BEFORE_POWERUP handling is complete\n");
2374 return NOTIFY_DONE;
2375 }
2376 }
2377 return NOTIFY_DONE;
2378}
2379
2380/**
2381 * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa2_send_msg
2382 * @buff: pointer to buffer containing the message
2383 * @len: message len
2384 * @type: message type
2385 *
2386 * This function is invoked when ipa2_send_msg is complete (Provided as a
2387 * free function pointer along with the message).
2388 */
2389static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
2390{
2391 if (!buff) {
2392 IPAWANERR("Null buffer\n");
2393 return;
2394 }
2395
2396 if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
2397 type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
2398 IPAWANERR("Wrong type given. buff %p type %d\n",
2399 buff, type);
2400 }
2401 kfree(buff);
2402}
2403
2404/**
2405 * rmnet_ipa_get_stats_and_update(bool reset) - Gets pipe stats from Modem
2406 *
2407 * This function queries the IPA Modem driver for the pipe stats
2408 * via QMI, and updates the user space IPA entity.
2409 */
2410static void rmnet_ipa_get_stats_and_update(bool reset)
2411{
2412 struct ipa_get_data_stats_req_msg_v01 req;
2413 struct ipa_get_data_stats_resp_msg_v01 *resp;
2414 struct ipa_msg_meta msg_meta;
2415 int rc;
2416
2417 resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
2418 GFP_KERNEL);
2419 if (!resp) {
2420 IPAWANERR("Can't allocate memory for stats message\n");
2421 return;
2422 }
2423
2424 memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
2425 memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
2426
2427 req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
2428 if (reset == true) {
2429 req.reset_stats_valid = true;
2430 req.reset_stats = true;
2431 IPAWANERR("Get the latest pipe-stats and reset it\n");
2432 }
2433
2434 rc = ipa_qmi_get_data_stats(&req, resp);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002435 if (rc) {
2436 IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc);
2437 kfree(resp);
2438 return;
2439 }
Amir Levy9659e592016-10-27 18:08:27 +03002440
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002441 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2442 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
2443 msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
2444 rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2445 if (rc) {
2446 IPAWANERR("ipa2_send_msg failed: %d\n", rc);
2447 kfree(resp);
2448 return;
Amir Levy9659e592016-10-27 18:08:27 +03002449 }
2450}
2451
2452/**
2453 * tethering_stats_poll_queue() - Stats polling function
2454 * @work - Work entry
2455 *
2456 * This function is scheduled periodically (per the interval) in
2457 * order to poll the IPA Modem driver for the pipe stats.
2458 */
2459static void tethering_stats_poll_queue(struct work_struct *work)
2460{
2461 rmnet_ipa_get_stats_and_update(false);
2462
2463 /* Schedule again only if there's an active polling interval */
2464 if (ipa_rmnet_ctx.polling_interval != 0)
2465 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
2466 msecs_to_jiffies(ipa_rmnet_ctx.polling_interval*1000));
2467}
2468
2469/**
2470 * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
2471 *
2472 * This function retrieves the data usage (used quota) from the IPA Modem driver
2473 * via QMI, and updates IPA user space entity.
2474 */
2475static void rmnet_ipa_get_network_stats_and_update(void)
2476{
2477 struct ipa_get_apn_data_stats_req_msg_v01 req;
2478 struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
2479 struct ipa_msg_meta msg_meta;
2480 int rc;
2481
2482 resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
2483 GFP_KERNEL);
2484 if (!resp) {
2485 IPAWANERR("Can't allocate memory for network stats message\n");
2486 return;
2487 }
2488
2489 memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
2490 memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
2491
2492 req.mux_id_list_valid = true;
2493 req.mux_id_list_len = 1;
2494 req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id;
2495
2496 rc = ipa_qmi_get_network_stats(&req, resp);
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002497 if (rc) {
2498 IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc);
2499 kfree(resp);
2500 return;
2501 }
Amir Levy9659e592016-10-27 18:08:27 +03002502
Gidon Studinski3021a6f2016-11-10 12:48:48 +02002503 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2504 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
2505 msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
2506 rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2507 if (rc) {
2508 IPAWANERR("ipa2_send_msg failed: %d\n", rc);
2509 kfree(resp);
2510 return;
Amir Levy9659e592016-10-27 18:08:27 +03002511 }
2512}
2513
2514/**
2515 * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler
2516 * @data - IOCTL data
2517 *
2518 * This function handles WAN_IOC_POLL_TETHERING_STATS.
2519 * In case polling interval received is 0, polling will stop
2520 * (If there's a polling in progress, it will allow it to finish), and then will
2521 * fetch network stats, and update the IPA user space.
2522 *
2523 * Return codes:
2524 * 0: Success
2525 */
2526int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
2527{
2528 ipa_rmnet_ctx.polling_interval = data->polling_interval_secs;
2529
2530 cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
2531
2532 if (ipa_rmnet_ctx.polling_interval == 0) {
2533 ipa_qmi_stop_data_qouta();
2534 rmnet_ipa_get_network_stats_and_update();
2535 rmnet_ipa_get_stats_and_update(true);
2536 return 0;
2537 }
2538
2539 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
2540 return 0;
2541}
2542
2543/**
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302544 * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler
Amir Levy9659e592016-10-27 18:08:27 +03002545 * @data - IOCTL data
2546 *
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302547 * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface.
Amir Levy9659e592016-10-27 18:08:27 +03002548 * It translates the given interface name to the Modem MUX ID and
2549 * sends the request of the quota to the IPA Modem driver via QMI.
2550 *
2551 * Return codes:
2552 * 0: Success
2553 * -EFAULT: Invalid interface name provided
2554 * other: See ipa_qmi_set_data_quota
2555 */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302556static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data)
Amir Levy9659e592016-10-27 18:08:27 +03002557{
2558 u32 mux_id;
2559 int index;
2560 struct ipa_set_data_usage_quota_req_msg_v01 req;
2561
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302562 /* stop quota */
2563 if (!data->set_quota)
2564 ipa_qmi_stop_data_qouta();
2565
Amir Levy9659e592016-10-27 18:08:27 +03002566 index = find_vchannel_name_index(data->interface_name);
2567 IPAWANERR("iface name %s, quota %lu\n",
2568 data->interface_name,
2569 (unsigned long int) data->quota_mbytes);
2570
2571 if (index == MAX_NUM_OF_MUX_CHANNEL) {
2572 IPAWANERR("%s is an invalid iface name\n",
2573 data->interface_name);
2574 return -EFAULT;
2575 }
2576
2577 mux_id = mux_channel[index].mux_id;
2578
2579 ipa_rmnet_ctx.metered_mux_id = mux_id;
2580
2581 memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
2582 req.apn_quota_list_valid = true;
2583 req.apn_quota_list_len = 1;
2584 req.apn_quota_list[0].mux_id = mux_id;
2585 req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
2586
2587 return ipa_qmi_set_data_quota(&req);
2588}
2589
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302590static int rmnet_ipa_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
2591{
2592 struct ipa_set_wifi_quota wifi_quota;
2593 int rc = 0;
2594
2595 memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
2596 wifi_quota.set_quota = data->set_quota;
2597 wifi_quota.quota_bytes = data->quota_mbytes;
2598 IPAWANDBG("iface name %s, quota %lu\n",
2599 data->interface_name,
2600 (unsigned long int) data->quota_mbytes);
2601
2602 rc = ipa2_set_wlan_quota(&wifi_quota);
2603 /* check if wlan-fw takes this quota-set */
2604 if (!wifi_quota.set_valid)
2605 rc = -EFAULT;
2606 return rc;
2607}
2608
2609/**
2610 * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
2611 * @data - IOCTL data
2612 *
2613 * This function handles WAN_IOC_SET_DATA_QUOTA.
2614 * It translates the given interface name to the Modem MUX ID and
2615 * sends the request of the quota to the IPA Modem driver via QMI.
2616 *
2617 * Return codes:
2618 * 0: Success
2619 * -EFAULT: Invalid interface name provided
2620 * other: See ipa_qmi_set_data_quota
2621 */
2622int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
2623{
2624 enum ipa_upstream_type upstream_type;
2625 int rc = 0;
2626
2627 /* get IPA backhaul type */
2628 upstream_type = find_upstream_type(data->interface_name);
2629
2630 if (upstream_type == IPA_UPSTEAM_MAX) {
2631 IPAWANERR("upstream iface %s not supported\n",
2632 data->interface_name);
2633 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2634 rc = rmnet_ipa_set_data_quota_wifi(data);
2635 if (rc) {
2636 IPAWANERR("set quota on wifi failed\n");
2637 return rc;
2638 }
2639 } else {
2640 rc = rmnet_ipa_set_data_quota_modem(data);
2641 if (rc) {
2642 IPAWANERR("set quota on modem failed\n");
2643 return rc;
2644 }
2645 }
2646 return rc;
2647}
2648
Amir Levy9659e592016-10-27 18:08:27 +03002649 /* rmnet_ipa_set_tether_client_pipe() -
2650 * @data - IOCTL data
2651 *
2652 * This function handles WAN_IOC_SET_DATA_QUOTA.
2653 * It translates the given interface name to the Modem MUX ID and
2654 * sends the request of the quota to the IPA Modem driver via QMI.
2655 *
2656 * Return codes:
2657 * 0: Success
Skylar Chang345c8142016-11-30 14:41:24 -08002658 * -EFAULT: Invalid src/dst pipes provided
Amir Levy9659e592016-10-27 18:08:27 +03002659 * other: See ipa_qmi_set_data_quota
2660 */
2661int rmnet_ipa_set_tether_client_pipe(
2662 struct wan_ioctl_set_tether_client_pipe *data)
2663{
2664 int number, i;
2665
Skylar Chang345c8142016-11-30 14:41:24 -08002666 /* error checking if ul_src_pipe_len valid or not*/
2667 if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
2668 data->ul_src_pipe_len < 0) {
2669 IPAWANERR("UL src pipes %d exceeding max %d\n",
2670 data->ul_src_pipe_len,
2671 QMI_IPA_MAX_PIPES_V01);
2672 return -EFAULT;
2673 }
2674 /* error checking if dl_dst_pipe_len valid or not*/
2675 if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
2676 data->dl_dst_pipe_len < 0) {
2677 IPAWANERR("DL dst pipes %d exceeding max %d\n",
2678 data->dl_dst_pipe_len,
2679 QMI_IPA_MAX_PIPES_V01);
2680 return -EFAULT;
2681 }
2682
Amir Levy9659e592016-10-27 18:08:27 +03002683 IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
2684 data->ipa_client,
2685 data->ul_src_pipe_len,
2686 data->dl_dst_pipe_len,
2687 data->reset_client);
2688 number = data->ul_src_pipe_len;
2689 for (i = 0; i < number; i++) {
2690 IPAWANDBG("UL index-%d pipe %d\n", i,
2691 data->ul_src_pipe_list[i]);
2692 if (data->reset_client)
2693 ipa_set_client(data->ul_src_pipe_list[i],
2694 0, false);
2695 else
2696 ipa_set_client(data->ul_src_pipe_list[i],
2697 data->ipa_client, true);
2698 }
2699 number = data->dl_dst_pipe_len;
2700 for (i = 0; i < number; i++) {
2701 IPAWANDBG("DL index-%d pipe %d\n", i,
2702 data->dl_dst_pipe_list[i]);
2703 if (data->reset_client)
2704 ipa_set_client(data->dl_dst_pipe_list[i],
2705 0, false);
2706 else
2707 ipa_set_client(data->dl_dst_pipe_list[i],
2708 data->ipa_client, false);
2709 }
2710 return 0;
2711}
2712
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302713static int rmnet_ipa_query_tethering_stats_wifi(
2714 struct wan_ioctl_query_tether_stats *data, bool reset)
2715{
2716 struct ipa_get_wdi_sap_stats *sap_stats;
2717 int rc;
2718
2719 sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
2720 GFP_KERNEL);
2721 if (!sap_stats)
2722 return -ENOMEM;
2723
2724 sap_stats->reset_stats = reset;
2725 IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);
2726
2727 rc = ipa2_get_wlan_stats(sap_stats);
2728 if (rc) {
2729 kfree(sap_stats);
2730 return rc;
2731 } else if (reset) {
2732 kfree(sap_stats);
2733 return 0;
2734 }
2735
2736 if (sap_stats->stats_valid) {
2737 data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
2738 data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
2739 data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
2740 data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
2741 data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
2742 data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
2743 data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
2744 data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
2745 }
2746
2747 IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
2748 (unsigned long int) data->ipv4_rx_packets,
2749 (unsigned long int) data->ipv6_rx_packets,
2750 (unsigned long int) data->ipv4_rx_bytes,
2751 (unsigned long int) data->ipv6_rx_bytes);
2752 IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
2753 (unsigned long int) data->ipv4_tx_packets,
2754 (unsigned long int) data->ipv6_tx_packets,
2755 (unsigned long int) data->ipv4_tx_bytes,
2756 (unsigned long int) data->ipv6_tx_bytes);
2757
2758 kfree(sap_stats);
2759 return rc;
2760}
2761
/*
 * rmnet_ipa_query_tethering_stats_modem() - fetch (or reset) modem pipe stats
 * @data: output stats, accumulated for pipes whose client matches
 *        data->ipa_client; may be NULL when @reset is true (the function
 *        returns before dereferencing it in that case)
 * @reset: when true, only ask the modem to clear its pipe stats
 *
 * Pulls the per-pipe statistics from the modem via QMI, then adds the
 * DL (downlink) counters of matching non-uplink pipes and the UL
 * (uplink) counters of matching uplink pipes into @data.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the QMI error.
 */
int rmnet_ipa_query_tethering_stats_modem(
	struct wan_ioctl_query_tether_stats *data,
	bool reset
)
{
	struct ipa_get_data_stats_req_msg_v01 *req;
	struct ipa_get_data_stats_resp_msg_v01 *resp;
	int pipe_len, rc;

	req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
			GFP_KERNEL);
	if (!req) {
		IPAWANERR("failed to allocate memory for stats message\n");
		return -ENOMEM;
	}
	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
			GFP_KERNEL);
	if (!resp) {
		IPAWANERR("failed to allocate memory for stats message\n");
		kfree(req);
		return -ENOMEM;
	}
	memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));

	req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
	if (reset) {
		req->reset_stats_valid = true;
		req->reset_stats = true;
		IPAWANERR("reset the pipe stats\n");
	} else {
		/* print tethered-client enum */
		IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
	}

	rc = ipa_qmi_get_data_stats(req, resp);
	if (rc) {
		IPAWANERR("can't get ipa_qmi_get_data_stats\n");
		kfree(req);
		kfree(resp);
		return rc;
	} else if (reset) {
		/* pure reset request: nothing to copy into @data */
		kfree(req);
		kfree(resp);
		return 0;
	}

	/* DL: accumulate counters of non-uplink pipes whose client matches */
	if (resp->dl_dst_pipe_stats_list_valid) {
		for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
			pipe_len++) {
			IPAWANDBG("Check entry(%d) dl_dst_pipe(%d)\n",
				pipe_len, resp->dl_dst_pipe_stats_list
				[pipe_len].pipe_index);
			IPAWANDBG("dl_p_v4(%lu)v6(%lu) dl_b_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv4_packets,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv6_packets,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv4_bytes,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv6_bytes);
			if (ipa_get_client_uplink(resp->
				dl_dst_pipe_stats_list[pipe_len].
				pipe_index) == false) {
				if (data->ipa_client == ipa_get_client(resp->
					dl_dst_pipe_stats_list[pipe_len].
					pipe_index)) {
					/* update the DL stats */
					data->ipv4_rx_packets += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv4_packets;
					data->ipv6_rx_packets += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv6_packets;
					data->ipv4_rx_bytes += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv4_bytes;
					data->ipv6_rx_bytes += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv6_bytes;
				}
			}
		}
	}
	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
		(unsigned long int) data->ipv4_rx_packets,
		(unsigned long int) data->ipv6_rx_packets,
		(unsigned long int) data->ipv4_rx_bytes,
		(unsigned long int) data->ipv6_rx_bytes);

	/* UL: accumulate counters of uplink pipes whose client matches */
	if (resp->ul_src_pipe_stats_list_valid) {
		for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
			pipe_len++) {
			IPAWANDBG("Check entry(%d) ul_dst_pipe(%d)\n",
				pipe_len,
				resp->ul_src_pipe_stats_list[pipe_len].
				pipe_index);
			IPAWANDBG("ul_p_v4(%lu)v6(%lu)ul_b_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv4_packets,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv6_packets,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv4_bytes,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv6_bytes);
			if (ipa_get_client_uplink(resp->
				ul_src_pipe_stats_list[pipe_len].
				pipe_index) == true) {
				if (data->ipa_client == ipa_get_client(resp->
					ul_src_pipe_stats_list[pipe_len].
					pipe_index)) {
					/* update the UL stats */
					data->ipv4_tx_packets += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv4_packets;
					data->ipv6_tx_packets += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv6_packets;
					data->ipv4_tx_bytes += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv4_bytes;
					data->ipv6_tx_bytes += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv6_bytes;
				}
			}
		}
	}
	IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
		(unsigned long int) data->ipv4_tx_packets,
		(unsigned long int) data->ipv6_tx_packets,
		(unsigned long int) data->ipv4_tx_bytes,
		(unsigned long int) data->ipv6_tx_bytes);
	kfree(req);
	kfree(resp);
	return 0;
}
2909
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302910int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
2911 bool reset)
2912{
2913 enum ipa_upstream_type upstream_type;
2914 int rc = 0;
2915
2916 /* get IPA backhaul type */
2917 upstream_type = find_upstream_type(data->upstreamIface);
2918
2919 if (upstream_type == IPA_UPSTEAM_MAX) {
2920 IPAWANERR("upstreamIface %s not supported\n",
2921 data->upstreamIface);
2922 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2923 IPAWANDBG_LOW(" query wifi-backhaul stats\n");
2924 rc = rmnet_ipa_query_tethering_stats_wifi(
2925 data, false);
2926 if (rc) {
2927 IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
2928 return rc;
2929 }
2930 } else {
2931 IPAWANDBG_LOW(" query modem-backhaul stats\n");
2932 rc = rmnet_ipa_query_tethering_stats_modem(
2933 data, false);
2934 if (rc) {
2935 IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
2936 return rc;
2937 }
2938 }
2939 return rc;
2940}
2941
2942int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
2943{
2944 enum ipa_upstream_type upstream_type;
2945 int rc = 0;
2946
2947 /* get IPA backhaul type */
2948 upstream_type = find_upstream_type(data->upstreamIface);
2949
2950 if (upstream_type == IPA_UPSTEAM_MAX) {
2951 IPAWANERR("upstream iface %s not supported\n",
2952 data->upstreamIface);
2953 } else if (upstream_type == IPA_UPSTEAM_WLAN) {
2954 IPAWANDBG(" reset wifi-backhaul stats\n");
2955 rc = rmnet_ipa_query_tethering_stats_wifi(
2956 NULL, true);
2957 if (rc) {
2958 IPAWANERR("reset WLAN stats failed\n");
2959 return rc;
2960 }
2961 } else {
2962 IPAWANDBG(" reset modem-backhaul stats\n");
2963 rc = rmnet_ipa_query_tethering_stats_modem(
2964 NULL, true);
2965 if (rc) {
2966 IPAWANERR("reset MODEM stats failed\n");
2967 return rc;
2968 }
2969 }
2970 return rc;
2971}
2972
2973
Amir Levy9659e592016-10-27 18:08:27 +03002974/**
2975 * ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
2976 * @mux_id - The MUX ID on which the quota has been reached
2977 *
2978 * This function broadcasts a Netlink event using the kobject of the
2979 * rmnet_ipa interface in order to alert the user space that the quota
2980 * on the specific interface which matches the mux_id has been reached.
2981 *
2982 */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302983void ipa_broadcast_quota_reach_ind(u32 mux_id,
2984 enum ipa_upstream_type upstream_type)
Amir Levy9659e592016-10-27 18:08:27 +03002985{
2986 char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
2987 char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
2988 char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
2989 char *envp[IPA_UEVENT_NUM_EVNP] = {
2990 alert_msg, iface_name_l, iface_name_m, NULL };
2991 int res;
2992 int index;
2993
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302994 /* check upstream_type*/
2995 if (upstream_type == IPA_UPSTEAM_MAX) {
2996 IPAWANERR("upstreamIface type %d not supported\n",
2997 upstream_type);
Amir Levy9659e592016-10-27 18:08:27 +03002998 return;
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05302999 } else if (upstream_type == IPA_UPSTEAM_MODEM) {
3000 index = find_mux_channel_index(mux_id);
3001 if (index == MAX_NUM_OF_MUX_CHANNEL) {
3002 IPAWANERR("%u is an mux ID\n", mux_id);
3003 return;
3004 }
Amir Levy9659e592016-10-27 18:08:27 +03003005 }
3006
3007 res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
3008 "ALERT_NAME=%s", "quotaReachedAlert");
3009 if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
3010 IPAWANERR("message too long (%d)", res);
3011 return;
3012 }
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303013
Amir Levy9659e592016-10-27 18:08:27 +03003014 /* posting msg for L-release for CNE */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303015 if (upstream_type == IPA_UPSTEAM_MODEM) {
Amir Levy9659e592016-10-27 18:08:27 +03003016 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303017 "UPSTREAM=%s", mux_channel[index].vchannel_name);
3018 } else {
3019 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
3020 "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
3021 }
Amir Levy9659e592016-10-27 18:08:27 +03003022 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
3023 IPAWANERR("message too long (%d)", res);
3024 return;
3025 }
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303026
Amir Levy9659e592016-10-27 18:08:27 +03003027 /* posting msg for M-release for CNE */
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303028 if (upstream_type == IPA_UPSTEAM_MODEM) {
Amir Levy9659e592016-10-27 18:08:27 +03003029 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
Utkarsh Saxenab89ce3a2017-02-16 22:29:56 +05303030 "INTERFACE=%s", mux_channel[index].vchannel_name);
3031 } else {
3032 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
3033 "INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
3034 }
Amir Levy9659e592016-10-27 18:08:27 +03003035 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
3036 IPAWANERR("message too long (%d)", res);
3037 return;
3038 }
3039
3040 IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
3041 alert_msg, iface_name_l, iface_name_m);
3042 kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp);
3043}
3044
3045/**
3046 * ipa_q6_handshake_complete() - Perform operations once Q6 is up
3047 * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
3048 *
3049 * This function is invoked once the handshake between the IPA AP driver
3050 * and IPA Q6 driver is complete. At this point, it is possible to perform
3051 * operations which can't be performed until IPA Q6 driver is up.
3052 *
3053 */
3054void ipa_q6_handshake_complete(bool ssr_bootup)
3055{
3056 /* It is required to recover the network stats after SSR recovery */
3057 if (ssr_bootup) {
3058 /*
3059 * In case the uC is required to be loaded by the Modem,
3060 * the proxy vote will be removed only when uC loading is
3061 * complete and indication is received by the AP. After SSR,
3062 * uC is already loaded. Therefore, proxy vote can be removed
3063 * once Modem init is complete.
3064 */
3065 ipa2_proxy_clk_unvote();
3066
3067 /*
3068 * It is required to recover the network stats after
3069 * SSR recovery
3070 */
3071 rmnet_ipa_get_network_stats_and_update();
3072
3073 /* Enable holb monitoring on Q6 pipes. */
3074 ipa_q6_monitor_holb_mitigation(true);
3075 }
3076}
3077
3078static int __init ipa_wwan_init(void)
3079{
3080 atomic_set(&is_initialized, 0);
3081 atomic_set(&is_ssr, 0);
3082
3083 mutex_init(&ipa_to_apps_pipe_handle_guard);
3084 ipa_to_apps_hdl = -1;
3085
3086 ipa_qmi_init();
3087
3088 /* Register for Modem SSR */
3089 subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM,
3090 &ssr_notifier);
3091 if (!IS_ERR(subsys_notify_handle))
3092 return platform_driver_register(&rmnet_ipa_driver);
3093 else
3094 return (int)PTR_ERR(subsys_notify_handle);
3095}
3096
3097static void __exit ipa_wwan_cleanup(void)
3098{
3099 int ret;
3100
3101 ipa_qmi_cleanup();
3102 mutex_destroy(&ipa_to_apps_pipe_handle_guard);
3103 ret = subsys_notif_unregister_notifier(subsys_notify_handle,
3104 &ssr_notifier);
3105 if (ret)
3106 IPAWANERR(
3107 "Error subsys_notif_unregister_notifier system %s, ret=%d\n",
3108 SUBSYS_MODEM, ret);
3109 platform_driver_unregister(&rmnet_ipa_driver);
3110}
3111
3112static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type)
3113{
3114 if (!buff)
3115 IPAWANERR("Null buffer.\n");
3116 kfree(buff);
3117}
3118
3119static void ipa_rmnet_rx_cb(void *priv)
3120{
3121 struct net_device *dev = priv;
3122 struct wwan_private *wwan_ptr;
3123
3124 IPAWANDBG("\n");
3125
3126 if (dev != ipa_netdevs[0]) {
3127 IPAWANERR("Not matching with netdev\n");
3128 return;
3129 }
3130
3131 wwan_ptr = netdev_priv(dev);
3132 napi_schedule(&(wwan_ptr->napi));
3133}
3134
3135static int ipa_rmnet_poll(struct napi_struct *napi, int budget)
3136{
3137 int rcvd_pkts = 0;
3138
3139 rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT);
3140 IPAWANDBG("rcvd packets: %d\n", rcvd_pkts);
3141 return rcvd_pkts;
3142}
3143
/* late_initcall: defer init until core subsystems (IPA, SSR) are available */
late_initcall(ipa_wwan_init);
module_exit(ipa_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
MODULE_LICENSE("GPL v2");