/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * WWAN Transport Network Driver.
 */

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/pkt_sched.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
#include "ipa_qmi_service.h"
#include <linux/rmnet_ipa_fd_ioctl.h>
#include <linux/ipa.h>
#include <uapi/linux/net_map.h>

#include "ipa_trace.h"

#define WWAN_METADATA_SHFT 24
#define WWAN_METADATA_MASK 0xFF000000
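/*
 * The two WWAN_METADATA_* values above place the QMAP mux-id in the top
 * byte of the 32-bit metadata word, e.g. for mux_id 3:
 *	meta_data      = 3 << WWAN_METADATA_SHFT  -> 0x03000000
 *	meta_data_mask = WWAN_METADATA_MASK       -> 0xFF000000
 * (this is how the rx properties are built in ipa3_wwan_register_to_ipa())
 */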
#define WWAN_DATA_LEN 2000
#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
#define HEADROOM_FOR_QMAP 8 /* for mux header */
#define TAILROOM 0 /* for padding by mux layer */
#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
#define UL_FILTER_RULE_HANDLE_START 69
#define DEFAULT_OUTSTANDING_HIGH_CTL 96
#define DEFAULT_OUTSTANDING_HIGH 64
#define DEFAULT_OUTSTANDING_LOW 32
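/*
 * The DEFAULT_OUTSTANDING_* watermarks above drive TX flow control
 * (see ipa3_wwan_xmit() and apps_ipa_tx_complete_notify()): the netdev
 * queue is stopped once DEFAULT_OUTSTANDING_HIGH un-acked packets are
 * in flight (QMAP control packets are still let through up to
 * DEFAULT_OUTSTANDING_HIGH_CTL), and it is woken again once the count
 * drops below DEFAULT_OUTSTANDING_LOW.
 */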

#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"

#define IPA_WWAN_RX_SOFTIRQ_THRESH 16

#define INVALID_MUX_ID 0xFF
#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
#define NAPI_WEIGHT 60

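/*
 * Convenience accessor: returns the rmnet_ipa net_device once the
 * driver context is fully initialized, or NULL before init (and
 * during SSR teardown).
 */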
#define IPA_NETDEV() \
	((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
	  rmnet_ipa3_ctx->wwan_priv->net : NULL)


static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
static void ipa3_wwan_msg_free_cb(void *, u32, u32);
static void ipa3_rmnet_rx_cb(void *priv);
static int ipa3_rmnet_poll(struct napi_struct *napi, int budget);

static void ipa3_wake_tx_queue(struct work_struct *work);
static DECLARE_WORK(ipa3_tx_wakequeue_work, ipa3_wake_tx_queue);

static void tethering_stats_poll_queue(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
		tethering_stats_poll_queue);

enum ipa3_wwan_device_status {
	WWAN_DEVICE_INACTIVE = 0,
	WWAN_DEVICE_ACTIVE = 1
};

struct ipa3_rmnet_plat_drv_res {
	bool ipa_rmnet_ssr;
	bool ipa_loaduC;
	bool ipa_advertise_sg_support;
	bool ipa_napi_enable;
};

/**
 * struct ipa3_wwan_private - WWAN private data
 * @net: network interface struct implemented by this driver
 * @stats: iface statistics
 * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
 * @outstanding_high_ctl: maximum outstanding packets allowed for QMAP
 * control traffic while the queue is stopped
 * @outstanding_high: number of outstanding packets allowed
 * @outstanding_low: number of outstanding packets which shall cause
 * the queue to be woken up again
 * @ch_id: channel id
 * @lock: spinlock for mutual exclusion
 * @resource_granted_completion: signaled when IPA RM grants the resource
 * @device_status: holds device status
 * @napi: NAPI instance
 *
 * WWAN private - holds all relevant info about WWAN driver
 */
struct ipa3_wwan_private {
	struct net_device *net;
	struct net_device_stats stats;
	atomic_t outstanding_pkts;
	int outstanding_high_ctl;
	int outstanding_high;
	int outstanding_low;
	uint32_t ch_id;
	spinlock_t lock;
	struct completion resource_granted_completion;
	enum ipa3_wwan_device_status device_status;
	struct napi_struct napi;
};

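/**
 * struct rmnet_ipa3_context - rmnet_ipa driver context
 *
 * Driver-wide state: the WWAN private data, the apps<->IPA pipe
 * configurations and handles, the default QMAP header and WAN
 * routing-rule handles, the cached mux channels, and the Q6 UL
 * filter-rule bookkeeping (including SSR state).
 */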
struct rmnet_ipa3_context {
	struct ipa3_wwan_private *wwan_priv;
	struct ipa_sys_connect_params apps_to_ipa_ep_cfg;
	struct ipa_sys_connect_params ipa_to_apps_ep_cfg;
	u32 qmap_hdr_hdl;
	u32 dflt_v4_wan_rt_hdl;
	u32 dflt_v6_wan_rt_hdl;
	struct ipa3_rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
	int num_q6_rules;
	int old_num_q6_rules;
	int rmnet_index;
	bool egress_set;
	bool a7_ul_flt_set;
	struct workqueue_struct *rm_q6_wq;
	atomic_t is_initialized;
	atomic_t is_ssr;
	void *subsys_notify_handle;
	u32 apps_to_ipa3_hdl;
	u32 ipa3_to_apps_hdl;
	struct mutex ipa_to_apps_pipe_handle_guard;
};

static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res;

/**
 * ipa3_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
 *
 * Return codes:
 * 0: success
 * -ENOMEM: failed to allocate memory
 * -EPERM: failed to add the tables
 */
static int ipa3_setup_a7_qmap_hdr(void)
{
	struct ipa_ioc_add_hdr *hdr;
	struct ipa_hdr_add *hdr_entry;
	u32 pyld_sz;
	int ret;

	/* install the basic exception header */
	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
		sizeof(struct ipa_hdr_add);
	hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!hdr) {
		IPAWANERR("fail to alloc exception hdr\n");
		return -ENOMEM;
	}
	hdr->num_hdrs = 1;
	hdr->commit = 1;
	hdr_entry = &hdr->hdr[0];

	strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
		IPA_RESOURCE_NAME_MAX);
	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */

	if (ipa3_add_hdr(hdr)) {
		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}

	if (hdr_entry->status) {
		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}
	rmnet_ipa3_ctx->qmap_hdr_hdl = hdr_entry->hdr_hdl;

	ret = 0;
bail:
	kfree(hdr);
	return ret;
}

static void ipa3_del_a7_qmap_hdr(void)
{
	struct ipa_ioc_del_hdr *del_hdr;
	struct ipa_hdr_del *hdl_entry;
	u32 pyld_sz;
	int ret;

	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
		sizeof(struct ipa_hdr_del);
	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!del_hdr) {
		IPAWANERR("fail to alloc exception hdr_del\n");
		return;
	}

	del_hdr->commit = 1;
	del_hdr->num_hdls = 1;
	hdl_entry = &del_hdr->hdl[0];
	hdl_entry->hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;

	ret = ipa3_del_hdr(del_hdr);
	if (ret || hdl_entry->status)
		IPAWANERR("ipa3_del_hdr failed\n");
	else
		IPAWANDBG("hdrs deletion done\n");

	rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
	kfree(del_hdr);
}

static void ipa3_del_qmap_hdr(uint32_t hdr_hdl)
{
	struct ipa_ioc_del_hdr *del_hdr;
	struct ipa_hdr_del *hdl_entry;
	u32 pyld_sz;
	int ret;

	if (hdr_hdl == 0) {
		IPAWANERR("Invalid hdr_hdl provided\n");
		return;
	}

	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
		sizeof(struct ipa_hdr_del);
	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!del_hdr) {
		IPAWANERR("fail to alloc exception hdr_del\n");
		return;
	}

	del_hdr->commit = 1;
	del_hdr->num_hdls = 1;
	hdl_entry = &del_hdr->hdl[0];
	hdl_entry->hdl = hdr_hdl;

	ret = ipa3_del_hdr(del_hdr);
	if (ret || hdl_entry->status)
		IPAWANERR("ipa3_del_hdr failed\n");
	else
		IPAWANDBG("header deletion done\n");

	rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
	kfree(del_hdr);
}

static void ipa3_del_mux_qmap_hdrs(void)
{
	int index;

	for (index = 0; index < rmnet_ipa3_ctx->rmnet_index; index++) {
		ipa3_del_qmap_hdr(rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
		rmnet_ipa3_ctx->mux_channel[index].hdr_hdl = 0;
	}
}

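/**
 * ipa3_add_qmap_hdr() - add a per-mux-id QMAP header to IPA
 * @mux_id: QMAP mux channel id, written into byte 1 of the 4-byte
 * QMAP header
 * @hdr_hdl: output parameter, IPA handle of the newly added header
 *
 * Return codes:
 * 0: success
 * -ENOMEM: failed to allocate memory
 * -EPERM: failed to add the header
 */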
static int ipa3_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
{
	struct ipa_ioc_add_hdr *hdr;
	struct ipa_hdr_add *hdr_entry;
	char hdr_name[IPA_RESOURCE_NAME_MAX];
	u32 pyld_sz;
	int ret;

	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
		sizeof(struct ipa_hdr_add);
	hdr = kzalloc(pyld_sz, GFP_KERNEL);
	if (!hdr) {
		IPAWANERR("fail to alloc exception hdr\n");
		return -ENOMEM;
	}
	hdr->num_hdrs = 1;
	hdr->commit = 1;
	hdr_entry = &hdr->hdr[0];

	snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		A2_MUX_HDR_NAME_V4_PREF,
		mux_id);
	strlcpy(hdr_entry->name, hdr_name,
		IPA_RESOURCE_NAME_MAX);

	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
	hdr_entry->hdr[1] = (uint8_t) mux_id;
	IPAWANDBG("header (%s) with mux-id: (%d)\n",
		hdr_name,
		hdr_entry->hdr[1]);
	if (ipa3_add_hdr(hdr)) {
		IPAWANERR("fail to add IPA_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}

	if (hdr_entry->status) {
		IPAWANERR("fail to add IPA_QMAP hdr\n");
		ret = -EPERM;
		goto bail;
	}

	ret = 0;
	*hdr_hdl = hdr_entry->hdr_hdl;
bail:
	kfree(hdr);
	return ret;
}

/**
 * ipa3_setup_dflt_wan_rt_tables() - Setup default wan routing tables
 *
 * Return codes:
 * 0: success
 * -ENOMEM: failed to allocate memory
 * -EPERM: failed to add the tables
 */
static int ipa3_setup_dflt_wan_rt_tables(void)
{
	struct ipa_ioc_add_rt_rule *rt_rule;
	struct ipa_rt_rule_add *rt_rule_entry;

	rt_rule =
		kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
			sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
	if (!rt_rule) {
		IPAWANERR("fail to alloc mem\n");
		return -ENOMEM;
	}
	/* setup a default v4 route to point to Apps */
	rt_rule->num_rules = 1;
	rt_rule->commit = 1;
	rt_rule->ip = IPA_IP_v4;
	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
		IPA_RESOURCE_NAME_MAX);

	rt_rule_entry = &rt_rule->rules[0];
	rt_rule_entry->at_rear = 1;
	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
	rt_rule_entry->rule.hdr_hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;

	if (ipa3_add_rt_rule(rt_rule)) {
		IPAWANERR("fail to add dflt_wan v4 rule\n");
		kfree(rt_rule);
		return -EPERM;
	}

	IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
	rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;

	/* setup a default v6 route to point to A5 */
	rt_rule->ip = IPA_IP_v6;
	if (ipa3_add_rt_rule(rt_rule)) {
		IPAWANERR("fail to add dflt_wan v6 rule\n");
		kfree(rt_rule);
		return -EPERM;
	}
	IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
	rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;

	kfree(rt_rule);
	return 0;
}

static void ipa3_del_dflt_wan_rt_tables(void)
{
	struct ipa_ioc_del_rt_rule *rt_rule;
	struct ipa_rt_rule_del *rt_rule_entry;
	int len;

	len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
		sizeof(struct ipa_rt_rule_del);
	rt_rule = kzalloc(len, GFP_KERNEL);
	if (!rt_rule) {
		IPAWANERR("unable to allocate memory for del route rule\n");
		return;
	}

	memset(rt_rule, 0, len);
	rt_rule->commit = 1;
	rt_rule->num_hdls = 1;
	rt_rule->ip = IPA_IP_v4;

	rt_rule_entry = &rt_rule->hdl[0];
	rt_rule_entry->status = -1;
	rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl;

	IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
		rt_rule_entry->hdl, IPA_IP_v4);
	if (ipa3_del_rt_rule(rt_rule) ||
			(rt_rule_entry->status)) {
		IPAWANERR("Routing rule deletion failed!\n");
	}

	rt_rule->ip = IPA_IP_v6;
	rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl;
	IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
		rt_rule_entry->hdl, IPA_IP_v6);
	if (ipa3_del_rt_rule(rt_rule) ||
			(rt_rule_entry->status)) {
		IPAWANERR("Routing rule deletion failed!\n");
	}

	kfree(rt_rule);
}

int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
		*rule_req)
{
	int i, j;

	if (rule_req->filter_spec_ex_list_valid == true) {
		rmnet_ipa3_ctx->num_q6_rules =
			rule_req->filter_spec_ex_list_len;
		IPAWANDBG("Received (%d) install_flt_req\n",
			rmnet_ipa3_ctx->num_q6_rules);
	} else {
		rmnet_ipa3_ctx->num_q6_rules = 0;
		IPAWANERR("got no UL rules from modem\n");
		return -EINVAL;
	}

	/* copy UL filter rules from the modem */
	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
		/* check if the rules overflow the cache */
		if (i == MAX_NUM_Q6_RULE) {
			IPAWANERR("Reaching (%d) max cache ",
				MAX_NUM_Q6_RULE);
			IPAWANERR(" however total (%d)\n",
				rmnet_ipa3_ctx->num_q6_rules);
			goto failure;
		}
		ipa3_qmi_ctx->q6_ul_filter_rule[i].ip =
			rule_req->filter_spec_ex_list[i].ip_type;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].action =
			rule_req->filter_spec_ex_list[i].filter_action;
		if (rule_req->filter_spec_ex_list[i].
			is_routing_table_index_valid == true)
			ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
			rule_req->filter_spec_ex_list[i].route_table_index;
		if (rule_req->filter_spec_ex_list[i].is_mux_id_valid == true)
			ipa3_qmi_ctx->q6_ul_filter_rule[i].mux_id =
			rule_req->filter_spec_ex_list[i].mux_id;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id =
			rule_req->filter_spec_ex_list[i].rule_id;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable =
			rule_req->filter_spec_ex_list[i].is_rule_hashable;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
			rule_req->filter_spec_ex_list[i].filter_rule.
			rule_eq_bitmap;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
			rule_req->filter_spec_ex_list[i].filter_rule.
			tos_eq_present;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
			rule_req->filter_spec_ex_list[i].filter_rule.tos_eq;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			protocol_eq_present = rule_req->filter_spec_ex_list[i].
			filter_rule.protocol_eq_present;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
			rule_req->filter_spec_ex_list[i].filter_rule.
			protocol_eq;

		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16 =
			rule_req->filter_spec_ex_list[i].
			filter_rule.num_ihl_offset_range_16;
		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_range_16; j++) {
			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].offset = rule_req->
				filter_spec_ex_list[i].filter_rule.
				ihl_offset_range_16[j].offset;
			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_low = rule_req->
				filter_spec_ex_list[i].filter_rule.
				ihl_offset_range_16[j].range_low;
			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_range_16[j].range_high = rule_req->
				filter_spec_ex_list[i].filter_rule.
				ihl_offset_range_16[j].range_high;
		}
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
			rule_req->filter_spec_ex_list[i].filter_rule.
			num_offset_meq_32;
		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_32; j++) {
			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].offset =
				rule_req->filter_spec_ex_list[i].
				filter_rule.offset_meq_32[j].offset;
			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].mask =
				rule_req->filter_spec_ex_list[i].
				filter_rule.offset_meq_32[j].mask;
			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_32[j].value =
				rule_req->filter_spec_ex_list[i].
				filter_rule.offset_meq_32[j].value;
		}

		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
			rule_req->filter_spec_ex_list[i].
			filter_rule.tc_eq_present;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
			rule_req->filter_spec_ex_list[i].filter_rule.tc_eq;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
			rule_req->filter_spec_ex_list[i].filter_rule.
			flow_eq_present;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
			rule_req->filter_spec_ex_list[i].filter_rule.flow_eq;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16_present =
			rule_req->filter_spec_ex_list[i].
			filter_rule.ihl_offset_eq_16_present;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.offset =
			rule_req->filter_spec_ex_list[i].
			filter_rule.ihl_offset_eq_16.offset;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_16.value =
			rule_req->filter_spec_ex_list[i].
			filter_rule.ihl_offset_eq_16.value;

		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32_present =
			rule_req->filter_spec_ex_list[i].
			filter_rule.ihl_offset_eq_32_present;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.offset =
			rule_req->filter_spec_ex_list[i].
			filter_rule.ihl_offset_eq_32.offset;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ihl_offset_eq_32.value =
			rule_req->filter_spec_ex_list[i].
			filter_rule.ihl_offset_eq_32.value;

		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_ihl_offset_meq_32 =
			rule_req->filter_spec_ex_list[i].
			filter_rule.num_ihl_offset_meq_32;
		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].
			eq_attrib.num_ihl_offset_meq_32; j++) {
			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].offset = rule_req->
				filter_spec_ex_list[i].filter_rule.
				ihl_offset_meq_32[j].offset;
			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].mask = rule_req->
				filter_spec_ex_list[i].filter_rule.
				ihl_offset_meq_32[j].mask;
			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				ihl_offset_meq_32[j].value = rule_req->
				filter_spec_ex_list[i].filter_rule.
				ihl_offset_meq_32[j].value;
		}
		ipa3_qmi_ctx->
			q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
			rule_req->filter_spec_ex_list[i].filter_rule.
			num_offset_meq_128;
		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			num_offset_meq_128; j++) {
			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].offset = rule_req->
				filter_spec_ex_list[i].filter_rule.
				offset_meq_128[j].offset;
			memcpy(ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].mask,
				rule_req->filter_spec_ex_list[i].
				filter_rule.offset_meq_128[j].mask, 16);
			memcpy(ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
				offset_meq_128[j].value, rule_req->
				filter_spec_ex_list[i].filter_rule.
				offset_meq_128[j].value, 16);
		}

		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32_present =
			rule_req->filter_spec_ex_list[i].
			filter_rule.metadata_meq32_present;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.offset =
			rule_req->filter_spec_ex_list[i].
			filter_rule.metadata_meq32.offset;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			metadata_meq32.mask = rule_req->filter_spec_ex_list[i].
			filter_rule.metadata_meq32.mask;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
			value = rule_req->filter_spec_ex_list[i].filter_rule.
			metadata_meq32.value;
		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
			ipv4_frag_eq_present =
			rule_req->filter_spec_ex_list[i].
			filter_rule.ipv4_frag_eq_present;
	}

	if (rule_req->xlat_filter_indices_list_valid) {
		if (rule_req->xlat_filter_indices_list_len >
			rmnet_ipa3_ctx->num_q6_rules) {
			IPAWANERR("Number of xlat indices is not valid: %d\n",
				rule_req->xlat_filter_indices_list_len);
			goto failure;
		}
		IPAWANDBG("Receive %d XLAT indices: ",
			rule_req->xlat_filter_indices_list_len);
		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
			IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
		IPAWANDBG("\n");

		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
			if (rule_req->xlat_filter_indices_list[i]
				>= rmnet_ipa3_ctx->num_q6_rules) {
				IPAWANERR("Xlat rule idx is wrong: %d\n",
					rule_req->xlat_filter_indices_list[i]);
				goto failure;
			} else {
				ipa3_qmi_ctx->q6_ul_filter_rule
				[rule_req->xlat_filter_indices_list[i]]
				.is_xlat_rule = 1;
				IPAWANDBG("Rule %d is xlat rule\n",
					rule_req->xlat_filter_indices_list[i]);
			}
		}
	}
	goto success;

failure:
	rmnet_ipa3_ctx->num_q6_rules = 0;
	memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0,
		sizeof(ipa3_qmi_ctx->q6_ul_filter_rule));
	return -EINVAL;

success:
	return 0;
}

static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
{
	u32 pyld_sz;
	int i, retval = 0;
	struct ipa_ioc_add_flt_rule *param;
	struct ipa_flt_rule_add flt_rule_entry;
	struct ipa_fltr_installed_notif_req_msg_v01 *req;

	pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
		sizeof(struct ipa_flt_rule_add);
	param = kzalloc(pyld_sz, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	req = (struct ipa_fltr_installed_notif_req_msg_v01 *)
		kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
			GFP_KERNEL);
	if (!req) {
		kfree(param);
		return -ENOMEM;
	}

	param->commit = 1;
	param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
	param->global = false;
	param->num_rules = (uint8_t)1;

	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
		param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
		flt_rule_entry.at_rear = true;
		flt_rule_entry.rule.action =
			ipa3_qmi_ctx->q6_ul_filter_rule[i].action;
		flt_rule_entry.rule.rt_tbl_idx
			= ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
		flt_rule_entry.rule.retain_hdr = true;
		flt_rule_entry.rule.hashable =
			ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable;
		flt_rule_entry.rule.rule_id =
			ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;

		/* debug rt-hdl */
		IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
			i, flt_rule_entry.rule.rt_tbl_idx);
		flt_rule_entry.rule.eq_attrib_type = true;
		memcpy(&(flt_rule_entry.rule.eq_attrib),
			&ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
			sizeof(struct ipa_ipfltri_rule_eq));
		memcpy(&(param->rules[0]), &flt_rule_entry,
			sizeof(struct ipa_flt_rule_add));
		if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
			retval = -EFAULT;
			IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
		} else {
			/* store the rule handle */
			ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i] =
				param->rules[0].flt_rule_hdl;
		}
	}

	/* send ipa_fltr_installed_notif_req_msg_v01 to Q6 */
	req->source_pipe_index =
		ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
	req->install_status = QMI_RESULT_SUCCESS_V01;
	req->rule_id_valid = 1;
	req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules;
	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
		req->rule_id[i] =
			ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;
	}
	if (ipa3_qmi_filter_notify_send(req)) {
		IPAWANDBG("add filter rule index on A7-RX failed\n");
		retval = -EFAULT;
	}
	rmnet_ipa3_ctx->old_num_q6_rules = rmnet_ipa3_ctx->num_q6_rules;
	IPAWANDBG("add (%d) filter rule index on A7-RX\n",
		rmnet_ipa3_ctx->old_num_q6_rules);
	kfree(param);
	kfree(req);
	return retval;
}

static int ipa3_wwan_del_ul_flt_rule_to_ipa(void)
{
	u32 pyld_sz;
	int i, retval = 0;
	struct ipa_ioc_del_flt_rule *param;
	struct ipa_flt_rule_del flt_rule_entry;

	pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
		sizeof(struct ipa_flt_rule_del);
	param = kzalloc(pyld_sz, GFP_KERNEL);
	if (!param) {
		IPAWANERR("kzalloc failed\n");
		return -ENOMEM;
	}

	param->commit = 1;
	param->num_hdls = (uint8_t) 1;

	for (i = 0; i < rmnet_ipa3_ctx->old_num_q6_rules; i++) {
		param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
		flt_rule_entry.hdl = ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i];
		/* debug rt-hdl */
		IPAWANDBG("delete-IPA rule index(%d)\n", i);
		memcpy(&(param->hdl[0]), &flt_rule_entry,
			sizeof(struct ipa_flt_rule_del));
		if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
			IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
			kfree(param);
			return -EFAULT;
		}
	}

	/* set UL filter-rule add-indication */
	rmnet_ipa3_ctx->a7_ul_flt_set = false;
	rmnet_ipa3_ctx->old_num_q6_rules = 0;

	kfree(param);
	return retval;
}

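/*
 * Mux channel lookup helpers; both return MAX_NUM_OF_MUX_CHANNEL as a
 * "not found" sentinel, so callers compare the result against
 * MAX_NUM_OF_MUX_CHANNEL before using it as an index.
 */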
static int ipa3_find_mux_channel_index(uint32_t mux_id)
{
	int i;

	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
		if (mux_id == rmnet_ipa3_ctx->mux_channel[i].mux_id)
			return i;
	}
	return MAX_NUM_OF_MUX_CHANNEL;
}

static int find_vchannel_name_index(const char *vchannel_name)
{
	int i;

	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
		if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
					vchannel_name) == 0)
			return i;
	}
	return MAX_NUM_OF_MUX_CHANNEL;
}

static int ipa3_wwan_register_to_ipa(int index)
{
	struct ipa_tx_intf tx_properties = {0};
	struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
	struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
	struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
	struct ipa_rx_intf rx_properties = {0};
	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
	struct ipa_ext_intf ext_properties = {0};
	struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
	u32 pyld_sz;
	int ret = 0, i;

	IPAWANDBG("index(%d) device[%s]:\n", index,
		rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
	if (!rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set) {
		ret = ipa3_add_qmap_hdr(
			rmnet_ipa3_ctx->mux_channel[index].mux_id,
			&rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
		if (ret) {
			IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
			return ret;
		}
		rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set = true;
	}
	tx_properties.prop = tx_ioc_properties;
	tx_ipv4_property = &tx_properties.prop[0];
	tx_ipv4_property->ip = IPA_IP_v4;
	tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
	snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		A2_MUX_HDR_NAME_V4_PREF,
		rmnet_ipa3_ctx->mux_channel[index].mux_id);
	tx_ipv6_property = &tx_properties.prop[1];
	tx_ipv6_property->ip = IPA_IP_v6;
	tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
	/* no need to use A2_MUX_HDR_NAME_V6_PREF, same header */
	snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
		A2_MUX_HDR_NAME_V4_PREF,
		rmnet_ipa3_ctx->mux_channel[index].mux_id);
	tx_properties.num_props = 2;

	rx_properties.prop = rx_ioc_properties;
	rx_ipv4_property = &rx_properties.prop[0];
	rx_ipv4_property->ip = IPA_IP_v4;
	rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
	rx_ipv4_property->attrib.meta_data =
		rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
	rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
	rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
	rx_ipv6_property = &rx_properties.prop[1];
	rx_ipv6_property->ip = IPA_IP_v6;
	rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
	rx_ipv6_property->attrib.meta_data =
		rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
	rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
	rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
	rx_properties.num_props = 2;

	pyld_sz = rmnet_ipa3_ctx->num_q6_rules *
		sizeof(struct ipa_ioc_ext_intf_prop);
	ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
	if (!ext_ioc_properties) {
		IPAWANERR("Error allocate memory\n");
		return -ENOMEM;
	}

	ext_properties.prop = ext_ioc_properties;
	ext_properties.excp_pipe_valid = true;
	ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
	ext_properties.num_props = rmnet_ipa3_ctx->num_q6_rules;
	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
		memcpy(&(ext_properties.prop[i]),
			&(ipa3_qmi_ctx->q6_ul_filter_rule[i]),
			sizeof(struct ipa_ioc_ext_intf_prop));
		ext_properties.prop[i].mux_id =
			rmnet_ipa3_ctx->mux_channel[index].mux_id;
		IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
			ext_properties.prop[i].ip,
			ext_properties.prop[i].rt_tbl_idx);
		IPAWANDBG("action: %d mux:%d\n",
			ext_properties.prop[i].action,
			ext_properties.prop[i].mux_id);
	}
	ret = ipa3_register_intf_ext(rmnet_ipa3_ctx->mux_channel[index].
		vchannel_name, &tx_properties,
		&rx_properties, &ext_properties);
	if (ret) {
		IPAWANERR("[%s]:ipa3_register_intf failed %d\n",
			rmnet_ipa3_ctx->mux_channel[index].vchannel_name, ret);
		goto fail;
	}
	rmnet_ipa3_ctx->mux_channel[index].ul_flt_reg = true;
fail:
	kfree(ext_ioc_properties);
	return ret;
}

static void ipa3_cleanup_deregister_intf(void)
{
	int i;
	int ret;

	for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
		if (rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg) {
			ret = ipa3_deregister_intf(
				rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
			if (ret < 0) {
				IPAWANERR("de-register device %s(%d) failed\n",
					rmnet_ipa3_ctx->mux_channel[i].
					vchannel_name,
					i);
				return;
			}
			IPAWANDBG("de-register device %s(%d) success\n",
				rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
				i);
		}
		rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = false;
	}
}

int ipa3_wwan_update_mux_channel_prop(void)
{
	int ret = 0, i;

	/* install UL filter rules */
	if (rmnet_ipa3_ctx->egress_set) {
		if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
			IPAWANDBG("setup UL filter rules\n");
			if (rmnet_ipa3_ctx->a7_ul_flt_set) {
				IPAWANDBG("del previous UL filter rules\n");
				/* delete the rule handles */
				ret = ipa3_wwan_del_ul_flt_rule_to_ipa();
				if (ret) {
					IPAWANERR("failed to del old rules\n");
					return -EINVAL;
				}
				IPAWANDBG("deleted old UL rules\n");
			}
			ret = ipa3_wwan_add_ul_flt_rule_to_ipa();
		}
		if (ret)
			IPAWANERR("failed to install UL rules\n");
		else
			rmnet_ipa3_ctx->a7_ul_flt_set = true;
	}
	/* update Tx/Rx/Ext property */
	IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
	if (rmnet_ipa3_ctx->rmnet_index == 0) {
		IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
		return ret;
	}

	ipa3_cleanup_deregister_intf();

	for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
		ret = ipa3_wwan_register_to_ipa(i);
		if (ret < 0) {
			IPAWANERR("failed to re-register %s, mux %d, index %d\n",
				rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
				rmnet_ipa3_ctx->mux_channel[i].mux_id,
				i);
			return -ENODEV;
		}
		IPAWANERR("dev(%s) has registered to IPA\n",
			rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
		rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = true;
	}
	return ret;
}

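/*
 * Compatibility shim: kernels that predate reinit_completion() only
 * provide the INIT_COMPLETION() macro, so map the newer name onto it
 * there.
 */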
#ifdef INIT_COMPLETION
#define reinit_completion(x) INIT_COMPLETION(*(x))
#endif /* INIT_COMPLETION */

static int __ipa_wwan_open(struct net_device *dev)
{
	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);

	IPAWANDBG("[%s] __wwan_open()\n", dev->name);
	if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
		reinit_completion(&wwan_ptr->resource_granted_completion);
	wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;

	if (ipa3_rmnet_res.ipa_napi_enable)
		napi_enable(&(wwan_ptr->napi));
	return 0;
}

/**
 * ipa3_wwan_open() - Opens the wwan network interface. Opens logical
 * channel on A2 MUX driver and starts the network stack queue
 *
 * @dev: network device
 *
 * Return codes:
 * 0: success
 * -ENODEV: Error while opening logical channel on A2 MUX driver
 */
static int ipa3_wwan_open(struct net_device *dev)
{
	int rc = 0;

	IPAWANDBG("[%s] wwan_open()\n", dev->name);
	rc = __ipa_wwan_open(dev);
	if (rc == 0)
		netif_start_queue(dev);
	return rc;
}

static int __ipa_wwan_close(struct net_device *dev)
{
	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
	int rc = 0;

	if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
		wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
		/* do not close the wwan port once it is up; doing so
		 * causes the remote side to hang when the port is
		 * opened again
		 */
		reinit_completion(&wwan_ptr->resource_granted_completion);
		rc = ipa3_deregister_intf(dev->name);
		if (rc) {
			IPAWANERR("[%s]: ipa3_deregister_intf failed %d\n",
				dev->name, rc);
			return rc;
		}
		return rc;
	} else {
		return -EBADF;
	}
}

/**
 * ipa3_wwan_stop() - Stops the wwan network interface. Closes
 * logical channel on A2 MUX driver and stops the network stack
 * queue
 *
 * @dev: network device
 *
 * Return codes:
 * 0: success
 * -ENODEV: Error while closing logical channel on A2 MUX driver
 */
static int ipa3_wwan_stop(struct net_device *dev)
{
	IPAWANDBG("[%s] ipa3_wwan_stop()\n", dev->name);
	__ipa_wwan_close(dev);
	netif_stop_queue(dev);
	return 0;
}

static int ipa3_wwan_change_mtu(struct net_device *dev, int new_mtu)
{
	if (0 > new_mtu || WWAN_DATA_LEN < new_mtu)
		return -EINVAL;
	IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
		dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;
	return 0;
}

/**
 * ipa3_wwan_xmit() - Transmits an skb.
 *
 * @skb: skb to be transmitted
 * @dev: network device
 *
 * Return codes:
 * 0: success
 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
 * later
 * -EFAULT: Error while transmitting the skb
 */
static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int ret = 0;
	bool qmap_check;
	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
	struct ipa_tx_meta meta;

	if (skb->protocol != htons(ETH_P_MAP)) {
		IPAWANDBG_LOW
		("SW filtering out non-QMAP packet received from %s",
		current->comm);
		return NETDEV_TX_OK;
	}

	qmap_check = RMNET_MAP_GET_CD_BIT(skb);
	if (netif_queue_stopped(dev)) {
		if (qmap_check &&
			atomic_read(&wwan_ptr->outstanding_pkts) <
					wwan_ptr->outstanding_high_ctl) {
			pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
			goto send;
		} else {
			pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
			return NETDEV_TX_BUSY;
		}
	}

	/* checking High WM hit */
	if (atomic_read(&wwan_ptr->outstanding_pkts) >=
					wwan_ptr->outstanding_high) {
		if (!qmap_check) {
			IPAWANDBG_LOW("pending(%d)/(%d)- stop(%d)\n",
				atomic_read(&wwan_ptr->outstanding_pkts),
				wwan_ptr->outstanding_high,
				netif_queue_stopped(dev));
			IPAWANDBG_LOW("qmap_chk(%d)\n", qmap_check);
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;
		}
	}

send:
	/* IPA_RM checking start */
	ret = ipa_rm_inactivity_timer_request_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret == -EINPROGRESS) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	if (ret) {
		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
			dev->name, ret);
		return -EFAULT;
	}
	/* IPA_RM checking end */

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		memset(&meta, 0, sizeof(meta));
		meta.pkt_init_dst_ep_valid = true;
		meta.pkt_init_dst_ep_remote = true;
		ret = ipa3_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
	} else {
		ret = ipa3_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
	}

	if (ret) {
		ret = NETDEV_TX_BUSY;
		dev->stats.tx_dropped++;
		goto out;
	}

	atomic_inc(&wwan_ptr->outstanding_pkts);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	ret = NETDEV_TX_OK;
out:
	ipa_rm_inactivity_timer_release_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
	return ret;
}

static void ipa3_wwan_tx_timeout(struct net_device *dev)
{
	IPAWANERR("[%s] ipa3_wwan_tx_timeout(), data stall in UL\n", dev->name);
}

/**
 * apps_ipa_tx_complete_notify() - TX complete notify callback
 *
 * @priv: driver context
 * @evt: event type
 * @data: data provided with event
 *
 * Check that the packet is the one we sent and release it.
 * This function will be called in deferred context on the IPA
 * workqueue.
 */
static void apps_ipa_tx_complete_notify(void *priv,
	enum ipa_dp_evt_type evt,
	unsigned long data)
{
	struct sk_buff *skb = (struct sk_buff *)data;
	struct net_device *dev = (struct net_device *)priv;
	struct ipa3_wwan_private *wwan_ptr;

	if (dev != IPA_NETDEV()) {
		IPAWANDBG("Received pre-SSR packet completion\n");
		dev_kfree_skb_any(skb);
		return;
	}

	if (evt != IPA_WRITE_DONE) {
		IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return;
	}

	wwan_ptr = netdev_priv(dev);
	atomic_dec(&wwan_ptr->outstanding_pkts);
	__netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr) &&
		netif_queue_stopped(wwan_ptr->net) &&
		atomic_read(&wwan_ptr->outstanding_pkts) <
					(wwan_ptr->outstanding_low)) {
		IPAWANDBG_LOW("Outstanding low (%d) - waking up queue\n",
				wwan_ptr->outstanding_low);
		netif_wake_queue(wwan_ptr->net);
	}
	__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
	dev_kfree_skb_any(skb);
	ipa_rm_inactivity_timer_release_resource(
		IPA_RM_RESOURCE_WWAN_0_PROD);
}

/**
 * apps_ipa_packet_receive_notify() - Rx notify
 *
 * @priv: driver context
 * @evt: event type
 * @data: data provided with event
 *
 * IPA will pass a packet to the Linux network stack with skb->data
 */
static void apps_ipa_packet_receive_notify(void *priv,
	enum ipa_dp_evt_type evt,
	unsigned long data)
{
	struct net_device *dev = (struct net_device *)priv;

	if (evt == IPA_RECEIVE) {
		struct sk_buff *skb = (struct sk_buff *)data;
		int result;
		unsigned int packet_len = skb->len;

		IPAWANDBG_LOW("Rx packet was received");
		skb->dev = IPA_NETDEV();
		skb->protocol = htons(ETH_P_MAP);

		if (ipa3_rmnet_res.ipa_napi_enable) {
			trace_rmnet_ipa_netif_rcv_skb3(dev->stats.rx_packets);
			result = netif_receive_skb(skb);
		} else {
			if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
					== 0) {
				trace_rmnet_ipa_netifni3(dev->stats.rx_packets);
				result = netif_rx_ni(skb);
			} else {
				trace_rmnet_ipa_netifrx3(dev->stats.rx_packets);
				result = netif_rx(skb);
			}
		}

		if (result) {
			pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
					__func__, __LINE__);
			dev->stats.rx_dropped++;
		}
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += packet_len;
	} else if (evt == IPA_CLIENT_START_POLL)
		ipa3_rmnet_rx_cb(priv);
	else if (evt == IPA_CLIENT_COMP_NAPI) {
		if (ipa3_rmnet_res.ipa_napi_enable)
			napi_complete(&(rmnet_ipa3_ctx->wwan_priv->napi));
	} else
		IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
}

/**
 * ipa3_wwan_ioctl() - I/O control for wwan network driver.
 *
 * @dev: network device
 * @ifr: ignored
 * @cmd: cmd to be executed. can be one of the following:
 * IPA_WWAN_IOCTL_OPEN - Open the network interface
 * IPA_WWAN_IOCTL_CLOSE - Close the network interface
 *
 * Return codes:
 * 0: success
 * -EFAULT: failed to copy data to/from user space
 * -EINVAL: unsupported command
 */
static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int rc = 0;
	int mru = 1000, epid = 1, mux_index, len;
	struct ipa_msg_meta msg_meta;
	struct ipa_wan_msg *wan_msg = NULL;
	struct rmnet_ioctl_extended_s extend_ioctl_data;
	struct rmnet_ioctl_data_s ioctl_data;
	struct ipa3_rmnet_mux_val *mux_channel;
	int rmnet_index;

	IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
	switch (cmd) {
	/* Set Ethernet protocol */
	case RMNET_IOCTL_SET_LLP_ETHERNET:
		break;
	/* Set RAWIP protocol */
	case RMNET_IOCTL_SET_LLP_IP:
		break;
	/* Get link protocol */
	case RMNET_IOCTL_GET_LLP:
		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
			sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	/* Set QoS header enabled */
	case RMNET_IOCTL_SET_QOS_ENABLE:
		return -EINVAL;
	/* Set QoS header disabled */
	case RMNET_IOCTL_SET_QOS_DISABLE:
		break;
	/* Get QoS header state */
	case RMNET_IOCTL_GET_QOS:
		ioctl_data.u.operation_mode = RMNET_MODE_NONE;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
			sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	/* Get operation mode */
	case RMNET_IOCTL_GET_OPMODE:
		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
			sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	/* Open transport port */
	case RMNET_IOCTL_OPEN:
		break;
	/* Close transport port */
	case RMNET_IOCTL_CLOSE:
		break;
	/* Flow enable */
	case RMNET_IOCTL_FLOW_ENABLE:
		IPAWANDBG("Received flow enable\n");
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
			sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		ipa3_flow_control(IPA_CLIENT_USB_PROD, true,
			ioctl_data.u.tcm_handle);
		break;
	/* Flow disable */
	case RMNET_IOCTL_FLOW_DISABLE:
		IPAWANDBG("Received flow disable\n");
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
			sizeof(struct rmnet_ioctl_data_s))) {
			rc = -EFAULT;
			break;
		}
		ipa3_flow_control(IPA_CLIENT_USB_PROD, false,
			ioctl_data.u.tcm_handle);
		break;
	/* Set flow handle */
	case RMNET_IOCTL_FLOW_SET_HNDL:
		break;

	/* Extended IOCTLs */
	case RMNET_IOCTL_EXTENDED:
		IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
		if (copy_from_user(&extend_ioctl_data,
			(u8 *)ifr->ifr_ifru.ifru_data,
			sizeof(struct rmnet_ioctl_extended_s))) {
			IPAWANERR("failed to copy extended ioctl data\n");
			rc = -EFAULT;
			break;
		}
		switch (extend_ioctl_data.extended_ioctl) {
		/* Get features */
		case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
			IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
			extend_ioctl_data.u.data =
				(RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
				RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
				RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* Set MRU */
		case RMNET_IOCTL_SET_MRU:
			mru = extend_ioctl_data.u.data;
			IPAWANDBG("get MRU size %d\n",
				extend_ioctl_data.u.data);
			break;
		/* Get MRU */
		case RMNET_IOCTL_GET_MRU:
			extend_ioctl_data.u.data = mru;
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* GET SG support */
		case RMNET_IOCTL_GET_SG_SUPPORT:
			extend_ioctl_data.u.data =
				ipa3_rmnet_res.ipa_advertise_sg_support;
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* Get endpoint ID */
		case RMNET_IOCTL_GET_EPID:
			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
			extend_ioctl_data.u.data = epid;
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			if (copy_from_user(&extend_ioctl_data,
				(u8 *)ifr->ifr_ifru.ifru_data,
				sizeof(struct rmnet_ioctl_extended_s))) {
				IPAWANERR("copy extended ioctl data failed\n");
				rc = -EFAULT;
				break;
			}
			IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
				extend_ioctl_data.u.data);
			break;
		/* Endpoint pair */
		case RMNET_IOCTL_GET_EP_PAIR:
			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
				ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
				ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			if (copy_from_user(&extend_ioctl_data,
				(u8 *)ifr->ifr_ifru.ifru_data,
				sizeof(struct rmnet_ioctl_extended_s))) {
				IPAWANERR("copy extended ioctl data failed\n");
				rc = -EFAULT;
				break;
			}
			IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
				extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
				extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
			break;
		/* Get driver name */
		case RMNET_IOCTL_GET_DRIVER_NAME:
			memcpy(&extend_ioctl_data.u.if_name,
				IPA_NETDEV()->name, IFNAMSIZ);
			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
				&extend_ioctl_data,
				sizeof(struct rmnet_ioctl_extended_s)))
				rc = -EFAULT;
			break;
		/* Add MUX ID */
		case RMNET_IOCTL_ADD_MUX_CHANNEL:
			mux_index = ipa3_find_mux_channel_index(
				extend_ioctl_data.u.rmnet_mux_val.mux_id);
			if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
				IPAWANDBG("already setup mux(%d)\n",
					extend_ioctl_data.u.
					rmnet_mux_val.mux_id);
				return rc;
			}
			if (rmnet_ipa3_ctx->rmnet_index
				>= MAX_NUM_OF_MUX_CHANNEL) {
				IPAWANERR("Exceed mux_channel limit(%d)\n",
					rmnet_ipa3_ctx->rmnet_index);
				return -EFAULT;
			}
			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
				extend_ioctl_data.u.rmnet_mux_val.mux_id,
				extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
			/* cache the mux name and id */
			mux_channel = rmnet_ipa3_ctx->mux_channel;
			rmnet_index = rmnet_ipa3_ctx->rmnet_index;

			mux_channel[rmnet_index].mux_id =
				extend_ioctl_data.u.rmnet_mux_val.mux_id;
			memcpy(mux_channel[rmnet_index].vchannel_name,
				extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
				sizeof(mux_channel[rmnet_index]
					.vchannel_name));
			IPAWANDBG("cache device[%s:%d] in IPA_wan[%d]\n",
				mux_channel[rmnet_index].vchannel_name,
				mux_channel[rmnet_index].mux_id,
				rmnet_index);
			/* check if UL filter rules have arrived */
			if (rmnet_ipa3_ctx->num_q6_rules != 0) {
				IPAWANERR("dev(%s) register to IPA\n",
					extend_ioctl_data.u.rmnet_mux_val.
					vchannel_name);
				rc = ipa3_wwan_register_to_ipa(
					rmnet_ipa3_ctx->rmnet_index);
				if (rc < 0) {
					IPAWANERR("device %s reg IPA failed\n",
						extend_ioctl_data.u.
						rmnet_mux_val.vchannel_name);
					return -ENODEV;
				}
				mux_channel[rmnet_index].mux_channel_set = true;
				mux_channel[rmnet_index].ul_flt_reg = true;
			} else {
				IPAWANDBG("dev(%s) hasn't registered to IPA\n",
					extend_ioctl_data.u.
					rmnet_mux_val.vchannel_name);
				mux_channel[rmnet_index].mux_channel_set = true;
				mux_channel[rmnet_index].ul_flt_reg = false;
			}
			rmnet_ipa3_ctx->rmnet_index++;
			break;
		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
			IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
			if ((extend_ioctl_data.u.data) &
					RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
					ipa_ep_cfg.hdr.hdr_len = 8;
				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
					ipa_ep_cfg.cfg.cs_offload_en =
					IPA_ENABLE_CS_OFFLOAD_UL;
				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
					ipa_ep_cfg.cfg.cs_metadata_hdr_offset
					= 1;
			} else {
				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
					ipa_ep_cfg.hdr.hdr_len = 4;
			}
			if ((extend_ioctl_data.u.data) &
					RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
					ipa_ep_cfg.aggr.aggr_en =
					IPA_ENABLE_AGGR;
			else
				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
					ipa_ep_cfg.aggr.aggr_en =
					IPA_BYPASS_AGGR;
			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
				hdr_ofst_metadata_valid = 1;
			/* modem wants the offset at 0 */
			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
				hdr_ofst_metadata = 0;
			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
				dst = IPA_CLIENT_APPS_LAN_WAN_PROD;
			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
				mode = IPA_BASIC;

			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.client =
				IPA_CLIENT_APPS_LAN_WAN_PROD;
			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.notify =
				apps_ipa_tx_complete_notify;
			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.desc_fifo_sz =
				IPA_SYS_TX_DATA_DESC_FIFO_SZ;
			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.priv = dev;

			rc = ipa3_setup_sys_pipe(
				&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg,
				&rmnet_ipa3_ctx->apps_to_ipa3_hdl);
			if (rc)
				IPAWANERR("failed to config egress endpoint\n");

			if (rmnet_ipa3_ctx->num_q6_rules != 0) {
				/* already got Q6 UL filter rules */
				if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt
					== false)
					rc = ipa3_wwan_add_ul_flt_rule_to_ipa();
				else
					rc = 0;
				rmnet_ipa3_ctx->egress_set = true;
				if (rc)
					IPAWANERR("install UL rules failed\n");
				else
					rmnet_ipa3_ctx->a7_ul_flt_set = true;
			} else {
				/* wait for Q6 UL filter rules */
				rmnet_ipa3_ctx->egress_set = true;
				IPAWANDBG("no UL-rules, egress_set(%d)\n",
					rmnet_ipa3_ctx->egress_set);
			}
			break;
		case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
			IPAWANDBG("get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
			if ((extend_ioctl_data.u.data) &
					RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
				rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
					ipa_ep_cfg.cfg.cs_offload_en =
					IPA_ENABLE_CS_OFFLOAD_DL;

			if ((extend_ioctl_data.u.data) &
					RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
				IPAWANERR("get AGG size %d count %d\n",
					extend_ioctl_data.u.
					ingress_format.agg_size,
					extend_ioctl_data.u.
					ingress_format.agg_count);
				if (!ipa_disable_apps_wan_cons_deaggr(
					extend_ioctl_data.u.
					ingress_format.agg_size,
					extend_ioctl_data.
					u.ingress_format.agg_count)) {
					rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
						ipa_ep_cfg.aggr.aggr_byte_limit =
						extend_ioctl_data.u.ingress_format.
						agg_size;
					rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
						ipa_ep_cfg.aggr.aggr_pkt_limit =
						extend_ioctl_data.u.ingress_format.
						agg_count;
				}
			}

			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
				hdr_len = 4;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
				hdr_ofst_metadata_valid = 1;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
				hdr.hdr_ofst_metadata = 1;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
				hdr_ofst_pkt_size_valid = 1;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
				hdr_ofst_pkt_size = 2;

			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
				hdr_total_len_or_pad_valid = true;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
				hdr_total_len_or_pad = 0;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
				hdr_payload_len_inc_padding = true;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
				hdr_total_len_or_pad_offset = 0;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
				hdr_little_endian = 0;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
				metadata_mask.metadata_mask = 0xFF000000;

			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.client =
				IPA_CLIENT_APPS_WAN_CONS;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.notify =
				apps_ipa_packet_receive_notify;
			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.priv = dev;

			rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled =
				ipa3_rmnet_res.ipa_napi_enable;
			if (rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled)
				rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
					desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
			else
				rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
					desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;

			mutex_lock(
				&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
			if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
				IPAWANDBG("In SSR sequence/recovery\n");
				mutex_unlock(&rmnet_ipa3_ctx->
					ipa_to_apps_pipe_handle_guard);
				rc = -EFAULT;
				break;
			}
			rc = ipa3_setup_sys_pipe(
				&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
				&rmnet_ipa3_ctx->ipa3_to_apps_hdl);
			mutex_unlock(&rmnet_ipa3_ctx->
				ipa_to_apps_pipe_handle_guard);
			if (rc)
				IPAWANERR("failed to configure ingress\n");
			break;
		case RMNET_IOCTL_SET_XLAT_DEV_INFO:
			wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
				GFP_KERNEL);
			if (!wan_msg) {
				IPAWANERR("Failed to allocate memory.\n");
				return -ENOMEM;
			}
			len = sizeof(wan_msg->upstream_ifname) >
				sizeof(extend_ioctl_data.u.if_name) ?
				sizeof(extend_ioctl_data.u.if_name) :
				sizeof(wan_msg->upstream_ifname);
			strlcpy(wan_msg->upstream_ifname,
				extend_ioctl_data.u.if_name, len);
			memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
			msg_meta.msg_type = WAN_XLAT_CONNECT;
			msg_meta.msg_len = sizeof(struct ipa_wan_msg);
			rc = ipa3_send_msg(&msg_meta, wan_msg,
				ipa3_wwan_msg_free_cb);
			if (rc) {
				IPAWANERR("Failed to send XLAT_CONNECT msg\n");
				kfree(wan_msg);
			}
			break;
		/* Get agg count */
		case RMNET_IOCTL_GET_AGGREGATION_COUNT:
			break;
		/* Set agg count */
		case RMNET_IOCTL_SET_AGGREGATION_COUNT:
			break;
		/* Get agg size */
		case RMNET_IOCTL_GET_AGGREGATION_SIZE:
			break;
		/* Set agg size */
		case RMNET_IOCTL_SET_AGGREGATION_SIZE:
			break;
		/* Do flow control */
		case RMNET_IOCTL_FLOW_CONTROL:
			break;
		/* For legacy use */
		case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
			break;
		/* Get HW/SW map */
		case RMNET_IOCTL_GET_HWSW_MAP:
			break;
		/* Set RX Headroom */
		case RMNET_IOCTL_SET_RX_HEADROOM:
			break;
		default:
			IPAWANERR("[%s] unsupported extended cmd[%d]",
				dev->name,
				extend_ioctl_data.extended_ioctl);
			rc = -EINVAL;
		}
		break;
	default:
		IPAWANERR("[%s] unsupported cmd[%d]",
			dev->name, cmd);
		rc = -EINVAL;
	}
	return rc;
}

static const struct net_device_ops ipa3_wwan_ops_ip = {
	.ndo_open = ipa3_wwan_open,
	.ndo_stop = ipa3_wwan_stop,
	.ndo_start_xmit = ipa3_wwan_xmit,
	.ndo_tx_timeout = ipa3_wwan_tx_timeout,
	.ndo_do_ioctl = ipa3_wwan_ioctl,
	.ndo_change_mtu = ipa3_wwan_change_mtu,
	.ndo_set_mac_address = NULL,
	.ndo_validate_addr = NULL,
};

/**
 * ipa3_wwan_setup() - Sets up the wwan network driver.
 *
 * @dev: network device
 *
 * Return codes:
 * None
 */
static void ipa3_wwan_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipa3_wwan_ops_ip;
	ether_setup(dev);
	/* set this after calling ether_setup */
	dev->header_ops = NULL; /* No header */
	dev->type = ARPHRD_RAWIP;
	dev->hard_header_len = 0;
	dev->mtu = WWAN_DATA_LEN;
	dev->addr_len = 0;
	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
	dev->needed_headroom = HEADROOM_FOR_QMAP;
	dev->needed_tailroom = TAILROOM;
	dev->watchdog_timeo = 1000;
}

/* IPA_RM related functions start */
static void ipa3_q6_prod_rm_request_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_request,
		ipa3_q6_prod_rm_request_resource);
static void ipa3_q6_prod_rm_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_release,
		ipa3_q6_prod_rm_release_resource);
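
/*
 * The Q6 RM request/release operations below are deferred onto the
 * dedicated "clnt_req" workqueue (created in ipa3_q6_initialize_rm())
 * so that the ipa_rm_request_resource()/ipa_rm_release_resource()
 * calls run in process context rather than in the RM caller's
 * (possibly atomic) context.
 */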
1751
1752static void ipa3_q6_prod_rm_request_resource(struct work_struct *work)
1753{
1754 int ret = 0;
1755
1756 ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
1757 if (ret < 0 && ret != -EINPROGRESS) {
1758 IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
1759 ret);
1760 return;
1761 }
1762}
1763
1764static int ipa3_q6_rm_request_resource(void)
1765{
1766 queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
1767 &ipa3_q6_con_rm_request, 0);
1768 return 0;
1769}
1770
1771static void ipa3_q6_prod_rm_release_resource(struct work_struct *work)
1772{
1773 int ret = 0;
1774
1775 ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
1776 if (ret < 0 && ret != -EINPROGRESS) {
1777 IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
1778 ret);
1779 return;
1780 }
1781}
1782
1783
1784static int ipa3_q6_rm_release_resource(void)
1785{
1786 queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
1787 &ipa3_q6_con_rm_release, 0);
1788 return 0;
1789}
1790
1791
1792static void ipa3_q6_rm_notify_cb(void *user_data,
1793 enum ipa_rm_event event,
1794 unsigned long data)
1795{
1796 switch (event) {
1797 case IPA_RM_RESOURCE_GRANTED:
1798 IPAWANDBG_LOW("%s: Q6_PROD GRANTED CB\n", __func__);
1799 break;
1800 case IPA_RM_RESOURCE_RELEASED:
1801 IPAWANDBG_LOW("%s: Q6_PROD RELEASED CB\n", __func__);
1802 break;
1803 default:
1804 return;
1805 }
1806}
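
/*
 * IPA RM topology used by this driver (set up below and in
 * ipa3_wwan_probe()):
 *
 *   IPA_RM_RESOURCE_Q6_PROD     ---> IPA_RM_RESOURCE_APPS_CONS
 *   IPA_RM_RESOURCE_WWAN_0_PROD ---> IPA_RM_RESOURCE_Q6_CONS
 *
 * Q6 consumer request/release callbacks are deferred to the dedicated
 * rm_q6_wq workqueue rather than run in the caller's context.
 */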
1807static int ipa3_q6_initialize_rm(void)
1808{
1809 struct ipa_rm_create_params create_params;
1810 struct ipa_rm_perf_profile profile;
1811 int result;
1812
1813 /* Initialize IPA_RM workqueue */
1814 rmnet_ipa3_ctx->rm_q6_wq = create_singlethread_workqueue("clnt_req");
1815 if (!rmnet_ipa3_ctx->rm_q6_wq)
1816 return -ENOMEM;
1817
1818 memset(&create_params, 0, sizeof(create_params));
1819 create_params.name = IPA_RM_RESOURCE_Q6_PROD;
1820 create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb;
1821 result = ipa_rm_create_resource(&create_params);
1822 if (result)
1823 goto create_rsrc_err1;
1824 memset(&create_params, 0, sizeof(create_params));
1825 create_params.name = IPA_RM_RESOURCE_Q6_CONS;
1826 create_params.release_resource = &ipa3_q6_rm_release_resource;
1827 create_params.request_resource = &ipa3_q6_rm_request_resource;
1828 result = ipa_rm_create_resource(&create_params);
1829 if (result)
1830 goto create_rsrc_err2;
1831 /* add dependency*/
1832 result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
1833 IPA_RM_RESOURCE_APPS_CONS);
1834 if (result)
1835 goto add_dpnd_err;
1836 /* setup Performance profile */
1837 memset(&profile, 0, sizeof(profile));
1838 profile.max_supported_bandwidth_mbps = 100;
1839 result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
1840 &profile);
1841 if (result)
1842 goto set_perf_err;
1843 result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
1844 &profile);
1845 if (result)
1846 goto set_perf_err;
1847 return result;
1848
1849set_perf_err:
1850 ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
1851 IPA_RM_RESOURCE_APPS_CONS);
1852add_dpnd_err:
1853 result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
1854 if (result < 0)
1855 IPAWANERR("Error deleting resource %d, ret=%d\n",
1856 IPA_RM_RESOURCE_Q6_CONS, result);
1857create_rsrc_err2:
1858 result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
1859 if (result < 0)
1860 IPAWANERR("Error deleting resource %d, ret=%d\n",
1861 IPA_RM_RESOURCE_Q6_PROD, result);
1862create_rsrc_err1:
1863 destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
1864 return result;
1865}
1866
1867void ipa3_q6_deinitialize_rm(void)
1868{
1869 int ret;
1870
1871 ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
1872 IPA_RM_RESOURCE_APPS_CONS);
1873 if (ret < 0)
1874 IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
1875 IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
1876 ret);
1877 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
1878 if (ret < 0)
1879 IPAWANERR("Error deleting resource %d, ret=%d\n",
1880 IPA_RM_RESOURCE_Q6_CONS, ret);
1881 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
1882 if (ret < 0)
1883 IPAWANERR("Error deleting resource %d, ret=%d\n",
1884 IPA_RM_RESOURCE_Q6_PROD, ret);
1885 destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
1886}
1887
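/*
 * Wake the netdev TX queue from workqueue context; the per-queue TX lock
 * is taken so the wake-up is serialized against a concurrent
 * ndo_start_xmit() on the same queue.
 */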
1888static void ipa3_wake_tx_queue(struct work_struct *work)
1889{
1890 if (IPA_NETDEV()) {
1891 __netif_tx_lock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
1892 netif_wake_queue(IPA_NETDEV());
1893 __netif_tx_unlock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
1894 }
1895}
1896
1897/**
1898 * ipa3_rm_resource_granted() - Called upon
1899 * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
1900 *
1901 * @dev: network device
1902 *
1903 * Return codes:
1904 * None
1905 */
1906static void ipa3_rm_resource_granted(void *dev)
1907{
1908 IPAWANDBG_LOW("Resource Granted - starting queue\n");
1909 schedule_work(&ipa3_tx_wakequeue_work);
1910}
1911
1912/**
1913 * ipa3_rm_notify() - Callback function for RM events. Handles
1914 * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
1915 * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
1916 * workqueue.
1917 *
1918 * @dev: network device
1919 * @event: IPA RM event
1920 * @data: Additional data provided by IPA RM
1921 *
1922 * Return codes:
1923 * None
1924 */
1925static void ipa3_rm_notify(void *dev, enum ipa_rm_event event,
1926 unsigned long data)
1927{
1928 struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
1929
1930 pr_debug("%s: event %d\n", __func__, event);
1931 switch (event) {
1932 case IPA_RM_RESOURCE_GRANTED:
1933 if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
1934 complete_all(&wwan_ptr->resource_granted_completion);
1935 break;
1936 }
1937 ipa3_rm_resource_granted(dev);
1938 break;
1939 case IPA_RM_RESOURCE_RELEASED:
1940 break;
1941 default:
1942 pr_err("%s: unknown event %d\n", __func__, event);
1943 break;
1944 }
1945}
1946
1947/* IPA_RM related functions end*/
1948
1949static int ipa3_ssr_notifier_cb(struct notifier_block *this,
1950 unsigned long code,
1951 void *data);
1952
1953static struct notifier_block ipa3_ssr_notifier = {
1954 .notifier_call = ipa3_ssr_notifier_cb,
1955};
1956
1957static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
1958 struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res)
1959{
1960 ipa_rmnet_drv_res->ipa_rmnet_ssr =
1961 of_property_read_bool(pdev->dev.of_node,
1962 "qcom,rmnet-ipa-ssr");
1963 pr_info("IPA SSR support = %s\n",
1964 ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
1965 ipa_rmnet_drv_res->ipa_loaduC =
1966 of_property_read_bool(pdev->dev.of_node,
1967 "qcom,ipa-loaduC");
1968 pr_info("IPA ipa-loaduC = %s\n",
1969 ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");
1970
1971 ipa_rmnet_drv_res->ipa_advertise_sg_support =
1972 of_property_read_bool(pdev->dev.of_node,
1973 "qcom,ipa-advertise-sg-support");
1974 pr_info("IPA SG support = %s\n",
1975 ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
1976 return 0;
1977}
1978
1979struct ipa3_rmnet_context ipa3_rmnet_ctx;
1980static int ipa3_wwan_probe(struct platform_device *pdev);
1981struct platform_device *m_pdev;
1982
1983static void ipa3_delayed_probe(struct work_struct *work)
1984{
1985 (void)ipa3_wwan_probe(m_pdev);
1986}
1987
1988static DECLARE_WORK(ipa3_scheduled_probe, ipa3_delayed_probe);
1989
1990static void ipa3_ready_cb(void *user_data)
1991{
1992 struct platform_device *pdev = (struct platform_device *)(user_data);
1993
1994 m_pdev = pdev;
1995
1996 IPAWANDBG("IPA ready callback has been triggered!\n");
1997
1998 schedule_work(&ipa3_scheduled_probe);
1999}
2000
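/*
 * Probe sequence, in order: read the DTS configuration, initialize Q6
 * shared memory, start the A7 QMI service, install the default QMAP
 * header and WAN routing tables, allocate and set up the netdev, create
 * the IPA RM resources and dependencies, and finally register_netdev().
 */
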
2001/**
2002 * ipa3_wwan_probe() - Initializes the module and registers it as a
2003 * network interface with the network stack
2004 *
2005 * Note: In case IPA driver hasn't initialized already, the probe function
2006 * will return immediately after registering a callback to be invoked when
2007 * IPA driver initialization is complete.
2008 *
2009 * Return codes:
2010 * 0: success
2011 * -ENOMEM: No memory available
2012 * -EFAULT: Internal error
2013 */
2014static int ipa3_wwan_probe(struct platform_device *pdev)
2015{
2016 int ret, i;
2017 struct net_device *dev;
2018 struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */
2019 struct ipa_rm_perf_profile profile; /* IPA_RM */
2020
2021 pr_info("rmnet_ipa3 started initialization\n");
2022
2023 if (!ipa3_is_ready()) {
2024 IPAWANDBG("IPA driver not ready, registering callback\n");
2025 ret = ipa_register_ipa_ready_cb(ipa3_ready_cb, (void *)pdev);
2026
2027 /*
2028 * If we received -EEXIST, IPA has already initialized, so we
2029 * should continue the probing process.
2030 */
2031 if (ret != -EEXIST) {
2032 if (ret)
2033 IPAWANERR("IPA CB reg failed - %d\n", ret);
2034 return ret;
2035 }
2036 }
2037
2038 ret = get_ipa_rmnet_dts_configuration(pdev, &ipa3_rmnet_res);
2039 ipa3_rmnet_ctx.ipa_rmnet_ssr = ipa3_rmnet_res.ipa_rmnet_ssr;
2040
2041 ret = ipa3_init_q6_smem();
2042 if (ret) {
2043 IPAWANERR("ipa3_init_q6_smem failed!\n");
2044 return ret;
2045 }
2046
2047 /* initialize tx/rx endpoint setup */
2048 memset(&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg, 0,
2049 sizeof(struct ipa_sys_connect_params));
2050 memset(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg, 0,
2051 sizeof(struct ipa_sys_connect_params));
2052
2053 /* initialize ex property setup */
2054 rmnet_ipa3_ctx->num_q6_rules = 0;
2055 rmnet_ipa3_ctx->old_num_q6_rules = 0;
2056 rmnet_ipa3_ctx->rmnet_index = 0;
2057 rmnet_ipa3_ctx->egress_set = false;
2058 rmnet_ipa3_ctx->a7_ul_flt_set = false;
2059 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
2060 memset(&rmnet_ipa3_ctx->mux_channel[i], 0,
2061 sizeof(struct ipa3_rmnet_mux_val));
2062
2063 /* start A7 QMI service/client */
2064 if (ipa3_rmnet_res.ipa_loaduC)
2065 /* Android platform loads uC */
2066 ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
2067 else
2068 /* LE platform does not load the uC */
2069 ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
2070
2071 /* construct default WAN RT tbl for IPACM */
2072 ret = ipa3_setup_a7_qmap_hdr();
2073 if (ret)
2074 goto setup_a7_qmap_hdr_err;
2075 ret = ipa3_setup_dflt_wan_rt_tables();
2076 if (ret)
2077 goto setup_dflt_wan_rt_tables_err;
2078
2079 if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
2080 /* Start transport-driver fd ioctl for ipacm for first init */
2081 ret = ipa3_wan_ioctl_init();
2082 if (ret)
2083 goto wan_ioctl_init_err;
2084 } else {
2085 /* Enable sending QMI messages after SSR */
2086 ipa3_wan_ioctl_enable_qmi_messages();
2087 }
2088
2089 /* initialize wan-driver netdev */
2090 dev = alloc_netdev(sizeof(struct ipa3_wwan_private),
2091 IPA_WWAN_DEV_NAME,
2092 NET_NAME_UNKNOWN,
2093 ipa3_wwan_setup);
2094 if (!dev) {
2095 IPAWANERR("no memory for netdev\n");
2096 ret = -ENOMEM;
2097 goto alloc_netdev_err;
2098 }
2099 rmnet_ipa3_ctx->wwan_priv = netdev_priv(dev);
2100 memset(rmnet_ipa3_ctx->wwan_priv, 0,
2101 sizeof(*(rmnet_ipa3_ctx->wwan_priv)));
2102 IPAWANDBG("wwan_ptr (private) = %p\n", rmnet_ipa3_ctx->wwan_priv);
2103 rmnet_ipa3_ctx->wwan_priv->net = dev;
2104 rmnet_ipa3_ctx->wwan_priv->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
2105 rmnet_ipa3_ctx->wwan_priv->outstanding_low = DEFAULT_OUTSTANDING_LOW;
2106 atomic_set(&rmnet_ipa3_ctx->wwan_priv->outstanding_pkts, 0);
2107 spin_lock_init(&rmnet_ipa3_ctx->wwan_priv->lock);
2108 init_completion(
2109 &rmnet_ipa3_ctx->wwan_priv->resource_granted_completion);
2110
2111 if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
2112 /* IPA_RM configuration starts */
2113 ret = ipa3_q6_initialize_rm();
2114 if (ret) {
2115 IPAWANERR("%s: ipa3_q6_initialize_rm failed, ret: %d\n",
2116 __func__, ret);
2117 goto q6_init_err;
2118 }
2119 }
2120
2121 memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
2122 ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
2123 ipa_rm_params.reg_params.user_data = dev;
2124 ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify;
2125 ret = ipa_rm_create_resource(&ipa_rm_params);
2126 if (ret) {
2127 pr_err("%s: unable to create resource %d in IPA RM\n",
2128 __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
2129 goto create_rsrc_err;
2130 }
2131 ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
2132 IPA_RM_INACTIVITY_TIMER);
2133 if (ret) {
2134 pr_err("%s: ipa rm timer init failed %d on resource %d\n",
2135 __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
2136 goto timer_init_err;
2137 }
2138 /* add dependency */
2139 ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2140 IPA_RM_RESOURCE_Q6_CONS);
2141 if (ret)
2142 goto add_dpnd_err;
2143 /* setup Performance profile */
2144 memset(&profile, 0, sizeof(profile));
2145 profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
2146 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
2147 &profile);
2148 if (ret)
2149 goto set_perf_err;
2150 /* IPA_RM configuration ends */
2151
2152 /* Enable SG support in netdevice. */
2153 if (ipa3_rmnet_res.ipa_advertise_sg_support)
2154 dev->hw_features |= NETIF_F_SG;
2155
2156 if (ipa3_rmnet_res.ipa_napi_enable)
2157 netif_napi_add(dev, &(rmnet_ipa3_ctx->wwan_priv->napi),
2158 ipa3_rmnet_poll, NAPI_WEIGHT);
2159 ret = register_netdev(dev);
2160 if (ret) {
2161 IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
2162 0, ret);
2163 goto set_perf_err;
2164 }
2165
2166 IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n", dev->name);
2172 atomic_set(&rmnet_ipa3_ctx->is_initialized, 1);
2173 if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
2174 /* offline charging mode */
2175 ipa3_proxy_clk_unvote();
2176 }
2177 atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
2178
2179 pr_info("rmnet_ipa completed initialization\n");
2180 return 0;
2185set_perf_err:
2186 ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2187 IPA_RM_RESOURCE_Q6_CONS);
2188 if (ret)
2189 IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
2190 IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
2191 ret);
2192add_dpnd_err:
2193 ret = ipa_rm_inactivity_timer_destroy(
2194 IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
2195 if (ret)
2196 IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
2197 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2198timer_init_err:
2199 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2200 if (ret)
2201 IPAWANERR("Error deleting resource %d, ret=%d\n",
2202 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2203create_rsrc_err:
2204 ipa3_q6_deinitialize_rm();
2205q6_init_err:
2206 free_netdev(dev);
2207 rmnet_ipa3_ctx->wwan_priv = NULL;
2208alloc_netdev_err:
2209 ipa3_wan_ioctl_deinit();
2210wan_ioctl_init_err:
2211 ipa3_del_dflt_wan_rt_tables();
2212setup_dflt_wan_rt_tables_err:
2213 ipa3_del_a7_qmap_hdr();
2214setup_a7_qmap_hdr_err:
2215 ipa3_qmi_service_exit();
2216 atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
2217 return ret;
2218}
2219
2220static int ipa3_wwan_remove(struct platform_device *pdev)
2221{
2222 int ret;
2223
2224 pr_info("rmnet_ipa started deinitialization\n");
2225 mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
2226 ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl);
2227 if (ret < 0)
2228 IPAWANERR("Failed to teardown IPA->APPS pipe\n");
2229 else
2230 rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
2231 if (ipa3_rmnet_res.ipa_napi_enable)
2232 netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
2233 mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
2234 unregister_netdev(IPA_NETDEV());
2235 ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
2236 IPA_RM_RESOURCE_Q6_CONS);
2237 if (ret < 0)
2238 IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
2239 IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
2240 ret);
2241 ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
2242 if (ret < 0)
2243 IPAWANERR(
2244 "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
2245 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2246 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2247 if (ret < 0)
2248 IPAWANERR("Error deleting resource %d, ret=%d\n",
2249 IPA_RM_RESOURCE_WWAN_0_PROD, ret);
2250 cancel_work_sync(&ipa3_tx_wakequeue_work);
2251 cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
2252 if (IPA_NETDEV())
2253 free_netdev(IPA_NETDEV());
2254 rmnet_ipa3_ctx->wwan_priv = NULL;
2255 /* No need to remove wwan_ioctl during SSR */
2256 if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
2257 ipa3_wan_ioctl_deinit();
2258 ipa3_del_dflt_wan_rt_tables();
2259 ipa3_del_a7_qmap_hdr();
2260 ipa3_del_mux_qmap_hdrs();
2261 if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false)
2262 ipa3_wwan_del_ul_flt_rule_to_ipa();
2263 ipa3_cleanup_deregister_intf();
2264 atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
2265 pr_info("rmnet_ipa completed deinitialization\n");
2266 return 0;
2267}
2268
2269/**
2270 * rmnet_ipa_ap_suspend() - suspend callback for system power management
2271 * @dev: pointer to device
2272 *
2273 * This callback is invoked by the PM framework in the noirq phase of an
2274 * AP suspend operation, e.g. when the user presses the suspend button.
2275 *
2276 * Returns -EAGAIN to the PM framework in case there are pending packets
2277 * in the Tx queue. This postpones the suspend operation until all the
2278 * pending packets have been transmitted.
2279 *
2280 * In case there are no packets to send, releases the WWAN0_PROD entity.
2281 * As an outcome, the number of IPA active clients should be decremented
2282 * until IPA clocks can be gated.
2283 */
2284static int rmnet_ipa_ap_suspend(struct device *dev)
2285{
2286 struct net_device *netdev = IPA_NETDEV();
2287 struct ipa3_wwan_private *wwan_ptr;
2288
2289 IPAWANDBG_LOW("Enter...\n");
2290 if (netdev == NULL) {
2291 IPAWANERR("netdev is NULL.\n");
2292 return 0;
2293 }
2294
2295 wwan_ptr = netdev_priv(netdev);
2296 if (wwan_ptr == NULL) {
2297 IPAWANERR("wwan_ptr is NULL.\n");
2298 return 0;
2299 }
2300
2301 /* Do not allow A7 to suspend in case there are outstanding packets */
2302 if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
2303 IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
2304 return -EAGAIN;
2305 }
2306
2307 /* Make sure that there is no Tx operation ongoing */
2308 netif_tx_lock_bh(netdev);
2309 ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
2310 netif_tx_unlock_bh(netdev);
2311 IPAWANDBG_LOW("Exit\n");
2312
2313 return 0;
2314}
2315
2316/**
2317 * rmnet_ipa_ap_resume() - resume callback for system power management
2318 * @dev: pointer to device
2319 *
2320 * This callback is invoked by the PM framework in the noirq phase of an
2321 * AP resume operation.
2322 *
2323 * Wakes the network interface queue and returns success to the
2324 * PM framework.
2325 */
2326static int rmnet_ipa_ap_resume(struct device *dev)
2327{
2328 struct net_device *netdev = IPA_NETDEV();
2329
2330 IPAWANDBG_LOW("Enter...\n");
2331 if (netdev)
2332 netif_wake_queue(netdev);
2333 IPAWANDBG_LOW("Exit\n");
2334
2335 return 0;
2336}
2337
2338static void ipa_stop_polling_stats(void)
2339{
2340 cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
2341 ipa3_rmnet_ctx.polling_interval = 0;
2342}
2343
2344static const struct of_device_id rmnet_ipa_dt_match[] = {
2345 {.compatible = "qcom,rmnet-ipa3"},
2346 {},
2347};
2348MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);
2349
2350static const struct dev_pm_ops rmnet_ipa_pm_ops = {
2351 .suspend_noirq = rmnet_ipa_ap_suspend,
2352 .resume_noirq = rmnet_ipa_ap_resume,
2353};
2354
2355static struct platform_driver rmnet_ipa_driver = {
2356 .driver = {
2357 .name = "rmnet_ipa3",
2358 .owner = THIS_MODULE,
2359 .pm = &rmnet_ipa_pm_ops,
2360 .of_match_table = rmnet_ipa_dt_match,
2361 },
2362 .probe = ipa3_wwan_probe,
2363 .remove = ipa3_wwan_remove,
2364};
2365
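/*
 * Modem SSR flow: BEFORE_SHUTDOWN tears down Q6 state and unregisters the
 * platform driver; AFTER_SHUTDOWN completes the Q6 cleanup; BEFORE_POWERUP
 * drops cached QMI state and takes a proxy clock vote for the modem;
 * AFTER_POWERUP re-registers the platform driver so probing runs again.
 */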
2366static int ipa3_ssr_notifier_cb(struct notifier_block *this,
2367 unsigned long code,
2368 void *data)
2369{
2370 if (!ipa3_rmnet_ctx.ipa_rmnet_ssr)
2371 return NOTIFY_DONE;
2372
2373 switch (code) {
2374 case SUBSYS_BEFORE_SHUTDOWN:
2375 IPAWANINFO("IPA received MPSS BEFORE_SHUTDOWN\n");
2376 atomic_set(&rmnet_ipa3_ctx->is_ssr, 1);
2377 ipa3_q6_pre_shutdown_cleanup();
2378 if (IPA_NETDEV())
2379 netif_stop_queue(IPA_NETDEV());
2380 ipa3_qmi_stop_workqueues();
2381 ipa3_wan_ioctl_stop_qmi_messages();
2382 ipa_stop_polling_stats();
2383 if (atomic_read(&rmnet_ipa3_ctx->is_initialized))
2384 platform_driver_unregister(&rmnet_ipa_driver);
2385 IPAWANINFO("IPA BEFORE_SHUTDOWN handling is complete\n");
2386 break;
2387 case SUBSYS_AFTER_SHUTDOWN:
2388 IPAWANINFO("IPA Received MPSS AFTER_SHUTDOWN\n");
2389 if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
2390 ipa3_q6_post_shutdown_cleanup();
2391 IPAWANINFO("IPA AFTER_SHUTDOWN handling is complete\n");
2392 break;
2393 case SUBSYS_BEFORE_POWERUP:
2394 IPAWANINFO("IPA received MPSS BEFORE_POWERUP\n");
2395 if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
2396 /* clean up cached QMI msg/handlers */
2397 ipa3_qmi_service_exit();
2398 /*hold a proxy vote for the modem*/
2399 ipa3_proxy_clk_vote();
2400 IPAWANINFO("IPA BEFORE_POWERUP handling is complete\n");
2401 break;
2402 case SUBSYS_AFTER_POWERUP:
2403 IPAWANINFO("IPA received MPSS AFTER_POWERUP\n");
2405 if (!atomic_read(&rmnet_ipa3_ctx->is_initialized) &&
2406 atomic_read(&rmnet_ipa3_ctx->is_ssr))
2407 platform_driver_register(&rmnet_ipa_driver);
2408
2409 IPAWANINFO("IPA AFTER_POWERUP handling is complete\n");
2410 break;
2411 default:
2412 IPAWANDBG("Unsupported subsys notification, IPA received: %lu\n",
2413 code);
2414 break;
2415 }
2416
2417 IPAWANDBG_LOW("Exit\n");
2418 return NOTIFY_DONE;
2419}
2420
2421/**
2422 * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa_send_msg
2423 * @buff: pointer to buffer containing the message
2424 * @len: message len
2425 * @type: message type
2426 *
2427 * This function is invoked when ipa_send_msg is complete (Provided as a
2428 * free function pointer along with the message).
2429 */
2430static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
2431{
2432 if (!buff) {
2433 IPAWANERR("Null buffer\n");
2434 return;
2435 }
2436
2437 if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
2438 type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
2439 IPAWANERR("Wrong type given. buff %p type %d\n",
2440 buff, type);
2441 }
2442 kfree(buff);
2443}
2444
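/*
 * Stats reporting flow: a QMI request is sent to the modem and the
 * response buffer itself is handed to user space via ipa_send_msg();
 * ownership of the buffer passes to the messaging layer, which frees it
 * through rmnet_ipa_free_msg() once the message has been consumed.
 */
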
2445/**
2446 * rmnet_ipa_get_stats_and_update() - Gets pipe stats from Modem
2447 *
2448 * This function queries the IPA Modem driver for the pipe stats
2449 * via QMI, and updates the user space IPA entity.
2450 */
2451static void rmnet_ipa_get_stats_and_update(void)
2452{
2453 struct ipa_get_data_stats_req_msg_v01 req;
2454 struct ipa_get_data_stats_resp_msg_v01 *resp;
2455 struct ipa_msg_meta msg_meta;
2456 int rc;
2457
2458 resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
2459 GFP_KERNEL);
2460 if (!resp) {
2461 IPAWANERR("Can't allocate memory for stats message\n");
2462 return;
2463 }
2464
2465 memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
2466 memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
2467
2468 req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
2469
2470 rc = ipa3_qmi_get_data_stats(&req, resp);
2471
2472 if (!rc) {
2473 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2474 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
2475 msg_meta.msg_len =
2476 sizeof(struct ipa_get_data_stats_resp_msg_v01);
2477 rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2478 if (rc) {
2479 IPAWANERR("ipa_send_msg failed: %d\n", rc);
2480 kfree(resp);
2481 return;
2482 }
2483 }
2484}
2485
2486/**
2487 * tethering_stats_poll_queue() - Stats polling function
2488 * @work - Work entry
2489 *
2490 * This function is scheduled periodically (per the interval) in
2491 * order to poll the IPA Modem driver for the pipe stats.
2492 */
2493static void tethering_stats_poll_queue(struct work_struct *work)
2494{
2495 rmnet_ipa_get_stats_and_update();
2496
2497 /* Schedule again only if there's an active polling interval */
2498 if (ipa3_rmnet_ctx.polling_interval != 0)
2499 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
2500 msecs_to_jiffies(ipa3_rmnet_ctx.polling_interval*1000));
2501}
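
/*
 * Note the self-rearming pattern above: each poll reschedules itself as
 * delayed work, so clearing polling_interval (see ipa_stop_polling_stats()
 * and rmnet_ipa3_poll_tethering_stats()) is what terminates the loop.
 */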
2502
2503/**
2504 * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
2505 *
2506 * This function retrieves the data usage (used quota) from the IPA Modem driver
2507 * via QMI, and updates IPA user space entity.
2508 */
2509static void rmnet_ipa_get_network_stats_and_update(void)
2510{
2511 struct ipa_get_apn_data_stats_req_msg_v01 req;
2512 struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
2513 struct ipa_msg_meta msg_meta;
2514 int rc;
2515
2516 resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
2517 GFP_KERNEL);
2518 if (!resp) {
2519 IPAWANERR("Can't allocate memory for network stats message\n");
2520 return;
2521 }
2522
2523 memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
2524 memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
2525
2526 req.mux_id_list_valid = true;
2527 req.mux_id_list_len = 1;
2528 req.mux_id_list[0] = ipa3_rmnet_ctx.metered_mux_id;
2529
2530 rc = ipa3_qmi_get_network_stats(&req, resp);
2531
2532 if (!rc) {
2533 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
2534 msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
2535 msg_meta.msg_len =
2536 sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
2537 rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
2538 if (rc) {
2539 IPAWANERR("ipa_send_msg failed: %d\n", rc);
2540 kfree(resp);
2541 return;
2542 }
2543 }
2544}
2545
2546/**
2547 * rmnet_ipa3_poll_tethering_stats() - Tethering stats polling IOCTL handler
2548 * @data - IOCTL data
2549 *
2550 * This function handles WAN_IOC_POLL_TETHERING_STATS.
2551 * If the received polling interval is 0, polling will stop (a poll
2552 * already in progress is allowed to finish); the network stats are then
2553 * fetched once more and reported to the IPA user space entity.
2554 *
2555 * Return codes:
2556 * 0: Success
2557 */
2558int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
2559{
2560 ipa3_rmnet_ctx.polling_interval = data->polling_interval_secs;
2561
2562 cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
2563
2564 if (ipa3_rmnet_ctx.polling_interval == 0) {
2565 ipa3_qmi_stop_data_qouta();
2566 rmnet_ipa_get_network_stats_and_update();
2567 rmnet_ipa_get_stats_and_update();
2568 return 0;
2569 }
2570
2571 schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
2572 return 0;
2573}
2574
2575/**
2576 * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
2577 * @data - IOCTL data
2578 *
2579 * This function handles WAN_IOC_SET_DATA_QUOTA.
2580 * It translates the given interface name to the Modem MUX ID and
2581 * sends the request of the quota to the IPA Modem driver via QMI.
2582 *
2583 * Return codes:
2584 * 0: Success
2585 * -EFAULT: Invalid interface name provided
2586 * other: See ipa_qmi_set_data_quota
2587 */
2588int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
2589{
2590 u32 mux_id;
2591 int index;
2592 struct ipa_set_data_usage_quota_req_msg_v01 req;
2593
2594 index = find_vchannel_name_index(data->interface_name);
2595 IPAWANERR("iface name %s, quota %lu\n",
2596 data->interface_name,
2597 (unsigned long int) data->quota_mbytes);
2598
2599 if (index == MAX_NUM_OF_MUX_CHANNEL) {
2600 IPAWANERR("%s is an invalid iface name\n",
2601 data->interface_name);
2602 return -EFAULT;
2603 }
2604
2605 mux_id = rmnet_ipa3_ctx->mux_channel[index].mux_id;
2606 ipa3_rmnet_ctx.metered_mux_id = mux_id;
2607
2608 memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
2609 req.apn_quota_list_valid = true;
2610 req.apn_quota_list_len = 1;
2611 req.apn_quota_list[0].mux_id = mux_id;
2612 req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
2613
2614 return ipa3_qmi_set_data_quota(&req);
2615}
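
/*
 * Illustrative user-space usage (a sketch, not taken from this tree; the
 * ioctl plumbing lives in the WAN ioctl driver, and "rmnet_data0" and fd
 * are hypothetical):
 *
 *   struct wan_ioctl_set_data_quota q = { 0 };
 *   snprintf(q.interface_name, sizeof(q.interface_name), "rmnet_data0");
 *   q.quota_mbytes = 100;
 *   ioctl(fd, WAN_IOC_SET_DATA_QUOTA, &q);
 */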
2616
2617/**
2618 * rmnet_ipa3_set_tether_client_pipe() - Tether client pipe IOCTL handler
2619 * @data - IOCTL data
2620 *
2621 * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE.
2622 * It marks each listed UL source pipe and DL destination pipe as
2623 * belonging to the given tethering client, or clears that marking
2624 * when the reset flag is set.
2625 *
2626 * Return codes:
2627 * 0: Success
2628 */
2629int rmnet_ipa3_set_tether_client_pipe(
2630 struct wan_ioctl_set_tether_client_pipe *data)
2631{
2632 int number, i;
2633
2634 IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
2635 data->ipa_client,
2636 data->ul_src_pipe_len,
2637 data->dl_dst_pipe_len,
2638 data->reset_client);
2639 number = data->ul_src_pipe_len;
2640 for (i = 0; i < number; i++) {
2641 IPAWANDBG("UL index-%d pipe %d\n", i,
2642 data->ul_src_pipe_list[i]);
2643 if (data->reset_client)
2644 ipa3_set_client(data->ul_src_pipe_list[i],
2645 0, false);
2646 else
2647 ipa3_set_client(data->ul_src_pipe_list[i],
2648 data->ipa_client, true);
2649 }
2650 number = data->dl_dst_pipe_len;
2651 for (i = 0; i < number; i++) {
2652 IPAWANDBG("DL index-%d pipe %d\n", i,
2653 data->dl_dst_pipe_list[i]);
2654 if (data->reset_client)
2655 ipa3_set_client(data->dl_dst_pipe_list[i],
2656 0, false);
2657 else
2658 ipa3_set_client(data->dl_dst_pipe_list[i],
2659 data->ipa_client, false);
2660 }
2661 return 0;
2662}
2663
2664int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
2665 bool reset)
2666{
2667 struct ipa_get_data_stats_req_msg_v01 *req;
2668 struct ipa_get_data_stats_resp_msg_v01 *resp;
2669 int pipe_len, rc;
2670
2671 req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
2672 GFP_KERNEL);
2673 if (!req) {
2674 IPAWANERR("Can't allocate memory for stats message\n");
2675 return -ENOMEM;
2676 }
2677 resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
2678 GFP_KERNEL);
2679 if (!resp) {
2680 IPAWANERR("Can't allocate memory for stats message\n");
2681 kfree(req);
2682 return -ENOMEM;
2683 }
2684 memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
2685 memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
2686
2687 req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
2688 if (reset) {
2689 req->reset_stats_valid = true;
2690 req->reset_stats = true;
2691 IPAWANERR("reset the pipe stats\n");
2692 } else {
2693 /* print tethered-client enum */
2694 IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client);
2695 }
2696
2697 rc = ipa3_qmi_get_data_stats(req, resp);
2698 if (rc) {
2699 IPAWANERR("can't get ipa_qmi_get_data_stats\n");
2700 kfree(req);
2701 kfree(resp);
2702 return rc;
2703 } else if (reset) {
2704 kfree(req);
2705 kfree(resp);
2706 return 0;
2707 }
2708
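 /*
  * Accumulate per-pipe stats from the QMI response: DL destination pipes
  * mapped to the queried client feed the RX counters, while UL source
  * pipes feed the TX counters.
  */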
2709 if (resp->dl_dst_pipe_stats_list_valid) {
2710 for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
2711 pipe_len++) {
2712 IPAWANDBG_LOW("Check entry(%d) dl_dst_pipe(%d)\n",
2713 pipe_len, resp->dl_dst_pipe_stats_list
2714 [pipe_len].pipe_index);
2715 IPAWANDBG_LOW("dl_p_v4(%lu)v6(%lu)\n",
2716 (unsigned long int) resp->
2717 dl_dst_pipe_stats_list[pipe_len].
2718 num_ipv4_packets,
2719 (unsigned long int) resp->
2720 dl_dst_pipe_stats_list[pipe_len].
2721 num_ipv6_packets);
2722 IPAWANDBG_LOW("dl_b_v4(%lu)v6(%lu)\n",
2723 (unsigned long int) resp->
2724 dl_dst_pipe_stats_list[pipe_len].
2725 num_ipv4_bytes,
2726 (unsigned long int) resp->
2727 dl_dst_pipe_stats_list[pipe_len].
2728 num_ipv6_bytes);
2729 if (ipa_get_client_uplink(resp->
2730 dl_dst_pipe_stats_list[pipe_len].
2731 pipe_index) == false) {
2732 if (data->ipa_client == ipa_get_client(resp->
2733 dl_dst_pipe_stats_list[pipe_len].
2734 pipe_index)) {
2735 /* update the DL stats */
2736 data->ipv4_rx_packets += resp->
2737 dl_dst_pipe_stats_list[pipe_len].
2738 num_ipv4_packets;
2739 data->ipv6_rx_packets += resp->
2740 dl_dst_pipe_stats_list[pipe_len].
2741 num_ipv6_packets;
2742 data->ipv4_rx_bytes += resp->
2743 dl_dst_pipe_stats_list[pipe_len].
2744 num_ipv4_bytes;
2745 data->ipv6_rx_bytes += resp->
2746 dl_dst_pipe_stats_list[pipe_len].
2747 num_ipv6_bytes;
2748 }
2749 }
2750 }
2751 }
2752 IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
2753 (unsigned long int) data->ipv4_rx_packets,
2754 (unsigned long int) data->ipv6_rx_packets,
2755 (unsigned long int) data->ipv4_rx_bytes,
2756 (unsigned long int) data->ipv6_rx_bytes);
2757
2758 if (resp->ul_src_pipe_stats_list_valid) {
2759 for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
2760 pipe_len++) {
2761 IPAWANDBG_LOW("Check entry(%d) ul_src_pipe(%d)\n",
2762 pipe_len,
2763 resp->ul_src_pipe_stats_list[pipe_len].
2764 pipe_index);
2765 IPAWANDBG_LOW("ul_p_v4(%lu)v6(%lu)\n",
2766 (unsigned long int) resp->
2767 ul_src_pipe_stats_list[pipe_len].
2768 num_ipv4_packets,
2769 (unsigned long int) resp->
2770 ul_src_pipe_stats_list[pipe_len].
2771 num_ipv6_packets);
2772 IPAWANDBG_LOW("ul_b_v4(%lu)v6(%lu)\n",
2773 (unsigned long int) resp->
2774 ul_src_pipe_stats_list[pipe_len].
2775 num_ipv4_bytes,
2776 (unsigned long int) resp->
2777 ul_src_pipe_stats_list[pipe_len].
2778 num_ipv6_bytes);
2779 if (ipa_get_client_uplink(resp->
2780 ul_src_pipe_stats_list[pipe_len].
2781 pipe_index) == true) {
2782 if (data->ipa_client == ipa_get_client(resp->
2783 ul_src_pipe_stats_list[pipe_len].
2784 pipe_index)) {
2785 /* update the UL stats */
2786 data->ipv4_tx_packets += resp->
2787 ul_src_pipe_stats_list[pipe_len].
2788 num_ipv4_packets;
2789 data->ipv6_tx_packets += resp->
2790 ul_src_pipe_stats_list[pipe_len].
2791 num_ipv6_packets;
2792 data->ipv4_tx_bytes += resp->
2793 ul_src_pipe_stats_list[pipe_len].
2794 num_ipv4_bytes;
2795 data->ipv6_tx_bytes += resp->
2796 ul_src_pipe_stats_list[pipe_len].
2797 num_ipv6_bytes;
2798 }
2799 }
2800 }
2801 }
2802 IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
2803 (unsigned long int) data->ipv4_tx_packets,
2804 (unsigned long int) data->ipv6_tx_packets,
2805 (unsigned long int) data->ipv4_tx_bytes,
2806 (unsigned long int) data->ipv6_tx_bytes);
2807 kfree(req);
2808 kfree(resp);
2809 return 0;
2810}
2811
2812/**
2813 * ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
2814 * @mux_id - The MUX ID on which the quota has been reached
2815 *
2816 * This function broadcasts a Netlink event using the kobject of the
2817 * rmnet_ipa interface in order to alert the user space that the quota
2818 * on the specific interface which matches the mux_id has been reached.
2819 *
2820 */
2821void ipa3_broadcast_quota_reach_ind(u32 mux_id)
2822{
2823 char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
2824 char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
2825 char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
2826 char *envp[IPA_UEVENT_NUM_EVNP] = {
2827 alert_msg, iface_name_l, iface_name_m, NULL };
2828 int res;
2829 int index;
2830
2831 index = ipa3_find_mux_channel_index(mux_id);
2832
2833 if (index == MAX_NUM_OF_MUX_CHANNEL) {
2834 IPAWANERR("%u is an invalid mux ID\n", mux_id);
2835 return;
2836 }
2837
2838 res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
2839 "ALERT_NAME=%s", "quotaReachedAlert");
2840 if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
2841 IPAWANERR("message too long (%d)\n", res);
2842 return;
2843 }
2844 /* posting msg for L-release for CNE */
2845 res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
2846 "UPSTREAM=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
2847 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
2848 IPAWANERR("message too long (%d)\n", res);
2849 return;
2850 }
2851 /* posting msg for M-release for CNE */
2852 res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
2853 "INTERFACE=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
2854 if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
2855 IPAWANERR("message too long (%d)\n", res);
2856 return;
2857 }
2858
2859 IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
2860 alert_msg, iface_name_l, iface_name_m);
2861 kobject_uevent_env(&(IPA_NETDEV()->dev.kobj),
2862 KOBJ_CHANGE, envp);
2863}
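
/*
 * The resulting uevent environment, as built above, looks like:
 *   ALERT_NAME=quotaReachedAlert
 *   UPSTREAM=<vchannel name>    (consumed by L-release CNE)
 *   INTERFACE=<vchannel name>   (consumed by M-release CNE)
 */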
2864
2865/**
2866 * ipa3_q6_handshake_complete() - Perform operations once Q6 is up
2867 * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
2868 *
2869 * This function is invoked once the handshake between the IPA AP driver
2870 * and IPA Q6 driver is complete. At this point, it is possible to perform
2871 * operations which can't be performed until IPA Q6 driver is up.
2872 *
2873 */
2874void ipa3_q6_handshake_complete(bool ssr_bootup)
2875{
2876 /* The recovery steps below are needed only after an SSR boot-up */
2877 if (ssr_bootup) {
2878 /*
2879 * In case the uC is required to be loaded by the Modem,
2880 * the proxy vote will be removed only when uC loading is
2881 * complete and indication is received by the AP. After SSR,
2882 * uC is already loaded. Therefore, proxy vote can be removed
2883 * once Modem init is complete.
2884 */
2885 ipa3_proxy_clk_unvote();
2886
2887 /*
2888 * It is required to recover the network stats after
2889 * SSR recovery
2890 */
2891 rmnet_ipa_get_network_stats_and_update();
2892 }
2893}
2894
2895static int __init ipa3_wwan_init(void)
2896{
2897 rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
2898 if (!rmnet_ipa3_ctx) {
2899 IPAWANERR("no memory\n");
2900 return -ENOMEM;
2901 }
2902
2903 atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
2904 atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
2905
2906 mutex_init(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
2907 rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
2908 /* Register for Modem SSR */
2909 rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
2910 SUBSYS_MODEM,
2911 &ipa3_ssr_notifier);
2912 if (!IS_ERR(rmnet_ipa3_ctx->subsys_notify_handle))
2913 return platform_driver_register(&rmnet_ipa_driver);
2914 else
2915 return (int)PTR_ERR(rmnet_ipa3_ctx->subsys_notify_handle);
2916}
2917
2918static void __exit ipa3_wwan_cleanup(void)
2919{
2920 int ret;
2921
2922 mutex_destroy(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
2923 ret = subsys_notif_unregister_notifier(
2924 rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
2925 if (ret)
2926 IPAWANERR(
2927 "Error subsys_notif_unregister_notifier system %s, ret=%d\n",
2928 SUBSYS_MODEM, ret);
2929 platform_driver_unregister(&rmnet_ipa_driver);
2930 kfree(rmnet_ipa3_ctx);
2931 rmnet_ipa3_ctx = NULL;
2932}
2933
2934static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type)
2935{
2936 if (!buff)
2937 IPAWANERR("Null buffer.\n");
2938 kfree(buff);
2939}
2940
2941static void ipa3_rmnet_rx_cb(void *priv)
2942{
2943 IPAWANDBG_LOW("\n");
2944 napi_schedule(&(rmnet_ipa3_ctx->wwan_priv->napi));
2945}
2946
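/*
 * NAPI poll handler. Polling is bounded by NAPI_WEIGHT (the weight passed
 * to netif_napi_add() in ipa3_wwan_probe()) rather than by the budget
 * argument; NAPI completion is presumed to be handled inside ipa_rx_poll()
 * once the pipe runs dry.
 */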
2947static int ipa3_rmnet_poll(struct napi_struct *napi, int budget)
2948{
2949 int rcvd_pkts = 0;
2950
2951 rcvd_pkts = ipa_rx_poll(rmnet_ipa3_ctx->ipa3_to_apps_hdl,
2952 NAPI_WEIGHT);
2953 IPAWANDBG_LOW("rcvd packets: %d\n", rcvd_pkts);
2954 return rcvd_pkts;
2955}
2956
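/*
 * late_initcall() runs after all regular initcalls, presumably so the IPA
 * core driver and the subsystem-restart framework are up before this
 * module initializes.
 */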
2957late_initcall(ipa3_wwan_init);
2958module_exit(ipa3_wwan_cleanup);
2959MODULE_DESCRIPTION("WWAN Network Interface");
2960MODULE_LICENSE("GPL v2");