1/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * WWAN Transport Network Driver.
15 */
16
17#include <linux/completion.h>
18#include <linux/errno.h>
19#include <linux/if_arp.h>
20#include <linux/interrupt.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
25#include <linux/of_device.h>
26#include <linux/string.h>
27#include <linux/skbuff.h>
28#include <linux/version.h>
29#include <linux/workqueue.h>
30#include <net/pkt_sched.h>
31#include <soc/qcom/subsystem_restart.h>
32#include <soc/qcom/subsystem_notif.h>
33#include "ipa_qmi_service.h"
34#include <linux/rmnet_ipa_fd_ioctl.h>
35#include <linux/ipa.h>
36#include <uapi/linux/net_map.h>
37#include <uapi/linux/msm_rmnet.h>
38#include <net/rmnet_config.h>
39
40#include "ipa_trace.h"
41
42#define WWAN_METADATA_SHFT 24
43#define WWAN_METADATA_MASK 0xFF000000
44#define WWAN_DATA_LEN 2000
45#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
46#define HEADROOM_FOR_QMAP 8 /* for mux header */
47#define TAILROOM 0 /* for padding by mux layer */
48#define MAX_NUM_OF_MUX_CHANNEL 10 /* max mux channels */
49#define UL_FILTER_RULE_HANDLE_START 69
50#define DEFAULT_OUTSTANDING_HIGH_CTL 96
51#define DEFAULT_OUTSTANDING_HIGH 64
52#define DEFAULT_OUTSTANDING_LOW 32
53
54#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
55
56#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
57
58#define INVALID_MUX_ID 0xFF
59#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
60#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
61#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
62#define NAPI_WEIGHT 60
63
64#define IPA_NETDEV() \
65 ((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
66 rmnet_ipa3_ctx->wwan_priv->net : NULL)
67
68
69static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
70static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
71static void ipa3_wwan_msg_free_cb(void*, u32, u32);
72static void ipa3_rmnet_rx_cb(void *priv);
73static int ipa3_rmnet_poll(struct napi_struct *napi, int budget);
74
75static void ipa3_wake_tx_queue(struct work_struct *work);
76static DECLARE_WORK(ipa3_tx_wakequeue_work, ipa3_wake_tx_queue);
77
78static void tethering_stats_poll_queue(struct work_struct *work);
79static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
80 tethering_stats_poll_queue);
81
82enum ipa3_wwan_device_status {
83 WWAN_DEVICE_INACTIVE = 0,
84 WWAN_DEVICE_ACTIVE = 1
85};
86
87struct ipa3_rmnet_plat_drv_res {
88 bool ipa_rmnet_ssr;
89 bool ipa_loaduC;
90 bool ipa_advertise_sg_support;
91 bool ipa_napi_enable;
92};
93
94/**
95 * struct ipa3_wwan_private - WWAN private data
96 * @net: network interface struct implemented by this driver
97 * @stats: iface statistics
98 * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
99 * @outstanding_high: number of outstanding packets allowed
100 * @outstanding_low: number of outstanding packets below which the stopped TX queue is woken
101 * @ch_id: channel id
102 * @lock: spinlock for mutual exclusion
103 * @device_status: holds device status
104 *
105 * WWAN private - holds all relevant info about WWAN driver
106 */
107struct ipa3_wwan_private {
108 struct net_device *net;
109 struct net_device_stats stats;
110 atomic_t outstanding_pkts;
111 int outstanding_high_ctl;
112 int outstanding_high;
113 int outstanding_low;
114 uint32_t ch_id;
115 spinlock_t lock;
116 struct completion resource_granted_completion;
117 enum ipa3_wwan_device_status device_status;
118 struct napi_struct napi;
119};
120
121struct rmnet_ipa3_context {
122 struct ipa3_wwan_private *wwan_priv;
123 struct ipa_sys_connect_params apps_to_ipa_ep_cfg;
124 struct ipa_sys_connect_params ipa_to_apps_ep_cfg;
125 u32 qmap_hdr_hdl;
126 u32 dflt_v4_wan_rt_hdl;
127 u32 dflt_v6_wan_rt_hdl;
128 struct ipa3_rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
129 int num_q6_rules;
130 int old_num_q6_rules;
131 int rmnet_index;
132 bool egress_set;
133 bool a7_ul_flt_set;
134 struct workqueue_struct *rm_q6_wq;
135 atomic_t is_initialized;
136 atomic_t is_ssr;
137 void *subsys_notify_handle;
138 u32 apps_to_ipa3_hdl;
139 u32 ipa3_to_apps_hdl;
140 struct mutex ipa_to_apps_pipe_handle_guard;
141};
142
143static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
144static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res;
145
146/**
147* ipa3_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
148*
149* Return codes:
150* 0: success
151* -ENOMEM: failed to allocate memory
152* -EPERM: failed to add the header
153*/
154static int ipa3_setup_a7_qmap_hdr(void)
155{
156 struct ipa_ioc_add_hdr *hdr;
157 struct ipa_hdr_add *hdr_entry;
158 u32 pyld_sz;
159 int ret;
160
161 /* install the basic exception header */
162 pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
163 sizeof(struct ipa_hdr_add);
164 hdr = kzalloc(pyld_sz, GFP_KERNEL);
165 if (!hdr) {
166 IPAWANERR("fail to alloc exception hdr\n");
167 return -ENOMEM;
168 }
169 hdr->num_hdrs = 1;
170 hdr->commit = 1;
171 hdr_entry = &hdr->hdr[0];
172
173 strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
174 IPA_RESOURCE_NAME_MAX);
175 hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
176
177 if (ipa3_add_hdr(hdr)) {
178 IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
179 ret = -EPERM;
180 goto bail;
181 }
182
183 if (hdr_entry->status) {
184 IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
185 ret = -EPERM;
186 goto bail;
187 }
188 rmnet_ipa3_ctx->qmap_hdr_hdl = hdr_entry->hdr_hdl;
189
190 ret = 0;
191bail:
192 kfree(hdr);
193 return ret;
194}
195
196static void ipa3_del_a7_qmap_hdr(void)
197{
198 struct ipa_ioc_del_hdr *del_hdr;
199 struct ipa_hdr_del *hdl_entry;
200 u32 pyld_sz;
201 int ret;
202
203 pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
204 sizeof(struct ipa_hdr_del);
205 del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
206 if (!del_hdr) {
207 IPAWANERR("fail to alloc exception hdr_del\n");
208 return;
209 }
210
211 del_hdr->commit = 1;
212 del_hdr->num_hdls = 1;
213 hdl_entry = &del_hdr->hdl[0];
214 hdl_entry->hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
215
216 ret = ipa3_del_hdr(del_hdr);
217 if (ret || hdl_entry->status)
218 IPAWANERR("ipa3_del_hdr failed\n");
219 else
220 IPAWANDBG("hdrs deletion done\n");
221
222 rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
223 kfree(del_hdr);
224}
225
226static void ipa3_del_qmap_hdr(uint32_t hdr_hdl)
227{
228 struct ipa_ioc_del_hdr *del_hdr;
229 struct ipa_hdr_del *hdl_entry;
230 u32 pyld_sz;
231 int ret;
232
233 if (hdr_hdl == 0) {
234 IPAWANERR("Invalid hdr_hdl provided\n");
235 return;
236 }
237
238 pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
239 sizeof(struct ipa_hdr_del);
240 del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
241 if (!del_hdr) {
242 IPAWANERR("fail to alloc exception hdr_del\n");
243 return;
244 }
245
246 del_hdr->commit = 1;
247 del_hdr->num_hdls = 1;
248 hdl_entry = &del_hdr->hdl[0];
249 hdl_entry->hdl = hdr_hdl;
250
251 ret = ipa3_del_hdr(del_hdr);
252 if (ret || hdl_entry->status)
253 IPAWANERR("ipa3_del_hdr failed\n");
254 else
255 IPAWANDBG("header deletion done\n");
256
257 /* do not clear the A7 default qmap_hdr_hdl here; callers clear their own cached handle */
258 kfree(del_hdr);
259}
260
261static void ipa3_del_mux_qmap_hdrs(void)
262{
263 int index;
264
265 for (index = 0; index < rmnet_ipa3_ctx->rmnet_index; index++) {
266 ipa3_del_qmap_hdr(rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
267 rmnet_ipa3_ctx->mux_channel[index].hdr_hdl = 0;
268 }
269}
270
271static int ipa3_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
272{
273 struct ipa_ioc_add_hdr *hdr;
274 struct ipa_hdr_add *hdr_entry;
275 char hdr_name[IPA_RESOURCE_NAME_MAX];
276 u32 pyld_sz;
277 int ret;
278
279 pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
280 sizeof(struct ipa_hdr_add);
281 hdr = kzalloc(pyld_sz, GFP_KERNEL);
282 if (!hdr) {
283 IPAWANERR("fail to alloc exception hdr\n");
284 return -ENOMEM;
285 }
286 hdr->num_hdrs = 1;
287 hdr->commit = 1;
288 hdr_entry = &hdr->hdr[0];
289
290 snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
291 A2_MUX_HDR_NAME_V4_PREF,
292 mux_id);
293 strlcpy(hdr_entry->name, hdr_name,
294 IPA_RESOURCE_NAME_MAX);
295
296 hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
297 hdr_entry->hdr[1] = (uint8_t) mux_id;
298 IPAWANDBG("header (%s) with mux-id: (%d)\n",
299 hdr_name,
300 hdr_entry->hdr[1]);
301 if (ipa3_add_hdr(hdr)) {
302 IPAWANERR("fail to add IPA_QMAP hdr\n");
303 ret = -EPERM;
304 goto bail;
305 }
306
307 if (hdr_entry->status) {
308 IPAWANERR("fail to add IPA_QMAP hdr\n");
309 ret = -EPERM;
310 goto bail;
311 }
312
313 ret = 0;
314 *hdr_hdl = hdr_entry->hdr_hdl;
315bail:
316 kfree(hdr);
317 return ret;
318}
319
320/**
321* ipa3_setup_dflt_wan_rt_tables() - Setup default wan routing tables
322*
323* Return codes:
324* 0: success
325* -ENOMEM: failed to allocate memory
326* -EPERM: failed to add the tables
327*/
328static int ipa3_setup_dflt_wan_rt_tables(void)
329{
330 struct ipa_ioc_add_rt_rule *rt_rule;
331 struct ipa_rt_rule_add *rt_rule_entry;
332
333 rt_rule =
334 kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
335 sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
336 if (!rt_rule) {
337 IPAWANERR("fail to alloc mem\n");
338 return -ENOMEM;
339 }
340 /* setup a default v4 route to point to Apps */
341 rt_rule->num_rules = 1;
342 rt_rule->commit = 1;
343 rt_rule->ip = IPA_IP_v4;
344 strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
345 IPA_RESOURCE_NAME_MAX);
346
347 rt_rule_entry = &rt_rule->rules[0];
348 rt_rule_entry->at_rear = 1;
349 rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
350 rt_rule_entry->rule.hdr_hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
351
352 if (ipa3_add_rt_rule(rt_rule)) {
353 IPAWANERR("fail to add dflt_wan v4 rule\n");
354 kfree(rt_rule);
355 return -EPERM;
356 }
357
358 IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
359 rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
360
361 /* setup a default v6 route to point to Apps */
362 rt_rule->ip = IPA_IP_v6;
363 if (ipa3_add_rt_rule(rt_rule)) {
364 IPAWANERR("fail to add dflt_wan v6 rule\n");
365 kfree(rt_rule);
366 return -EPERM;
367 }
368 IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
369 rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
370
371 kfree(rt_rule);
372 return 0;
373}
374
375static void ipa3_del_dflt_wan_rt_tables(void)
376{
377 struct ipa_ioc_del_rt_rule *rt_rule;
378 struct ipa_rt_rule_del *rt_rule_entry;
379 int len;
380
381 len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
382 sizeof(struct ipa_rt_rule_del);
383 rt_rule = kzalloc(len, GFP_KERNEL);
384 if (!rt_rule) {
385 IPAWANERR("unable to allocate memory for del route rule\n");
386 return;
387 }
388
389 /* rt_rule is already zero-initialized by kzalloc() */
390 rt_rule->commit = 1;
391 rt_rule->num_hdls = 1;
392 rt_rule->ip = IPA_IP_v4;
393
394 rt_rule_entry = &rt_rule->hdl[0];
395 rt_rule_entry->status = -1;
396 rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl;
397
398 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
399 rt_rule_entry->hdl, IPA_IP_v4);
400 if (ipa3_del_rt_rule(rt_rule) ||
401 (rt_rule_entry->status)) {
402 IPAWANERR("Routing rule deletion failed!\n");
403 }
404
405 rt_rule->ip = IPA_IP_v6;
406 rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl;
407 IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
408 rt_rule_entry->hdl, IPA_IP_v6);
409 if (ipa3_del_rt_rule(rt_rule) ||
410 (rt_rule_entry->status)) {
411 IPAWANERR("Routing rule deletion failed!\n");
412 }
413
414 kfree(rt_rule);
415}
416
417int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
418 *rule_req)
419{
420 int i, j;
421
422 if (rule_req->filter_spec_ex_list_valid == true) {
423 rmnet_ipa3_ctx->num_q6_rules =
424 rule_req->filter_spec_ex_list_len;
425 IPAWANDBG("Received (%d) install_flt_req\n",
426 rmnet_ipa3_ctx->num_q6_rules);
427 } else {
428 rmnet_ipa3_ctx->num_q6_rules = 0;
429 IPAWANERR("got no UL rules from modem\n");
430 return -EINVAL;
431 }
432
433 /* copy UL filter rules from Modem*/
434 for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
435 /* check if the rules overrun the cache */
436 if (i == MAX_NUM_Q6_RULE) {
437 IPAWANERR("Reaching (%d) max cache ",
438 MAX_NUM_Q6_RULE);
439 IPAWANERR(" however total (%d)\n",
440 rmnet_ipa3_ctx->num_q6_rules);
441 goto failure;
442 }
443 ipa3_qmi_ctx->q6_ul_filter_rule[i].ip =
444 rule_req->filter_spec_ex_list[i].ip_type;
445 ipa3_qmi_ctx->q6_ul_filter_rule[i].action =
446 rule_req->filter_spec_ex_list[i].filter_action;
447 if (rule_req->filter_spec_ex_list[i].
448 is_routing_table_index_valid == true)
449 ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
450 rule_req->filter_spec_ex_list[i].route_table_index;
451 if (rule_req->filter_spec_ex_list[i].is_mux_id_valid == true)
452 ipa3_qmi_ctx->q6_ul_filter_rule[i].mux_id =
453 rule_req->filter_spec_ex_list[i].mux_id;
454 ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id =
455 rule_req->filter_spec_ex_list[i].rule_id;
456 ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable =
457 rule_req->filter_spec_ex_list[i].is_rule_hashable;
458 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
459 rule_req->filter_spec_ex_list[i].filter_rule.
460 rule_eq_bitmap;
461 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
462 rule_req->filter_spec_ex_list[i].filter_rule.
463 tos_eq_present;
464 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
465 rule_req->filter_spec_ex_list[i].filter_rule.tos_eq;
466 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
467 protocol_eq_present = rule_req->filter_spec_ex_list[i].
468 filter_rule.protocol_eq_present;
469 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
470 rule_req->filter_spec_ex_list[i].filter_rule.
471 protocol_eq;
472
473 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
474 num_ihl_offset_range_16 =
475 rule_req->filter_spec_ex_list[i].
476 filter_rule.num_ihl_offset_range_16;
477 for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
478 num_ihl_offset_range_16; j++) {
479 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
480 ihl_offset_range_16[j].offset = rule_req->
481 filter_spec_ex_list[i].filter_rule.
482 ihl_offset_range_16[j].offset;
483 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
484 ihl_offset_range_16[j].range_low = rule_req->
485 filter_spec_ex_list[i].filter_rule.
486 ihl_offset_range_16[j].range_low;
487 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
488 ihl_offset_range_16[j].range_high = rule_req->
489 filter_spec_ex_list[i].filter_rule.
490 ihl_offset_range_16[j].range_high;
491 }
492 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
493 rule_req->filter_spec_ex_list[i].filter_rule.
494 num_offset_meq_32;
495 for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
496 num_offset_meq_32; j++) {
497 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
498 offset_meq_32[j].offset =
499 rule_req->filter_spec_ex_list[i].
500 filter_rule.offset_meq_32[j].offset;
501 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
502 offset_meq_32[j].mask =
503 rule_req->filter_spec_ex_list[i].
504 filter_rule.offset_meq_32[j].mask;
505 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
506 offset_meq_32[j].value =
507 rule_req->filter_spec_ex_list[i].
508 filter_rule.offset_meq_32[j].value;
509 }
510
511 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
512 rule_req->filter_spec_ex_list[i].
513 filter_rule.tc_eq_present;
514 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
515 rule_req->filter_spec_ex_list[i].filter_rule.tc_eq;
516 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
517 rule_req->filter_spec_ex_list[i].filter_rule.
518 flow_eq_present;
519 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
520 rule_req->filter_spec_ex_list[i].filter_rule.flow_eq;
521 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
522 ihl_offset_eq_16_present = rule_req->filter_spec_ex_list[i].
523 filter_rule.ihl_offset_eq_16_present;
524 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
525 ihl_offset_eq_16.offset = rule_req->filter_spec_ex_list[i].
526 filter_rule.ihl_offset_eq_16.offset;
527 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
528 ihl_offset_eq_16.value = rule_req->filter_spec_ex_list[i].
529 filter_rule.ihl_offset_eq_16.value;
530
531 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
532 ihl_offset_eq_32_present = rule_req->filter_spec_ex_list[i].
533 filter_rule.ihl_offset_eq_32_present;
534 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
535 ihl_offset_eq_32.offset = rule_req->filter_spec_ex_list[i].
536 filter_rule.ihl_offset_eq_32.offset;
537 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
538 ihl_offset_eq_32.value = rule_req->filter_spec_ex_list[i].
539 filter_rule.ihl_offset_eq_32.value;
540
541 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
542 num_ihl_offset_meq_32 = rule_req->filter_spec_ex_list[i].
543 filter_rule.num_ihl_offset_meq_32;
544 for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].
545 eq_attrib.num_ihl_offset_meq_32; j++) {
546 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
547 ihl_offset_meq_32[j].offset = rule_req->
548 filter_spec_ex_list[i].filter_rule.
549 ihl_offset_meq_32[j].offset;
550 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
551 ihl_offset_meq_32[j].mask = rule_req->
552 filter_spec_ex_list[i].filter_rule.
553 ihl_offset_meq_32[j].mask;
554 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
555 ihl_offset_meq_32[j].value = rule_req->
556 filter_spec_ex_list[i].filter_rule.
557 ihl_offset_meq_32[j].value;
558 }
559 ipa3_qmi_ctx->
560 q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
561 rule_req->filter_spec_ex_list[i].filter_rule.
562 num_offset_meq_128;
563 for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
564 num_offset_meq_128; j++) {
565 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
566 offset_meq_128[j].offset = rule_req->
567 filter_spec_ex_list[i].filter_rule.
568 offset_meq_128[j].offset;
569 memcpy(ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
570 offset_meq_128[j].mask,
571 rule_req->filter_spec_ex_list[i].
572 filter_rule.offset_meq_128[j].mask, 16);
573 memcpy(ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
574 offset_meq_128[j].value, rule_req->
575 filter_spec_ex_list[i].filter_rule.
576 offset_meq_128[j].value, 16);
577 }
578
579 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
580 metadata_meq32_present =
581 rule_req->filter_spec_ex_list[i].
582 filter_rule.metadata_meq32_present;
583 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
584 metadata_meq32.offset =
585 rule_req->filter_spec_ex_list[i].
586 filter_rule.metadata_meq32.offset;
587 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
588 metadata_meq32.mask = rule_req->filter_spec_ex_list[i].
589 filter_rule.metadata_meq32.mask;
590 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
591 value = rule_req->filter_spec_ex_list[i].filter_rule.
592 metadata_meq32.value;
593 ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
594 ipv4_frag_eq_present = rule_req->filter_spec_ex_list[i].
595 filter_rule.ipv4_frag_eq_present;
596 }
597
598 if (rule_req->xlat_filter_indices_list_valid) {
599 if (rule_req->xlat_filter_indices_list_len >
600 rmnet_ipa3_ctx->num_q6_rules) {
601 IPAWANERR("Number of xlat indices is not valid: %d\n",
602 rule_req->xlat_filter_indices_list_len);
603 goto failure;
604 }
605 IPAWANDBG("Receive %d XLAT indices: ",
606 rule_req->xlat_filter_indices_list_len);
607 for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
608 IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
609 IPAWANDBG("\n");
610
611 for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
612 if (rule_req->xlat_filter_indices_list[i]
613 >= rmnet_ipa3_ctx->num_q6_rules) {
614 IPAWANERR("Xlat rule idx is wrong: %d\n",
615 rule_req->xlat_filter_indices_list[i]);
616 goto failure;
617 } else {
618 ipa3_qmi_ctx->q6_ul_filter_rule
619 [rule_req->xlat_filter_indices_list[i]]
620 .is_xlat_rule = 1;
621 IPAWANDBG("Rule %d is xlat rule\n",
622 rule_req->xlat_filter_indices_list[i]);
623 }
624 }
625 }
626 goto success;
627
628failure:
629 rmnet_ipa3_ctx->num_q6_rules = 0;
630 memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0,
631 sizeof(ipa3_qmi_ctx->q6_ul_filter_rule));
632 return -EINVAL;
633
634success:
635 return 0;
636}
637
638static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
639{
640 u32 pyld_sz;
641 int i, retval = 0;
642 struct ipa_ioc_add_flt_rule *param;
643 struct ipa_flt_rule_add flt_rule_entry;
644 struct ipa_fltr_installed_notif_req_msg_v01 *req;
645
646 pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
647 sizeof(struct ipa_flt_rule_add);
648 param = kzalloc(pyld_sz, GFP_KERNEL);
649 if (!param)
650 return -ENOMEM;
651
652 req = (struct ipa_fltr_installed_notif_req_msg_v01 *)
653 kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
654 GFP_KERNEL);
655 if (!req) {
656 kfree(param);
657 return -ENOMEM;
658 }
659
660 param->commit = 1;
661 param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
662 param->global = false;
663 param->num_rules = (uint8_t)1;
664
665 for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
666 param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
667 memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
668 flt_rule_entry.at_rear = true;
669 flt_rule_entry.rule.action =
670 ipa3_qmi_ctx->q6_ul_filter_rule[i].action;
671 flt_rule_entry.rule.rt_tbl_idx
672 = ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
673 flt_rule_entry.rule.retain_hdr = true;
674 flt_rule_entry.rule.hashable =
675 ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable;
676 flt_rule_entry.rule.rule_id =
677 ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;
678
679 /* debug rt-hdl*/
680 IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
681 i, flt_rule_entry.rule.rt_tbl_idx);
682 flt_rule_entry.rule.eq_attrib_type = true;
683 memcpy(&(flt_rule_entry.rule.eq_attrib),
684 &ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
685 sizeof(struct ipa_ipfltri_rule_eq));
686 memcpy(&(param->rules[0]), &flt_rule_entry,
687 sizeof(struct ipa_flt_rule_add));
688 if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
689 retval = -EFAULT;
690 IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
691 } else {
692 /* store the rule handler */
693 ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i] =
694 param->rules[0].flt_rule_hdl;
695 }
696 }
697
698 /* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
699 req->source_pipe_index =
700 ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
701 req->install_status = QMI_RESULT_SUCCESS_V01;
702 req->rule_id_valid = 1;
703 req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules;
704 for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
705 req->rule_id[i] =
706 ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;
707 }
708 if (ipa3_qmi_filter_notify_send(req)) {
709 IPAWANDBG("add filter rule index on A7-RX failed\n");
710 retval = -EFAULT;
711 }
712 rmnet_ipa3_ctx->old_num_q6_rules = rmnet_ipa3_ctx->num_q6_rules;
713 IPAWANDBG("add (%d) filter rule index on A7-RX\n",
714 rmnet_ipa3_ctx->old_num_q6_rules);
715 kfree(param);
716 kfree(req);
717 return retval;
718}
719
720static int ipa3_wwan_del_ul_flt_rule_to_ipa(void)
721{
722 u32 pyld_sz;
723 int i, retval = 0;
724 struct ipa_ioc_del_flt_rule *param;
725 struct ipa_flt_rule_del flt_rule_entry;
726
727 pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
728 sizeof(struct ipa_flt_rule_del);
729 param = kzalloc(pyld_sz, GFP_KERNEL);
730 if (!param) {
731 IPAWANERR("kzalloc failed\n");
732 return -ENOMEM;
733 }
734
735 param->commit = 1;
736 param->num_hdls = (uint8_t) 1;
737
738 for (i = 0; i < rmnet_ipa3_ctx->old_num_q6_rules; i++) {
739 param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
740 memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
741 flt_rule_entry.hdl = ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i];
742 /* debug rt-hdl*/
743 IPAWANDBG("delete-IPA rule index(%d)\n", i);
744 memcpy(&(param->hdl[0]), &flt_rule_entry,
745 sizeof(struct ipa_flt_rule_del));
746 if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
747 IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
748 kfree(param);
749 return -EFAULT;
750 }
751 }
752
753 /* set UL filter-rule add-indication */
754 rmnet_ipa3_ctx->a7_ul_flt_set = false;
755 rmnet_ipa3_ctx->old_num_q6_rules = 0;
756
757 kfree(param);
758 return retval;
759}
760
761static int ipa3_find_mux_channel_index(uint32_t mux_id)
762{
763 int i;
764
765 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
766 if (mux_id == rmnet_ipa3_ctx->mux_channel[i].mux_id)
767 return i;
768 }
769 return MAX_NUM_OF_MUX_CHANNEL;
770}
771
772static int find_vchannel_name_index(const char *vchannel_name)
773{
774 int i;
775
776 for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
777 if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
778 vchannel_name) == 0)
779 return i;
780 }
781 return MAX_NUM_OF_MUX_CHANNEL;
782}
783
784static int ipa3_wwan_register_to_ipa(int index)
785{
786 struct ipa_tx_intf tx_properties = {0};
787 struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
788 struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
789 struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
790 struct ipa_rx_intf rx_properties = {0};
791 struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
792 struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
793 struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
794 struct ipa_ext_intf ext_properties = {0};
795 struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
796 u32 pyld_sz;
797 int ret = 0, i;
798
799 IPAWANDBG("index(%d) device[%s]:\n", index,
800 rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
801 if (!rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set) {
802 ret = ipa3_add_qmap_hdr(
803 rmnet_ipa3_ctx->mux_channel[index].mux_id,
804 &rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
805 if (ret) {
806 IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
807 return ret;
808 }
809 rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set = true;
810 }
811 tx_properties.prop = tx_ioc_properties;
812 tx_ipv4_property = &tx_properties.prop[0];
813 tx_ipv4_property->ip = IPA_IP_v4;
814 tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
815 snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
816 A2_MUX_HDR_NAME_V4_PREF,
817 rmnet_ipa3_ctx->mux_channel[index].mux_id);
818 tx_ipv6_property = &tx_properties.prop[1];
819 tx_ipv6_property->ip = IPA_IP_v6;
820 tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
821 /* no need use A2_MUX_HDR_NAME_V6_PREF, same header */
822 snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
823 A2_MUX_HDR_NAME_V4_PREF,
824 rmnet_ipa3_ctx->mux_channel[index].mux_id);
825 tx_properties.num_props = 2;
826
827 rx_properties.prop = rx_ioc_properties;
828 rx_ipv4_property = &rx_properties.prop[0];
829 rx_ipv4_property->ip = IPA_IP_v4;
830 rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
831 rx_ipv4_property->attrib.meta_data =
832 rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
833 rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
834 rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
835 rx_ipv6_property = &rx_properties.prop[1];
836 rx_ipv6_property->ip = IPA_IP_v6;
837 rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
838 rx_ipv6_property->attrib.meta_data =
839 rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
840 rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
841 rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
842 rx_properties.num_props = 2;
843
844 pyld_sz = rmnet_ipa3_ctx->num_q6_rules *
845 sizeof(struct ipa_ioc_ext_intf_prop);
846 ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
847 if (!ext_ioc_properties) {
848 IPAWANERR("Error allocate memory\n");
849 return -ENOMEM;
850 }
851
852 ext_properties.prop = ext_ioc_properties;
853 ext_properties.excp_pipe_valid = true;
854 ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
855 ext_properties.num_props = rmnet_ipa3_ctx->num_q6_rules;
856 for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
857 memcpy(&(ext_properties.prop[i]),
858 &(ipa3_qmi_ctx->q6_ul_filter_rule[i]),
859 sizeof(struct ipa_ioc_ext_intf_prop));
860 ext_properties.prop[i].mux_id =
861 rmnet_ipa3_ctx->mux_channel[index].mux_id;
862 IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
863 ext_properties.prop[i].ip,
864 ext_properties.prop[i].rt_tbl_idx);
865 IPAWANDBG("action: %d mux:%d\n",
866 ext_properties.prop[i].action,
867 ext_properties.prop[i].mux_id);
868 }
869 ret = ipa3_register_intf_ext(rmnet_ipa3_ctx->mux_channel[index].
870 vchannel_name, &tx_properties,
871 &rx_properties, &ext_properties);
872 if (ret) {
873 IPAWANERR("[%s]:ipa3_register_intf failed %d\n",
874 rmnet_ipa3_ctx->mux_channel[index].vchannel_name, ret);
875 goto fail;
876 }
877 rmnet_ipa3_ctx->mux_channel[index].ul_flt_reg = true;
878fail:
879 kfree(ext_ioc_properties);
880 return ret;
881}
882
883static void ipa3_cleanup_deregister_intf(void)
884{
885 int i;
886 int ret;
887
888 for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
889 if (rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg) {
890 ret = ipa3_deregister_intf(
891 rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
892 if (ret < 0) {
893 IPAWANERR("de-register device %s(%d) failed\n",
894 rmnet_ipa3_ctx->mux_channel[i].
895 vchannel_name,
896 i);
897 return;
898 }
899 IPAWANDBG("de-register device %s(%d) success\n",
900 rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
901 i);
902 }
903 rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = false;
904 }
905}
906
907int ipa3_wwan_update_mux_channel_prop(void)
908{
909 int ret = 0, i;
910 /* install UL filter rules */
911 if (rmnet_ipa3_ctx->egress_set) {
912 if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
913 IPAWANDBG("setup UL filter rules\n");
914 if (rmnet_ipa3_ctx->a7_ul_flt_set) {
915 IPAWANDBG("del previous UL filter rules\n");
916 /* delete rule handlers */
917 ret = ipa3_wwan_del_ul_flt_rule_to_ipa();
918 if (ret) {
919 IPAWANERR("failed to del old rules\n");
920 return -EINVAL;
921 }
922 IPAWANDBG("deleted old UL rules\n");
923 }
924 ret = ipa3_wwan_add_ul_flt_rule_to_ipa();
925 }
926 if (ret)
927 IPAWANERR("failed to install UL rules\n");
928 else
929 rmnet_ipa3_ctx->a7_ul_flt_set = true;
930 }
931 /* update Tx/Rx/Ext property */
932 IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
933 if (rmnet_ipa3_ctx->rmnet_index == 0) {
934 IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
935 return ret;
936 }
937
938 ipa3_cleanup_deregister_intf();
939
940 for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
941 ret = ipa3_wwan_register_to_ipa(i);
942 if (ret < 0) {
943 IPAWANERR("failed to re-regist %s, mux %d, index %d\n",
944 rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
945 rmnet_ipa3_ctx->mux_channel[i].mux_id,
946 i);
947 return -ENODEV;
948 }
949 IPAWANERR("dev(%s) has registered to IPA\n",
950 rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
951 rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = true;
952 }
953 return ret;
954}
955
956#ifdef INIT_COMPLETION
957#define reinit_completion(x) INIT_COMPLETION(*(x))
958#endif /* INIT_COMPLETION */
959
960static int __ipa_wwan_open(struct net_device *dev)
961{
962 struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
963
964 IPAWANDBG("[%s] __wwan_open()\n", dev->name);
965 if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
966 reinit_completion(&wwan_ptr->resource_granted_completion);
967 wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
968
969 if (ipa3_rmnet_res.ipa_napi_enable)
970 napi_enable(&(wwan_ptr->napi));
971 return 0;
972}
973
974/**
975 * ipa3_wwan_open() - Opens the wwan network interface. Opens logical
976 * channel on A2 MUX driver and starts the network stack queue
977 *
978 * @dev: network device
979 *
980 * Return codes:
981 * 0: success
982 * -ENODEV: Error while opening logical channel on A2 MUX driver
983 */
984static int ipa3_wwan_open(struct net_device *dev)
985{
986 int rc = 0;
987
988 IPAWANDBG("[%s] wwan_open()\n", dev->name);
989 rc = __ipa_wwan_open(dev);
990 if (rc == 0)
991 netif_start_queue(dev);
992 return rc;
993}
994
995static int __ipa_wwan_close(struct net_device *dev)
996{
997 struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
998 int rc = 0;
999
1000 if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
1001 wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
1002 /* do not close wwan port once up, this causes
1003 * remote side to hang if tried to open again
1004 */
1005 reinit_completion(&wwan_ptr->resource_granted_completion);
1006 rc = ipa3_deregister_intf(dev->name);
1007 if (rc) {
1008 IPAWANERR("[%s]: ipa3_deregister_intf failed %d\n",
1009 dev->name, rc);
1010 return rc;
1011 }
1012 return rc;
1013 } else {
1014 return -EBADF;
1015 }
1016}
1017
1018/**
1019 * ipa3_wwan_stop() - Stops the wwan network interface. Closes
1020 * logical channel on A2 MUX driver and stops the network stack
1021 * queue
1022 *
1023 * @dev: network device
1024 *
1025 * Return codes:
1026 * 0: success
1027 * -ENODEV: Error while closing logical channel on A2 MUX driver
1028 */
1029static int ipa3_wwan_stop(struct net_device *dev)
1030{
1031 IPAWANDBG("[%s] ipa3_wwan_stop()\n", dev->name);
1032 __ipa_wwan_close(dev);
1033 netif_stop_queue(dev);
1034 return 0;
1035}
1036
1037static int ipa3_wwan_change_mtu(struct net_device *dev, int new_mtu)
1038{
1039 if (new_mtu < 0 || new_mtu > WWAN_DATA_LEN)
1040 return -EINVAL;
1041 IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
1042 dev->name, dev->mtu, new_mtu);
1043 dev->mtu = new_mtu;
1044 return 0;
1045}
1046
1047/**
1048 * ipa3_wwan_xmit() - Transmits an skb.
1049 *
1050 * @skb: skb to be transmitted
1051 * @dev: network device
1052 *
1053 * Return codes:
1054 * 0: success
1055 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
1056 * later
1057 * -EFAULT: Error while transmitting the skb
1058 */
1059static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
1060{
1061 int ret = 0;
1062 bool qmap_check;
1063 struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
1064 struct ipa_tx_meta meta;
1065
1066 if (skb->protocol != htons(ETH_P_MAP)) {
1067 IPAWANDBG_LOW
1068 ("SW filtering out none QMAP packet received from %s",
1069 current->comm);
1070 return NETDEV_TX_OK;
1071 }
1072
1073 qmap_check = RMNET_MAP_GET_CD_BIT(skb);
1074 if (netif_queue_stopped(dev)) {
1075 if (qmap_check &&
1076 atomic_read(&wwan_ptr->outstanding_pkts) <
1077 wwan_ptr->outstanding_high_ctl) {
1078 pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
1079 goto send;
1080 } else {
1081 pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
1082 return NETDEV_TX_BUSY;
1083 }
1084 }
1085
1086 /* checking High WM hit */
1087 if (atomic_read(&wwan_ptr->outstanding_pkts) >=
1088 wwan_ptr->outstanding_high) {
1089 if (!qmap_check) {
1090 IPAWANDBG_LOW("pending(%d)/(%d)- stop(%d)\n",
1091 atomic_read(&wwan_ptr->outstanding_pkts),
1092 wwan_ptr->outstanding_high,
1093 netif_queue_stopped(dev));
1094 IPAWANDBG_LOW("qmap_chk(%d)\n", qmap_check);
1095 netif_stop_queue(dev);
1096 return NETDEV_TX_BUSY;
1097 }
1098 }
1099
1100send:
1101 /* IPA_RM checking start */
1102 ret = ipa_rm_inactivity_timer_request_resource(
1103 IPA_RM_RESOURCE_WWAN_0_PROD);
1104 if (ret == -EINPROGRESS) {
1105 netif_stop_queue(dev);
1106 return NETDEV_TX_BUSY;
1107 }
1108 if (ret) {
1109 pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
1110 dev->name, ret);
1111 return -EFAULT;
1112 }
1113 /* IPA_RM checking end */
1114
1115 if (RMNET_MAP_GET_CD_BIT(skb)) {
1116 memset(&meta, 0, sizeof(meta));
1117 meta.pkt_init_dst_ep_valid = true;
1118 meta.pkt_init_dst_ep_remote = true;
1119 ret = ipa3_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
1120 } else {
1121 ret = ipa3_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
1122 }
1123
1124 if (ret) {
1125 ret = NETDEV_TX_BUSY;
1126 dev->stats.tx_dropped++;
1127 goto out;
1128 }
1129
1130 atomic_inc(&wwan_ptr->outstanding_pkts);
1131 dev->stats.tx_packets++;
1132 dev->stats.tx_bytes += skb->len;
1133 ret = NETDEV_TX_OK;
1134out:
1135 ipa_rm_inactivity_timer_release_resource(
1136 IPA_RM_RESOURCE_WWAN_0_PROD);
1137 return ret;
1138}
1139
1140static void ipa3_wwan_tx_timeout(struct net_device *dev)
1141{
1142 IPAWANERR("[%s] ipa3_wwan_tx_timeout(), data stall in UL\n", dev->name);
1143}
1144
1145/**
1146 * apps_ipa_tx_complete_notify() - Tx complete notify
1147 *
1148 * @priv: driver context
1149 * @evt: event type
1150 * @data: data provided with event
1151 *
1152 * Check that the packet is the one we sent and release it
1153 * This function will be called in deferred context in IPA wq.
1154 */
1155static void apps_ipa_tx_complete_notify(void *priv,
1156 enum ipa_dp_evt_type evt,
1157 unsigned long data)
1158{
1159 struct sk_buff *skb = (struct sk_buff *)data;
1160 struct net_device *dev = (struct net_device *)priv;
1161 struct ipa3_wwan_private *wwan_ptr;
1162
1163 if (dev != IPA_NETDEV()) {
1164 IPAWANDBG("Received pre-SSR packet completion\n");
1165 dev_kfree_skb_any(skb);
1166 return;
1167 }
1168
1169 if (evt != IPA_WRITE_DONE) {
1170 IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
1171 dev_kfree_skb_any(skb);
1172 dev->stats.tx_dropped++;
1173 return;
1174 }
1175
1176 wwan_ptr = netdev_priv(dev);
1177 atomic_dec(&wwan_ptr->outstanding_pkts);
1178 __netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
1179 if (!atomic_read(&rmnet_ipa3_ctx->is_ssr) &&
1180 netif_queue_stopped(wwan_ptr->net) &&
1181 atomic_read(&wwan_ptr->outstanding_pkts) <
1182 (wwan_ptr->outstanding_low)) {
1183 IPAWANDBG_LOW("Outstanding low (%d) - waking up queue\n",
1184 wwan_ptr->outstanding_low);
1185 netif_wake_queue(wwan_ptr->net);
1186 }
1187 __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
1188 dev_kfree_skb_any(skb);
1189 ipa_rm_inactivity_timer_release_resource(
1190 IPA_RM_RESOURCE_WWAN_0_PROD);
1191}
1192
1193/**
1194 * apps_ipa_packet_receive_notify() - Rx notify
1195 *
1196 * @priv: driver context
1197 * @evt: event type
1198 * @data: data provided with event
1199 *
1200 * IPA will pass a packet to the Linux network stack with skb->data
1201 */
1202static void apps_ipa_packet_receive_notify(void *priv,
1203 enum ipa_dp_evt_type evt,
1204 unsigned long data)
1205{
1206 struct net_device *dev = (struct net_device *)priv;
1207
1208 if (evt == IPA_RECEIVE) {
1209 struct sk_buff *skb = (struct sk_buff *)data;
1210 int result;
1211 unsigned int packet_len = skb->len;
1212
1213 IPAWANDBG_LOW("Rx packet was received");
1214 skb->dev = IPA_NETDEV();
1215 skb->protocol = htons(ETH_P_MAP);
1216
1217 if (ipa3_rmnet_res.ipa_napi_enable) {
1218 trace_rmnet_ipa_netif_rcv_skb3(dev->stats.rx_packets);
1219 result = netif_receive_skb(skb);
1220 } else {
1221 if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
1222 == 0) {
1223 trace_rmnet_ipa_netifni3(dev->stats.rx_packets);
1224 result = netif_rx_ni(skb);
1225 } else {
1226 trace_rmnet_ipa_netifrx3(dev->stats.rx_packets);
1227 result = netif_rx(skb);
1228 }
1229 }
1230
1231 if (result) {
1232 pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
1233 __func__, __LINE__);
1234 dev->stats.rx_dropped++;
1235 }
1236 dev->stats.rx_packets++;
1237 dev->stats.rx_bytes += packet_len;
1238 } else if (evt == IPA_CLIENT_START_POLL)
1239 ipa3_rmnet_rx_cb(priv);
1240 else if (evt == IPA_CLIENT_COMP_NAPI) {
1241 if (ipa3_rmnet_res.ipa_napi_enable)
1242 napi_complete(&(rmnet_ipa3_ctx->wwan_priv->napi));
1243 } else
1244 IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
1245}
1246
1247static int handle3_ingress_format(struct net_device *dev,
1248 struct rmnet_ioctl_extended_s *in)
1249{
1250 int ret = 0;
1251 struct ipa_sys_connect_params *ipa_wan_ep_cfg;
1252 struct rmnet_phys_ep_conf_s *ep_cfg;
1253
1254 IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
1255 ipa_wan_ep_cfg = &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg;
1256 if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
1257 ipa_wan_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
1258 IPA_ENABLE_CS_OFFLOAD_DL;
1259
1260 if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
1261 IPAWANERR("get AGG size %d count %d\n",
1262 in->u.ingress_format.agg_size,
1263 in->u.ingress_format.agg_count);
1264
1265 ret = ipa_disable_apps_wan_cons_deaggr(
1266 in->u.ingress_format.agg_size,
1267 in->u.ingress_format.agg_count);
1268
1269 if (!ret) {
1270 ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
1271 in->u.ingress_format.agg_size;
1272 ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
1273 in->u.ingress_format.agg_count;
1274
1275 if (ipa_wan_ep_cfg->napi_enabled) {
1276 ipa_wan_ep_cfg->recycle_enabled = true;
1277 ep_cfg = (struct rmnet_phys_ep_conf_s *)
1278 rcu_dereference(dev->rx_handler_data);
1279 ep_cfg->recycle = ipa_recycle_wan_skb;
1280 pr_info("Wan Recycle Enabled\n");
1281 }
1282 }
1283 }
1284
1285 ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4;
1286 ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
1287 ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
1288 ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
1289 ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
1290
1291 ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
1292 ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
1293 ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = true;
1294 ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
1295 ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
1296 ipa_wan_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
1297
1298 ipa_wan_ep_cfg->client = IPA_CLIENT_APPS_WAN_CONS;
1299 ipa_wan_ep_cfg->notify = apps_ipa_packet_receive_notify;
1300 ipa_wan_ep_cfg->priv = dev;
1301
1302 ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable;
1303 if (ipa_wan_ep_cfg->napi_enabled)
1304 ipa_wan_ep_cfg->desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
1305 else
1306 ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
1307
1308 mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
1309
1310 if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
1311 IPAWANDBG("In SSR sequence/recovery\n");
1312 mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
1313 return -EFAULT;
1314 }
1315 ret = ipa3_setup_sys_pipe(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
1316 &rmnet_ipa3_ctx->ipa3_to_apps_hdl);
1317
1318 mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
1319
1320 if (ret)
1321 IPAWANERR("failed to configure ingress\n");
1322
1323 return ret;
1324}
1325
1326/**
1327 * ipa3_wwan_ioctl() - I/O control for wwan network driver.
1328 *
1329 * @dev: network device
1330 * @ifr: ignored
1331 * @cmd: cmd to be executed. Can be one of the following:
1332 * IPA_WWAN_IOCTL_OPEN - Open the network interface
1333 * IPA_WWAN_IOCTL_CLOSE - Close the network interface
1334 *
1335 * Return codes:
1336 * 0: success
1337 * -EFAULT: failed to copy ioctl data to/from user space
1338 * -EINVAL: unsupported ioctl command
1339 * -ENODEV: failed to register the device with IPA
1340 */
1341static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1342{
1343 int rc = 0;
1344 int mru = 1000, epid = 1, mux_index, len;
1345 struct ipa_msg_meta msg_meta;
1346 struct ipa_wan_msg *wan_msg = NULL;
1347 struct rmnet_ioctl_extended_s extend_ioctl_data;
1348 struct rmnet_ioctl_data_s ioctl_data;
1349 struct ipa3_rmnet_mux_val *mux_channel;
1350 int rmnet_index;
1351
1352 IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
1353 switch (cmd) {
1354 /* Set Ethernet protocol */
1355 case RMNET_IOCTL_SET_LLP_ETHERNET:
1356 break;
1357 /* Set RAWIP protocol */
1358 case RMNET_IOCTL_SET_LLP_IP:
1359 break;
1360 /* Get link protocol */
1361 case RMNET_IOCTL_GET_LLP:
1362 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
1363 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1364 sizeof(struct rmnet_ioctl_data_s)))
1365 rc = -EFAULT;
1366 break;
1367 /* Set QoS header enabled */
1368 case RMNET_IOCTL_SET_QOS_ENABLE:
1369 return -EINVAL;
1370 /* Set QoS header disabled */
1371 case RMNET_IOCTL_SET_QOS_DISABLE:
1372 break;
1373 /* Get QoS header state */
1374 case RMNET_IOCTL_GET_QOS:
1375 ioctl_data.u.operation_mode = RMNET_MODE_NONE;
1376 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1377 sizeof(struct rmnet_ioctl_data_s)))
1378 rc = -EFAULT;
1379 break;
1380 /* Get operation mode */
1381 case RMNET_IOCTL_GET_OPMODE:
1382 ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
1383 if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
1384 sizeof(struct rmnet_ioctl_data_s)))
1385 rc = -EFAULT;
1386 break;
1387 /* Open transport port */
1388 case RMNET_IOCTL_OPEN:
1389 break;
1390 /* Close transport port */
1391 case RMNET_IOCTL_CLOSE:
1392 break;
1393 /* Flow enable */
1394 case RMNET_IOCTL_FLOW_ENABLE:
1395 IPAWANDBG("Received flow enable\n");
1396 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
1397 sizeof(struct rmnet_ioctl_data_s))) {
1398 rc = -EFAULT;
1399 break;
1400 }
1401 ipa3_flow_control(IPA_CLIENT_USB_PROD, true,
1402 ioctl_data.u.tcm_handle);
1403 break;
1404 /* Flow disable */
1405 case RMNET_IOCTL_FLOW_DISABLE:
1406 IPAWANDBG("Received flow disable\n");
1407 if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
1408 sizeof(struct rmnet_ioctl_data_s))) {
1409 rc = -EFAULT;
1410 break;
1411 }
1412 ipa3_flow_control(IPA_CLIENT_USB_PROD, false,
1413 ioctl_data.u.tcm_handle);
1414 break;
1415 /* Set flow handle */
1416 case RMNET_IOCTL_FLOW_SET_HNDL:
1417 break;
1418
1419 /* Extended IOCTLs */
1420 case RMNET_IOCTL_EXTENDED:
1421 IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
1422 if (copy_from_user(&extend_ioctl_data,
1423 (u8 *)ifr->ifr_ifru.ifru_data,
1424 sizeof(struct rmnet_ioctl_extended_s))) {
1425 IPAWANERR("failed to copy extended ioctl data\n");
1426 rc = -EFAULT;
1427 break;
1428 }
1429 switch (extend_ioctl_data.extended_ioctl) {
1430 /* Get features */
1431 case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
1432 IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
1433 extend_ioctl_data.u.data =
1434 (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
1435 RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
1436 RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
1437 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1438 &extend_ioctl_data,
1439 sizeof(struct rmnet_ioctl_extended_s)))
1440 rc = -EFAULT;
1441 break;
1442 /* Set MRU */
1443 case RMNET_IOCTL_SET_MRU:
1444 mru = extend_ioctl_data.u.data;
1445 IPAWANDBG("get MRU size %d\n",
1446 extend_ioctl_data.u.data);
1447 break;
1448 /* Get MRU */
1449 case RMNET_IOCTL_GET_MRU:
1450 extend_ioctl_data.u.data = mru;
1451 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1452 &extend_ioctl_data,
1453 sizeof(struct rmnet_ioctl_extended_s)))
1454 rc = -EFAULT;
1455 break;
1456 /* GET SG support */
1457 case RMNET_IOCTL_GET_SG_SUPPORT:
1458 extend_ioctl_data.u.data =
1459 ipa3_rmnet_res.ipa_advertise_sg_support;
1460 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1461 &extend_ioctl_data,
1462 sizeof(struct rmnet_ioctl_extended_s)))
1463 rc = -EFAULT;
1464 break;
1465 /* Get endpoint ID */
1466 case RMNET_IOCTL_GET_EPID:
1467 IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
1468 extend_ioctl_data.u.data = epid;
1469 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1470 &extend_ioctl_data,
1471 sizeof(struct rmnet_ioctl_extended_s)))
1472 rc = -EFAULT;
1473 if (copy_from_user(&extend_ioctl_data,
1474 (u8 *)ifr->ifr_ifru.ifru_data,
1475 sizeof(struct rmnet_ioctl_extended_s))) {
1476 IPAWANERR("copy extended ioctl data failed\n");
1477 rc = -EFAULT;
1478 break;
1479 }
1480 IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
1481 extend_ioctl_data.u.data);
1482 break;
1483 /* Endpoint pair */
1484 case RMNET_IOCTL_GET_EP_PAIR:
1485 IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
1486 extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
1487 ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
1488 extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
1489 ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
1490 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1491 &extend_ioctl_data,
1492 sizeof(struct rmnet_ioctl_extended_s)))
1493 rc = -EFAULT;
1494 if (copy_from_user(&extend_ioctl_data,
1495 (u8 *)ifr->ifr_ifru.ifru_data,
1496 sizeof(struct rmnet_ioctl_extended_s))) {
1497 IPAWANERR("copy extended ioctl data failed\n");
1498 rc = -EFAULT;
1499 break;
1500 }
1501 IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
1502 extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
1503 extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
1504 break;
1505 /* Get driver name */
1506 case RMNET_IOCTL_GET_DRIVER_NAME:
1507 memcpy(&extend_ioctl_data.u.if_name,
1508 IPA_NETDEV()->name,
1509 IFNAMSIZ);
1510 if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
1511 &extend_ioctl_data,
1512 sizeof(struct rmnet_ioctl_extended_s)))
1513 rc = -EFAULT;
1514 break;
1515 /* Add MUX ID */
1516 case RMNET_IOCTL_ADD_MUX_CHANNEL:
1517 mux_index = ipa3_find_mux_channel_index(
1518 extend_ioctl_data.u.rmnet_mux_val.mux_id);
1519 if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
1520 IPAWANDBG("already setup mux(%d)\n",
1521 extend_ioctl_data.u.
1522 rmnet_mux_val.mux_id);
1523 return rc;
1524 }
1525 if (rmnet_ipa3_ctx->rmnet_index
1526 >= MAX_NUM_OF_MUX_CHANNEL) {
1527 IPAWANERR("Exceed mux_channel limit(%d)\n",
1528 rmnet_ipa3_ctx->rmnet_index);
1529 return -EFAULT;
1530 }
1531 IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
1532 extend_ioctl_data.u.rmnet_mux_val.mux_id,
1533 extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
1534 /* cache the mux name and id */
1535 mux_channel = rmnet_ipa3_ctx->mux_channel;
1536 rmnet_index = rmnet_ipa3_ctx->rmnet_index;
1537
1538 mux_channel[rmnet_index].mux_id =
1539 extend_ioctl_data.u.rmnet_mux_val.mux_id;
1540 memcpy(mux_channel[rmnet_index].vchannel_name,
1541 extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
1542 sizeof(mux_channel[rmnet_index]
1543 .vchannel_name));
1544 IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
1545 mux_channel[rmnet_index].vchannel_name,
1546 mux_channel[rmnet_index].mux_id,
1547 rmnet_index);
1548 /* check if UL filter rules have arrived */
1549 if (rmnet_ipa3_ctx->num_q6_rules != 0) {
1550 IPAWANERR("dev(%s) register to IPA\n",
1551 extend_ioctl_data.u.rmnet_mux_val.
1552 vchannel_name);
1553 rc = ipa3_wwan_register_to_ipa(
1554 rmnet_ipa3_ctx->rmnet_index);
1555 if (rc < 0) {
1556 IPAWANERR("device %s reg IPA failed\n",
1557 extend_ioctl_data.u.
1558 rmnet_mux_val.vchannel_name);
1559 return -ENODEV;
1560 }
1561 mux_channel[rmnet_index].mux_channel_set = true;
1562 mux_channel[rmnet_index].ul_flt_reg = true;
1563 } else {
1564 IPAWANDBG("dev(%s) haven't registered to IPA\n",
1565 extend_ioctl_data.u.
1566 rmnet_mux_val.vchannel_name);
1567 mux_channel[rmnet_index].mux_channel_set = true;
1568 mux_channel[rmnet_index].ul_flt_reg = false;
1569 }
1570 rmnet_ipa3_ctx->rmnet_index++;
1571 break;
1572 case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
1573 IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
1574 if ((extend_ioctl_data.u.data) &
1575 RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
1576 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
1577 ipa_ep_cfg.hdr.hdr_len = 8;
1578 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
1579 ipa_ep_cfg.cfg.cs_offload_en =
1580 IPA_ENABLE_CS_OFFLOAD_UL;
1581 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
1582 ipa_ep_cfg.cfg.cs_metadata_hdr_offset
1583 = 1;
1584 } else {
1585 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
1586 ipa_ep_cfg.hdr.hdr_len = 4;
1587 }
1588 if ((extend_ioctl_data.u.data) &
1589 RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
1590 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
1591 ipa_ep_cfg.aggr.aggr_en =
1592 IPA_ENABLE_AGGR;
1593 else
1594 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
1595 ipa_ep_cfg.aggr.aggr_en =
1596 IPA_BYPASS_AGGR;
1597 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
1598 hdr_ofst_metadata_valid = 1;
1599 /* the modem wants the metadata offset at 0 */
1600 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
1601 hdr_ofst_metadata = 0;
1602 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
1603 dst = IPA_CLIENT_APPS_LAN_WAN_PROD;
1604 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
1605 mode = IPA_BASIC;
1606
1607 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.client =
1608 IPA_CLIENT_APPS_LAN_WAN_PROD;
1609 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.notify =
1610 apps_ipa_tx_complete_notify;
1611 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.desc_fifo_sz =
1612 IPA_SYS_TX_DATA_DESC_FIFO_SZ;
1613 rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.priv = dev;
1614
1615 rc = ipa3_setup_sys_pipe(
1616 &rmnet_ipa3_ctx->apps_to_ipa_ep_cfg,
1617 &rmnet_ipa3_ctx->apps_to_ipa3_hdl);
1618 if (rc)
1619 IPAWANERR("failed to config egress endpoint\n");
1620
1621 if (rmnet_ipa3_ctx->num_q6_rules != 0) {
1622 /* already got Q6 UL filter rules*/
1623 if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt
1624 == false)
1625 rc = ipa3_wwan_add_ul_flt_rule_to_ipa();
1626 else
1627 rc = 0;
1628 rmnet_ipa3_ctx->egress_set = true;
1629 if (rc)
1630 IPAWANERR("install UL rules failed\n");
1631 else
1632 rmnet_ipa3_ctx->a7_ul_flt_set = true;
1633 } else {
1634 /* wait Q6 UL filter rules*/
1635 rmnet_ipa3_ctx->egress_set = true;
1636 IPAWANDBG("no UL-rules, egress_set(%d)\n",
1637 rmnet_ipa3_ctx->egress_set);
1638 }
1639 break;
1640 case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
1641 rc = handle3_ingress_format(dev, &extend_ioctl_data);
1642 break;
1643 case RMNET_IOCTL_SET_XLAT_DEV_INFO:
1644 wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
1645 GFP_KERNEL);
1646 if (!wan_msg) {
1647 IPAWANERR("Failed to allocate memory.\n");
1648 return -ENOMEM;
1649 }
1650 len = sizeof(wan_msg->upstream_ifname) >
1651 sizeof(extend_ioctl_data.u.if_name) ?
1652 sizeof(extend_ioctl_data.u.if_name) :
1653 sizeof(wan_msg->upstream_ifname);
1654 strlcpy(wan_msg->upstream_ifname,
1655 extend_ioctl_data.u.if_name, len);
1656 memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
1657 msg_meta.msg_type = WAN_XLAT_CONNECT;
1658 msg_meta.msg_len = sizeof(struct ipa_wan_msg);
1659 rc = ipa3_send_msg(&msg_meta, wan_msg,
1660 ipa3_wwan_msg_free_cb);
1661 if (rc) {
1662 IPAWANERR("Failed to send XLAT_CONNECT msg\n");
1663 kfree(wan_msg);
1664 }
1665 break;
1666 /* Get agg count */
1667 case RMNET_IOCTL_GET_AGGREGATION_COUNT:
1668 break;
1669 /* Set agg count */
1670 case RMNET_IOCTL_SET_AGGREGATION_COUNT:
1671 break;
1672 /* Get agg size */
1673 case RMNET_IOCTL_GET_AGGREGATION_SIZE:
1674 break;
1675 /* Set agg size */
1676 case RMNET_IOCTL_SET_AGGREGATION_SIZE:
1677 break;
1678 /* Do flow control */
1679 case RMNET_IOCTL_FLOW_CONTROL:
1680 break;
1681 /* For legacy use */
1682 case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
1683 break;
1684 /* Get HW/SW map */
1685 case RMNET_IOCTL_GET_HWSW_MAP:
1686 break;
1687 /* Set RX Headroom */
1688 case RMNET_IOCTL_SET_RX_HEADROOM:
1689 break;
1690 default:
1691 IPAWANERR("[%s] unsupported extended cmd[%d]",
1692 dev->name,
1693 extend_ioctl_data.extended_ioctl);
1694 rc = -EINVAL;
1695 }
1696 break;
1697 default:
1698 IPAWANERR("[%s] unsupported cmd[%d]",
1699 dev->name, cmd);
1700 rc = -EINVAL;
1701 }
1702 return rc;
1703}
1704
1705static const struct net_device_ops ipa3_wwan_ops_ip = {
1706 .ndo_open = ipa3_wwan_open,
1707 .ndo_stop = ipa3_wwan_stop,
1708 .ndo_start_xmit = ipa3_wwan_xmit,
1709 .ndo_tx_timeout = ipa3_wwan_tx_timeout,
1710 .ndo_do_ioctl = ipa3_wwan_ioctl,
1711 .ndo_change_mtu = ipa3_wwan_change_mtu,
1712 .ndo_set_mac_address = NULL,
1713 .ndo_validate_addr = NULL,
1714};
1715
1716/**
1717 * ipa3_wwan_setup() - Sets up the wwan network driver.
1718 *
1719 * @dev: network device
1720 *
1721 * Return codes:
1722 * None
1723 */
1724
1725static void ipa3_wwan_setup(struct net_device *dev)
1726{
1727 dev->netdev_ops = &ipa3_wwan_ops_ip;
1728 ether_setup(dev);
1729 /* set this after calling ether_setup */
1730 dev->header_ops = NULL; /* no L2 header */
1731 dev->type = ARPHRD_RAWIP;
1732 dev->hard_header_len = 0;
1733 dev->mtu = WWAN_DATA_LEN;
1734 dev->addr_len = 0;
1735 dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
1736 dev->needed_headroom = HEADROOM_FOR_QMAP;
1737 dev->needed_tailroom = TAILROOM;
1738 dev->watchdog_timeo = 1000;
1739}

/* IPA_RM related functions start */
static void ipa3_q6_prod_rm_request_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_request,
	ipa3_q6_prod_rm_request_resource);
static void ipa3_q6_prod_rm_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_release,
	ipa3_q6_prod_rm_release_resource);

static void ipa3_q6_prod_rm_request_resource(struct work_struct *work)
{
	int ret = 0;

	ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (ret < 0 && ret != -EINPROGRESS) {
		IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
			ret);
		return;
	}
}

static int ipa3_q6_rm_request_resource(void)
{
	queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
		&ipa3_q6_con_rm_request, 0);
	return 0;
}

static void ipa3_q6_prod_rm_release_resource(struct work_struct *work)
{
	int ret = 0;

	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (ret < 0 && ret != -EINPROGRESS) {
		IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
			ret);
		return;
	}
}

static int ipa3_q6_rm_release_resource(void)
{
	queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
		&ipa3_q6_con_rm_release, 0);
	return 0;
}

static void ipa3_q6_rm_notify_cb(void *user_data,
	enum ipa_rm_event event,
	unsigned long data)
{
	switch (event) {
	case IPA_RM_RESOURCE_GRANTED:
		IPAWANDBG_LOW("%s: Q6_PROD GRANTED CB\n", __func__);
		break;
	case IPA_RM_RESOURCE_RELEASED:
		IPAWANDBG_LOW("%s: Q6_PROD RELEASED CB\n", __func__);
		break;
	default:
		return;
	}
}
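
/*
 * Illustration only (not built): the usual calling pattern for the IPA RM
 * request/grant handshake used above. ipa_rm_request_resource() may grant
 * synchronously (0) or asynchronously (-EINPROGRESS), in which case the
 * caller waits for the IPA_RM_RESOURCE_GRANTED callback. All example_*
 * names are hypothetical.
 */
#if 0
static DECLARE_COMPLETION(example_granted);

static void example_rm_notify(void *user_data, enum ipa_rm_event event,
	unsigned long data)
{
	if (event == IPA_RM_RESOURCE_GRANTED)
		complete_all(&example_granted);
}

static int example_request_and_wait(enum ipa_rm_resource_name res)
{
	int ret = ipa_rm_request_resource(res);

	if (ret == -EINPROGRESS) {
		/* The grant arrives later via example_rm_notify() */
		wait_for_completion(&example_granted);
		ret = 0;
	}
	return ret;
}
#endif
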
static int ipa3_q6_initialize_rm(void)
{
	struct ipa_rm_create_params create_params;
	struct ipa_rm_perf_profile profile;
	int result;

	/* Initialize IPA_RM workqueue */
	rmnet_ipa3_ctx->rm_q6_wq = create_singlethread_workqueue("clnt_req");
	if (!rmnet_ipa3_ctx->rm_q6_wq)
		return -ENOMEM;

	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_Q6_PROD;
	create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto create_rsrc_err1;
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_Q6_CONS;
	create_params.release_resource = &ipa3_q6_rm_release_resource;
	create_params.request_resource = &ipa3_q6_rm_request_resource;
	result = ipa_rm_create_resource(&create_params);
	if (result)
		goto create_rsrc_err2;
	/* add dependency */
	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
		IPA_RM_RESOURCE_APPS_CONS);
	if (result)
		goto add_dpnd_err;
	/* setup Performance profile */
	memset(&profile, 0, sizeof(profile));
	profile.max_supported_bandwidth_mbps = 100;
	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
		&profile);
	if (result)
		goto set_perf_err;
	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
		&profile);
	if (result)
		goto set_perf_err;
	return result;

set_perf_err:
	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
		IPA_RM_RESOURCE_APPS_CONS);
add_dpnd_err:
	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (result < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, result);
create_rsrc_err2:
	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (result < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, result);
create_rsrc_err1:
	destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
	return result;
}
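
/*
 * Resource-manager topology created above (and completed in
 * ipa3_wwan_probe() below):
 *
 *	IPA_RM_RESOURCE_Q6_PROD     -> IPA_RM_RESOURCE_APPS_CONS
 *	IPA_RM_RESOURCE_WWAN_0_PROD -> IPA_RM_RESOURCE_Q6_CONS
 *
 * so either side keeps the other's consumer (and hence the IPA clocks)
 * up while it is producing data.
 */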

void ipa3_q6_deinitialize_rm(void)
{
	int ret;

	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
		IPA_RM_RESOURCE_APPS_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
			ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_CONS, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_Q6_PROD, ret);
	destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
}

static void ipa3_wake_tx_queue(struct work_struct *work)
{
	if (IPA_NETDEV()) {
		__netif_tx_lock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
		netif_wake_queue(IPA_NETDEV());
		__netif_tx_unlock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
	}
}

/**
 * ipa3_rm_resource_granted() - Called upon
 * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
 *
 * @dev: network device registered as the IPA RM user_data
 *
 * Return codes:
 * None
 */
static void ipa3_rm_resource_granted(void *dev)
{
	IPAWANDBG_LOW("Resource Granted - starting queue\n");
	schedule_work(&ipa3_tx_wakequeue_work);
}

/**
 * ipa3_rm_notify() - Callback function for RM events. Handles
 * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
 * IPA_RM_RESOURCE_GRANTED is handled in the context of a shared
 * workqueue.
 *
 * @dev: network device
 * @event: IPA RM event
 * @data: Additional data provided by IPA RM
 *
 * Return codes:
 * None
 */
static void ipa3_rm_notify(void *dev, enum ipa_rm_event event,
	unsigned long data)
{
	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);

	pr_debug("%s: event %d\n", __func__, event);
	switch (event) {
	case IPA_RM_RESOURCE_GRANTED:
		if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
			complete_all(&wwan_ptr->resource_granted_completion);
			break;
		}
		ipa3_rm_resource_granted(dev);
		break;
	case IPA_RM_RESOURCE_RELEASED:
		break;
	default:
		pr_err("%s: unknown event %d\n", __func__, event);
		break;
	}
}

/* IPA_RM related functions end */

static int ipa3_ssr_notifier_cb(struct notifier_block *this,
	unsigned long code,
	void *data);

static struct notifier_block ipa3_ssr_notifier = {
	.notifier_call = ipa3_ssr_notifier_cb,
};

static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
	struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
	ipa_rmnet_drv_res->ipa_rmnet_ssr =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,rmnet-ipa-ssr");
	pr_info("IPA SSR support = %s\n",
		ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
	ipa_rmnet_drv_res->ipa_loaduC =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-loaduC");
	pr_info("IPA ipa-loaduC = %s\n",
		ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");

	ipa_rmnet_drv_res->ipa_advertise_sg_support =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-advertise-sg-support");
	pr_info("IPA SG support = %s\n",
		ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");

	ipa_rmnet_drv_res->ipa_napi_enable =
		of_property_read_bool(pdev->dev.of_node,
			"qcom,ipa-napi-enable");
	pr_info("IPA Napi Enable = %s\n",
		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
	return 0;
}
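
/*
 * Illustration only: a hypothetical devicetree node carrying the four
 * boolean properties read above. The "qcom,rmnet-ipa3" compatible string
 * matches rmnet_ipa_dt_match below; the node name and its placement in
 * the platform dtsi are assumptions.
 *
 *	rmnet_ipa3 {
 *		compatible = "qcom,rmnet-ipa3";
 *		qcom,rmnet-ipa-ssr;
 *		qcom,ipa-loaduC;
 *		qcom,ipa-advertise-sg-support;
 *		qcom,ipa-napi-enable;
 *	};
 */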

struct ipa3_rmnet_context ipa3_rmnet_ctx;
static int ipa3_wwan_probe(struct platform_device *pdev);
struct platform_device *m_pdev;

static void ipa3_delayed_probe(struct work_struct *work)
{
	(void)ipa3_wwan_probe(m_pdev);
}

static DECLARE_WORK(ipa3_scheduled_probe, ipa3_delayed_probe);

static void ipa3_ready_cb(void *user_data)
{
	struct platform_device *pdev = (struct platform_device *)(user_data);

	m_pdev = pdev;

	IPAWANDBG("IPA ready callback has been triggered!\n");

	schedule_work(&ipa3_scheduled_probe);
}

/**
 * ipa3_wwan_probe() - Initializes the module and registers it as a
 * network interface with the network stack
 *
 * Note: In case the IPA driver hasn't initialized yet, the probe function
 * will return immediately after registering a callback to be invoked when
 * IPA driver initialization is complete.
 *
 * Return codes:
 * 0: success
 * -ENOMEM: No memory available
 * -EFAULT: Internal error
 */
static int ipa3_wwan_probe(struct platform_device *pdev)
{
	int ret, i;
	struct net_device *dev;
	struct ipa_rm_create_params ipa_rm_params;	/* IPA_RM */
	struct ipa_rm_perf_profile profile;		/* IPA_RM */

	pr_info("rmnet_ipa3 started initialization\n");

	if (!ipa3_is_ready()) {
		IPAWANDBG("IPA driver not ready, registering callback\n");
		ret = ipa_register_ipa_ready_cb(ipa3_ready_cb, (void *)pdev);

		/*
		 * If we received -EEXIST, IPA has initialized. So we need
		 * to continue the probing process.
		 */
		if (ret != -EEXIST) {
			if (ret)
				IPAWANERR("IPA CB reg failed - %d\n", ret);
			return ret;
		}
	}

	ret = get_ipa_rmnet_dts_configuration(pdev, &ipa3_rmnet_res);
	ipa3_rmnet_ctx.ipa_rmnet_ssr = ipa3_rmnet_res.ipa_rmnet_ssr;

	ret = ipa3_init_q6_smem();
	if (ret) {
		IPAWANERR("ipa3_init_q6_smem failed!\n");
		return ret;
	}

	/* initialize tx/rx endpoint setup */
	memset(&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg, 0,
		sizeof(struct ipa_sys_connect_params));
	memset(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg, 0,
		sizeof(struct ipa_sys_connect_params));

	/* initialize ex property setup */
	rmnet_ipa3_ctx->num_q6_rules = 0;
	rmnet_ipa3_ctx->old_num_q6_rules = 0;
	rmnet_ipa3_ctx->rmnet_index = 0;
	rmnet_ipa3_ctx->egress_set = false;
	rmnet_ipa3_ctx->a7_ul_flt_set = false;
	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
		memset(&rmnet_ipa3_ctx->mux_channel[i], 0,
			sizeof(struct ipa3_rmnet_mux_val));

	/* start A7 QMI service/client */
	if (ipa3_rmnet_res.ipa_loaduC)
		/* Android platform loads uC */
		ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
	else
		/* LE platform does not load uC */
		ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);

	/* construct default WAN RT tbl for IPACM */
	ret = ipa3_setup_a7_qmap_hdr();
	if (ret)
		goto setup_a7_qmap_hdr_err;
	ret = ipa3_setup_dflt_wan_rt_tables();
	if (ret)
		goto setup_dflt_wan_rt_tables_err;

	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
		/* Start transport-driver fd ioctl for ipacm for first init */
		ret = ipa3_wan_ioctl_init();
		if (ret)
			goto wan_ioctl_init_err;
	} else {
		/* Enable sending QMI messages after SSR */
		ipa3_wan_ioctl_enable_qmi_messages();
	}

	/* initialize wan-driver netdev */
	dev = alloc_netdev(sizeof(struct ipa3_wwan_private),
		IPA_WWAN_DEV_NAME,
		NET_NAME_UNKNOWN,
		ipa3_wwan_setup);
	if (!dev) {
		IPAWANERR("no memory for netdev\n");
		ret = -ENOMEM;
		goto alloc_netdev_err;
	}
	rmnet_ipa3_ctx->wwan_priv = netdev_priv(dev);
	memset(rmnet_ipa3_ctx->wwan_priv, 0,
		sizeof(*(rmnet_ipa3_ctx->wwan_priv)));
	IPAWANDBG("wwan_ptr (private) = %p", rmnet_ipa3_ctx->wwan_priv);
	rmnet_ipa3_ctx->wwan_priv->net = dev;
	rmnet_ipa3_ctx->wwan_priv->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
	rmnet_ipa3_ctx->wwan_priv->outstanding_low = DEFAULT_OUTSTANDING_LOW;
	atomic_set(&rmnet_ipa3_ctx->wwan_priv->outstanding_pkts, 0);
	spin_lock_init(&rmnet_ipa3_ctx->wwan_priv->lock);
	init_completion(
		&rmnet_ipa3_ctx->wwan_priv->resource_granted_completion);

	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
		/* IPA_RM configuration starts */
		ret = ipa3_q6_initialize_rm();
		if (ret) {
			IPAWANERR("%s: ipa3_q6_initialize_rm failed, ret: %d\n",
				__func__, ret);
			goto q6_init_err;
		}
	}

	memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
	ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
	ipa_rm_params.reg_params.user_data = dev;
	ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify;
	ret = ipa_rm_create_resource(&ipa_rm_params);
	if (ret) {
		pr_err("%s: unable to create resource %d in IPA RM\n",
			__func__, IPA_RM_RESOURCE_WWAN_0_PROD);
		goto create_rsrc_err;
	}
	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_INACTIVITY_TIMER);
	if (ret) {
		pr_err("%s: ipa rm timer init failed %d on resource %d\n",
			__func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
		goto timer_init_err;
	}
	/* add dependency */
	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret)
		goto add_dpnd_err;
	/* setup Performance profile */
	memset(&profile, 0, sizeof(profile));
	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
		&profile);
	if (ret)
		goto set_perf_err;
	/* IPA_RM configuration ends */

	/* Enable SG support in netdevice. */
	if (ipa3_rmnet_res.ipa_advertise_sg_support)
		dev->hw_features |= NETIF_F_SG;

	if (ipa3_rmnet_res.ipa_napi_enable)
		netif_napi_add(dev, &(rmnet_ipa3_ctx->wwan_priv->napi),
			ipa3_rmnet_poll, NAPI_WEIGHT);
	ret = register_netdev(dev);
	if (ret) {
		IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
			0, ret);
		goto set_perf_err;
	}

	IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n", dev->name);
	if (ret) {
		IPAWANERR("default configuration failed rc=%d\n",
			ret);
		goto config_err;
	}
	atomic_set(&rmnet_ipa3_ctx->is_initialized, 1);
	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
		/* offline charging mode */
		ipa3_proxy_clk_unvote();
	}
	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);

	pr_info("rmnet_ipa completed initialization\n");
	return 0;
config_err:
	if (ipa3_rmnet_res.ipa_napi_enable)
		netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
	unregister_netdev(dev);
set_perf_err:
	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
			ret);
add_dpnd_err:
	ret = ipa_rm_inactivity_timer_destroy(
		IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
	if (ret)
		IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, ret);
timer_init_err:
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, ret);
create_rsrc_err:
	ipa3_q6_deinitialize_rm();
q6_init_err:
	free_netdev(dev);
	rmnet_ipa3_ctx->wwan_priv = NULL;
alloc_netdev_err:
	ipa3_wan_ioctl_deinit();
wan_ioctl_init_err:
	ipa3_del_dflt_wan_rt_tables();
setup_dflt_wan_rt_tables_err:
	ipa3_del_a7_qmap_hdr();
setup_a7_qmap_hdr_err:
	ipa3_qmi_service_exit();
	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
	return ret;
}

static int ipa3_wwan_remove(struct platform_device *pdev)
{
	int ret;

	pr_info("rmnet_ipa started deinitialization\n");
	mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
	ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl);
	if (ret < 0)
		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
	else
		rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
	if (ipa3_rmnet_res.ipa_napi_enable)
		netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
	mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
	unregister_netdev(IPA_NETDEV());
	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
		IPA_RM_RESOURCE_Q6_CONS);
	if (ret < 0)
		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
			ret);
	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR(
			"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	if (ret < 0)
		IPAWANERR("Error deleting resource %d, ret=%d\n",
			IPA_RM_RESOURCE_WWAN_0_PROD, ret);
	cancel_work_sync(&ipa3_tx_wakequeue_work);
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	if (IPA_NETDEV())
		free_netdev(IPA_NETDEV());
	rmnet_ipa3_ctx->wwan_priv = NULL;
	/* No need to remove wwan_ioctl during SSR */
	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
		ipa3_wan_ioctl_deinit();
	ipa3_del_dflt_wan_rt_tables();
	ipa3_del_a7_qmap_hdr();
	ipa3_del_mux_qmap_hdrs();
	if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false)
		ipa3_wwan_del_ul_flt_rule_to_ipa();
	ipa3_cleanup_deregister_intf();
	atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
	pr_info("rmnet_ipa completed deinitialization\n");
	return 0;
}

/**
 * rmnet_ipa_ap_suspend() - suspend callback for system PM (noirq phase)
 * @dev: pointer to device
 *
 * This callback is invoked by the PM framework when an AP suspend
 * operation is started.
 *
 * Returns -EAGAIN to the PM framework in case there are pending packets
 * in the Tx queue. This postpones the suspend operation until all the
 * pending packets have been transmitted.
 *
 * In case there are no packets to send, releases the WWAN0_PROD entity.
 * As an outcome, the number of IPA active clients should be decremented
 * until IPA clocks can be gated.
 */
static int rmnet_ipa_ap_suspend(struct device *dev)
{
	struct net_device *netdev = IPA_NETDEV();
	struct ipa3_wwan_private *wwan_ptr;

	IPAWANDBG_LOW("Enter...\n");
	if (netdev == NULL) {
		IPAWANERR("netdev is NULL.\n");
		return 0;
	}

	wwan_ptr = netdev_priv(netdev);
	if (wwan_ptr == NULL) {
		IPAWANERR("wwan_ptr is NULL.\n");
		return 0;
	}

	/* Do not allow A7 to suspend in case there are outstanding packets */
	if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
		IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
		return -EAGAIN;
	}

	/* Make sure that there is no Tx operation ongoing */
	netif_tx_lock_bh(netdev);
	ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
	netif_tx_unlock_bh(netdev);
	IPAWANDBG_LOW("Exit\n");

	return 0;
}
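
/*
 * Note: because these callbacks are wired up as suspend_noirq/resume_noirq
 * in rmnet_ipa_pm_ops below, returning -EAGAIN here aborts the system
 * suspend attempt; the PM core is expected to retry later, by which point
 * the outstanding packets should have drained.
 */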

/**
 * rmnet_ipa_ap_resume() - resume callback for system PM (noirq phase)
 * @dev: pointer to device
 *
 * This callback is invoked by the PM framework when an AP resume
 * operation is started.
 *
 * Enables the network interface queue and returns success to the
 * PM framework.
 */
static int rmnet_ipa_ap_resume(struct device *dev)
{
	struct net_device *netdev = IPA_NETDEV();

	IPAWANDBG_LOW("Enter...\n");
	if (netdev)
		netif_wake_queue(netdev);
	IPAWANDBG_LOW("Exit\n");

	return 0;
}

static void ipa_stop_polling_stats(void)
{
	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
	ipa3_rmnet_ctx.polling_interval = 0;
}

static const struct of_device_id rmnet_ipa_dt_match[] = {
	{.compatible = "qcom,rmnet-ipa3"},
	{},
};
MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);

static const struct dev_pm_ops rmnet_ipa_pm_ops = {
	.suspend_noirq = rmnet_ipa_ap_suspend,
	.resume_noirq = rmnet_ipa_ap_resume,
};

static struct platform_driver rmnet_ipa_driver = {
	.driver = {
		.name = "rmnet_ipa3",
		.owner = THIS_MODULE,
		.pm = &rmnet_ipa_pm_ops,
		.of_match_table = rmnet_ipa_dt_match,
	},
	.probe = ipa3_wwan_probe,
	.remove = ipa3_wwan_remove,
};

static int ipa3_ssr_notifier_cb(struct notifier_block *this,
	unsigned long code,
	void *data)
{
	if (!ipa3_rmnet_ctx.ipa_rmnet_ssr)
		return NOTIFY_DONE;

	switch (code) {
	case SUBSYS_BEFORE_SHUTDOWN:
		IPAWANINFO("IPA received MPSS BEFORE_SHUTDOWN\n");
		atomic_set(&rmnet_ipa3_ctx->is_ssr, 1);
		ipa3_q6_pre_shutdown_cleanup();
		if (IPA_NETDEV())
			netif_stop_queue(IPA_NETDEV());
		ipa3_qmi_stop_workqueues();
		ipa3_wan_ioctl_stop_qmi_messages();
		ipa_stop_polling_stats();
		if (atomic_read(&rmnet_ipa3_ctx->is_initialized))
			platform_driver_unregister(&rmnet_ipa_driver);
		IPAWANINFO("IPA BEFORE_SHUTDOWN handling is complete\n");
		break;
	case SUBSYS_AFTER_SHUTDOWN:
		IPAWANINFO("IPA received MPSS AFTER_SHUTDOWN\n");
		if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
			ipa3_q6_post_shutdown_cleanup();
		IPAWANINFO("IPA AFTER_SHUTDOWN handling is complete\n");
		break;
	case SUBSYS_BEFORE_POWERUP:
		IPAWANINFO("IPA received MPSS BEFORE_POWERUP\n");
		if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
			/* clean up cached QMI msg/handlers */
			ipa3_qmi_service_exit();
		/* hold a proxy vote for the modem */
		ipa3_proxy_clk_vote();
		IPAWANINFO("IPA BEFORE_POWERUP handling is complete\n");
		break;
	case SUBSYS_AFTER_POWERUP:
		IPAWANINFO("%s:%d IPA received MPSS AFTER_POWERUP\n",
			__func__, __LINE__);
		if (!atomic_read(&rmnet_ipa3_ctx->is_initialized) &&
			atomic_read(&rmnet_ipa3_ctx->is_ssr))
			platform_driver_register(&rmnet_ipa_driver);

		IPAWANINFO("IPA AFTER_POWERUP handling is complete\n");
		break;
	default:
		IPAWANDBG("Unsupported subsys notification, IPA received: %lu",
			code);
		break;
	}

	IPAWANDBG_LOW("Exit\n");
	return NOTIFY_DONE;
}

/**
 * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa_send_msg
 * @buff: pointer to buffer containing the message
 * @len: message len
 * @type: message type
 *
 * This function is invoked when ipa_send_msg is complete (provided as a
 * free function pointer along with the message).
 */
static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
{
	if (!buff) {
		IPAWANERR("Null buffer\n");
		return;
	}

	if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
		type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
		IPAWANERR("Wrong type given. buff %p type %d\n",
			buff, type);
	}
	kfree(buff);
}

/**
 * rmnet_ipa_get_stats_and_update() - Gets pipe stats from Modem
 *
 * This function queries the IPA Modem driver for the pipe stats
 * via QMI, and updates the user space IPA entity.
 */
static void rmnet_ipa_get_stats_and_update(void)
{
	struct ipa_get_data_stats_req_msg_v01 req;
	struct ipa_get_data_stats_resp_msg_v01 *resp;
	struct ipa_msg_meta msg_meta;
	int rc;

	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
		GFP_KERNEL);
	if (!resp) {
		IPAWANERR("Can't allocate memory for stats message\n");
		return;
	}

	memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));

	req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;

	rc = ipa3_qmi_get_data_stats(&req, resp);
	if (rc) {
		IPAWANERR("ipa3_qmi_get_data_stats failed: %d\n", rc);
		kfree(resp);
		return;
	}

	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
	msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
	rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
	if (rc) {
		IPAWANERR("ipa_send_msg failed: %d\n", rc);
		kfree(resp);
		return;
	}
}

/**
 * tethering_stats_poll_queue() - Stats polling function
 * @work: work entry
 *
 * This function is scheduled periodically (per the interval) in
 * order to poll the IPA Modem driver for the pipe stats.
 */
static void tethering_stats_poll_queue(struct work_struct *work)
{
	rmnet_ipa_get_stats_and_update();

	/* Schedule again only if there's an active polling interval */
	if (ipa3_rmnet_ctx.polling_interval != 0)
		schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
			msecs_to_jiffies(ipa3_rmnet_ctx.polling_interval*1000));
}
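
/*
 * polling_interval is kept in seconds, so a value of 5 re-arms the
 * delayed work msecs_to_jiffies(5 * 1000) jiffies (i.e. 5 s) after each
 * poll completes.
 */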

/**
 * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
 *
 * This function retrieves the data usage (used quota) from the IPA Modem
 * driver via QMI, and updates the IPA user space entity.
 */
static void rmnet_ipa_get_network_stats_and_update(void)
{
	struct ipa_get_apn_data_stats_req_msg_v01 req;
	struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
	struct ipa_msg_meta msg_meta;
	int rc;

	resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
		GFP_KERNEL);
	if (!resp) {
		IPAWANERR("Can't allocate memory for network stats message\n");
		return;
	}

	memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
	memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));

	req.mux_id_list_valid = true;
	req.mux_id_list_len = 1;
	req.mux_id_list[0] = ipa3_rmnet_ctx.metered_mux_id;

	rc = ipa3_qmi_get_network_stats(&req, resp);
	if (rc) {
		IPAWANERR("ipa3_qmi_get_network_stats failed: %d\n", rc);
		kfree(resp);
		return;
	}

	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
	msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
	rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
	if (rc) {
		IPAWANERR("ipa_send_msg failed: %d\n", rc);
		kfree(resp);
		return;
	}
}

/**
 * rmnet_ipa3_poll_tethering_stats() - Tethering stats polling IOCTL handler
 * @data: IOCTL data
 *
 * This function handles WAN_IOC_POLL_TETHERING_STATS.
 * In case the polling interval received is 0, polling will stop (a poll
 * already in progress is allowed to finish); the network stats are then
 * fetched once more and pushed to the IPA user space entity.
 *
 * Return codes:
 * 0: Success
 */
int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
{
	ipa3_rmnet_ctx.polling_interval = data->polling_interval_secs;

	cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);

	if (ipa3_rmnet_ctx.polling_interval == 0) {
		ipa3_qmi_stop_data_qouta();
		rmnet_ipa_get_network_stats_and_update();
		rmnet_ipa_get_stats_and_update();
		return 0;
	}

	schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
	return 0;
}
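
/*
 * Illustration only (not built): a hypothetical userspace sketch of
 * driving the handler above through the WAN ioctl char device. The
 * WAN_IOC_POLL_TETHERING_STATS command and structure come from
 * <linux/rmnet_ipa_fd_ioctl.h>; the "/dev/wwan_ioctl" node name is an
 * assumption.
 */
#if 0
#include <fcntl.h>
#include <linux/rmnet_ipa_fd_ioctl.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int example_poll_stats(int interval_secs)
{
	struct wan_ioctl_poll_tethering_stats data = {
		.polling_interval_secs = interval_secs,
	};
	int fd, rc;

	fd = open("/dev/wwan_ioctl", O_RDWR);
	if (fd < 0)
		return -1;
	/* an interval of 0 stops polling and pushes one final update */
	rc = ioctl(fd, WAN_IOC_POLL_TETHERING_STATS, &data);
	close(fd);
	return rc;
}
#endif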

/**
 * rmnet_ipa3_set_data_quota() - Data quota setting IOCTL handler
 * @data: IOCTL data
 *
 * This function handles WAN_IOC_SET_DATA_QUOTA.
 * It translates the given interface name to the Modem MUX ID and
 * sends the quota request to the IPA Modem driver via QMI.
 *
 * Return codes:
 * 0: Success
 * -EFAULT: Invalid interface name provided
 * other: See ipa_qmi_set_data_quota
 */
int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
{
	u32 mux_id;
	int index;
	struct ipa_set_data_usage_quota_req_msg_v01 req;

	index = find_vchannel_name_index(data->interface_name);
	IPAWANERR("iface name %s, quota %lu\n",
		data->interface_name,
		(unsigned long int) data->quota_mbytes);

	if (index == MAX_NUM_OF_MUX_CHANNEL) {
		IPAWANERR("%s is an invalid iface name\n",
			data->interface_name);
		return -EFAULT;
	}

	mux_id = rmnet_ipa3_ctx->mux_channel[index].mux_id;
	ipa3_rmnet_ctx.metered_mux_id = mux_id;

	memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
	req.apn_quota_list_valid = true;
	req.apn_quota_list_len = 1;
	req.apn_quota_list[0].mux_id = mux_id;
	req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;

	return ipa3_qmi_set_data_quota(&req);
}
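
/*
 * Illustration only (not built): a hypothetical userspace counterpart of
 * the handler above (field names come from <linux/rmnet_ipa_fd_ioctl.h>;
 * the device node name is an assumption, as before).
 */
#if 0
#include <fcntl.h>
#include <linux/rmnet_ipa_fd_ioctl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int example_set_quota(const char *iface, unsigned long mbytes)
{
	struct wan_ioctl_set_data_quota data;
	int fd, rc;

	memset(&data, 0, sizeof(data));
	strncpy(data.interface_name, iface,
		sizeof(data.interface_name) - 1);
	data.quota_mbytes = mbytes;

	fd = open("/dev/wwan_ioctl", O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, WAN_IOC_SET_DATA_QUOTA, &data);
	close(fd);
	return rc;
}
#endif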

/**
 * rmnet_ipa3_set_tether_client_pipe() - Tether client pipe list IOCTL handler
 * @data: IOCTL data
 *
 * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE.
 * It records (or resets) which UL source pipes and DL destination pipes
 * belong to the given tethered client, so that per-client tethering stats
 * can later be aggregated from the pipe stats.
 *
 * Return codes:
 * 0: Success
 */
int rmnet_ipa3_set_tether_client_pipe(
	struct wan_ioctl_set_tether_client_pipe *data)
{
	int number, i;

	IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
		data->ipa_client,
		data->ul_src_pipe_len,
		data->dl_dst_pipe_len,
		data->reset_client);
	number = data->ul_src_pipe_len;
	for (i = 0; i < number; i++) {
		IPAWANDBG("UL index-%d pipe %d\n", i,
			data->ul_src_pipe_list[i]);
		if (data->reset_client)
			ipa3_set_client(data->ul_src_pipe_list[i],
				0, false);
		else
			ipa3_set_client(data->ul_src_pipe_list[i],
				data->ipa_client, true);
	}
	number = data->dl_dst_pipe_len;
	for (i = 0; i < number; i++) {
		IPAWANDBG("DL index-%d pipe %d\n", i,
			data->dl_dst_pipe_list[i]);
		if (data->reset_client)
			ipa3_set_client(data->dl_dst_pipe_list[i],
				0, false);
		else
			ipa3_set_client(data->dl_dst_pipe_list[i],
				data->ipa_client, false);
	}
	return 0;
}

int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
	bool reset)
{
	struct ipa_get_data_stats_req_msg_v01 *req;
	struct ipa_get_data_stats_resp_msg_v01 *resp;
	int pipe_len, rc;

	req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
		GFP_KERNEL);
	if (!req) {
		IPAWANERR("Can't allocate memory for stats message\n");
		return -ENOMEM;
	}
	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
		GFP_KERNEL);
	if (!resp) {
		IPAWANERR("Can't allocate memory for stats message\n");
		kfree(req);
		return -ENOMEM;
	}
	memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));

	req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
	if (reset) {
		req->reset_stats_valid = true;
		req->reset_stats = true;
		IPAWANERR("reset the pipe stats\n");
	} else {
		/* print tethered-client enum */
		IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client);
	}

	rc = ipa3_qmi_get_data_stats(req, resp);
	if (rc) {
		IPAWANERR("can't get ipa_qmi_get_data_stats\n");
		kfree(req);
		kfree(resp);
		return rc;
	} else if (reset) {
		kfree(req);
		kfree(resp);
		return 0;
	}

	if (resp->dl_dst_pipe_stats_list_valid) {
		for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
			pipe_len++) {
			IPAWANDBG_LOW("Check entry(%d) dl_dst_pipe(%d)\n",
				pipe_len, resp->dl_dst_pipe_stats_list
				[pipe_len].pipe_index);
			IPAWANDBG_LOW("dl_p_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv4_packets,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv6_packets);
			IPAWANDBG_LOW("dl_b_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv4_bytes,
				(unsigned long int) resp->
				dl_dst_pipe_stats_list[pipe_len].
				num_ipv6_bytes);
			if (ipa_get_client_uplink(resp->
				dl_dst_pipe_stats_list[pipe_len].
				pipe_index) == false) {
				if (data->ipa_client == ipa_get_client(resp->
					dl_dst_pipe_stats_list[pipe_len].
					pipe_index)) {
					/* update the DL stats */
					data->ipv4_rx_packets += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv4_packets;
					data->ipv6_rx_packets += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv6_packets;
					data->ipv4_rx_bytes += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv4_bytes;
					data->ipv6_rx_bytes += resp->
					dl_dst_pipe_stats_list[pipe_len].
					num_ipv6_bytes;
				}
			}
		}
	}
	IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
		(unsigned long int) data->ipv4_rx_packets,
		(unsigned long int) data->ipv6_rx_packets,
		(unsigned long int) data->ipv4_rx_bytes,
		(unsigned long int) data->ipv6_rx_bytes);

	if (resp->ul_src_pipe_stats_list_valid) {
		for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
			pipe_len++) {
			IPAWANDBG_LOW("Check entry(%d) ul_src_pipe(%d)\n",
				pipe_len,
				resp->ul_src_pipe_stats_list[pipe_len].
				pipe_index);
			IPAWANDBG_LOW("ul_p_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv4_packets,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv6_packets);
			IPAWANDBG_LOW("ul_b_v4(%lu)v6(%lu)\n",
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv4_bytes,
				(unsigned long int) resp->
				ul_src_pipe_stats_list[pipe_len].
				num_ipv6_bytes);
			if (ipa_get_client_uplink(resp->
				ul_src_pipe_stats_list[pipe_len].
				pipe_index) == true) {
				if (data->ipa_client == ipa_get_client(resp->
					ul_src_pipe_stats_list[pipe_len].
					pipe_index)) {
					/* update the UL stats */
					data->ipv4_tx_packets += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv4_packets;
					data->ipv6_tx_packets += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv6_packets;
					data->ipv4_tx_bytes += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv4_bytes;
					data->ipv6_tx_bytes += resp->
					ul_src_pipe_stats_list[pipe_len].
					num_ipv6_bytes;
				}
			}
		}
	}
	IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
		(unsigned long int) data->ipv4_tx_packets,
		(unsigned long int) data->ipv6_tx_packets,
		(unsigned long int) data->ipv4_tx_bytes,
		(unsigned long int) data->ipv6_tx_bytes);
	kfree(req);
	kfree(resp);
	return 0;
}

/**
 * ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on quota
 * @mux_id: the MUX ID on which the quota has been reached
 *
 * This function broadcasts a Netlink event using the kobject of the
 * rmnet_ipa interface in order to alert the user space that the quota
 * on the specific interface which matches the mux_id has been reached.
 *
 */
void ipa3_broadcast_quota_reach_ind(u32 mux_id)
{
	char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
	char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
	char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
	char *envp[IPA_UEVENT_NUM_EVNP] = {
		alert_msg, iface_name_l, iface_name_m, NULL };
	int res;
	int index;

	index = ipa3_find_mux_channel_index(mux_id);

	if (index == MAX_NUM_OF_MUX_CHANNEL) {
		IPAWANERR("%u is an invalid mux ID\n", mux_id);
		return;
	}

	res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
		"ALERT_NAME=%s", "quotaReachedAlert");
	if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
		IPAWANERR("message too long (%d)", res);
		return;
	}
	/* posting msg for L-release for CNE */
	res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
		"UPSTREAM=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
		IPAWANERR("message too long (%d)", res);
		return;
	}
	/* posting msg for M-release for CNE */
	res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
		"INTERFACE=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
		IPAWANERR("message too long (%d)", res);
		return;
	}

	IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
		alert_msg, iface_name_l, iface_name_m);
	kobject_uevent_env(&(IPA_NETDEV()->dev.kobj),
		KOBJ_CHANGE, envp);
}
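
/*
 * For a channel named "rmnet_data0" (an example name), the broadcast
 * above surfaces to user space as a KOBJ_CHANGE uevent on the rmnet_ipa
 * device carrying:
 *
 *	ALERT_NAME=quotaReachedAlert
 *	UPSTREAM=rmnet_data0
 *	INTERFACE=rmnet_data0
 */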

/**
 * ipa3_q6_handshake_complete() - Perform operations once Q6 is up
 * @ssr_bootup: Indicates whether this is a cold boot-up or post-SSR.
 *
 * This function is invoked once the handshake between the IPA AP driver
 * and IPA Q6 driver is complete. At this point, it is possible to perform
 * operations which can't be performed until the IPA Q6 driver is up.
 *
 */
void ipa3_q6_handshake_complete(bool ssr_bootup)
{
	if (ssr_bootup) {
		/*
		 * In case the uC is required to be loaded by the Modem,
		 * the proxy vote will be removed only when uC loading is
		 * complete and the indication is received by the AP. After
		 * SSR, the uC is already loaded. Therefore, the proxy vote
		 * can be removed once Modem init is complete.
		 */
		ipa3_proxy_clk_unvote();

		/*
		 * It is required to recover the network stats after
		 * SSR recovery
		 */
		rmnet_ipa_get_network_stats_and_update();
	}
}

static int __init ipa3_wwan_init(void)
{
	rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
	if (!rmnet_ipa3_ctx) {
		IPAWANERR("no memory\n");
		return -ENOMEM;
	}

	atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);

	mutex_init(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
	rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
	/* Register for Modem SSR */
	rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
		SUBSYS_MODEM,
		&ipa3_ssr_notifier);
	if (!IS_ERR(rmnet_ipa3_ctx->subsys_notify_handle))
		return platform_driver_register(&rmnet_ipa_driver);
	else
		return (int)PTR_ERR(rmnet_ipa3_ctx->subsys_notify_handle);
}

static void __exit ipa3_wwan_cleanup(void)
{
	int ret;

	mutex_destroy(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
	ret = subsys_notif_unregister_notifier(
		rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
	if (ret)
		IPAWANERR(
			"Error subsys_notif_unregister_notifier system %s, ret=%d\n",
			SUBSYS_MODEM, ret);
	platform_driver_unregister(&rmnet_ipa_driver);
	kfree(rmnet_ipa3_ctx);
	rmnet_ipa3_ctx = NULL;
}

static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type)
{
	if (!buff)
		IPAWANERR("Null buffer.\n");
	kfree(buff);
}

static void ipa3_rmnet_rx_cb(void *priv)
{
	IPAWANDBG_LOW("\n");
	napi_schedule(&(rmnet_ipa3_ctx->wwan_priv->napi));
}

static int ipa3_rmnet_poll(struct napi_struct *napi, int budget)
{
	int rcvd_pkts = 0;

	rcvd_pkts = ipa_rx_poll(rmnet_ipa3_ctx->ipa3_to_apps_hdl,
		NAPI_WEIGHT);
	IPAWANDBG_LOW("rcvd packets: %d\n", rcvd_pkts);
	return rcvd_pkts;
}
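
/*
 * NAPI contract note: ipa_rx_poll() is assumed to call napi_complete()
 * itself once it processes fewer packets than its budget, which is why
 * this wrapper only has to report the number of received packets.
 */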

late_initcall(ipa3_wwan_init);
module_exit(ipa3_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
MODULE_LICENSE("GPL v2");