1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
4 */
5
6#define pr_fmt(fmt) "icnss2: " fmt
7
8#include <linux/of_address.h>
9#include <linux/clk.h>
10#include <linux/iommu.h>
11#include <linux/export.h>
12#include <linux/err.h>
13#include <linux/of.h>
14#include <linux/of_device.h>
15#include <linux/init.h>
16#include <linux/io.h>
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/debugfs.h>
20#include <linux/seq_file.h>
21#include <linux/slab.h>
22#include <linux/regulator/consumer.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/delay.h>
26#include <linux/dma-mapping.h>
27#include <linux/thread_info.h>
28#include <linux/uaccess.h>
29#include <linux/adc-tm-clients.h>
30#include <linux/iio/consumer.h>
31#include <linux/etherdevice.h>
32#include <linux/of.h>
33#include <linux/of_irq.h>
34#include <linux/soc/qcom/qmi.h>
35#include <linux/sysfs.h>
36#include <soc/qcom/memory_dump.h>
37#include <soc/qcom/secure_buffer.h>
38#include <soc/qcom/subsystem_notif.h>
39#include <soc/qcom/subsystem_restart.h>
40#include <soc/qcom/socinfo.h>
41#include <soc/qcom/ramdump.h>
42#include "main.h"
43#include "qmi.h"
44#include "debug.h"
45#include "power.h"
46#include "genl.h"
47
48#define MAX_PROP_SIZE 32
49#define NUM_LOG_PAGES 10
50#define NUM_LOG_LONG_PAGES 4
51#define ICNSS_MAGIC 0x5abc5abc
52
53#define ICNSS_SERVICE_LOCATION_CLIENT_NAME "ICNSS-WLAN"
54#define ICNSS_WLAN_SERVICE_NAME "wlan/fw"
55#define ICNSS_DEFAULT_FEATURE_MASK 0x01
56
57#define ICNSS_QUIRKS_DEFAULT BIT(FW_REJUVENATE_ENABLE)
58#define ICNSS_MAX_PROBE_CNT 2
59
60#define ICNSS_BDF_TYPE_DEFAULT ICNSS_BDF_ELF
61
62#define PROBE_TIMEOUT 15000
63#define WLFW_TIMEOUT msecs_to_jiffies(3000)
64
65static struct icnss_priv *penv;
66
67uint64_t dynamic_feature_mask = ICNSS_DEFAULT_FEATURE_MASK;
68
69#define ICNSS_EVENT_PENDING 2989
70
71#define ICNSS_EVENT_SYNC BIT(0)
72#define ICNSS_EVENT_UNINTERRUPTIBLE BIT(1)
73#define ICNSS_EVENT_SYNC_UNINTERRUPTIBLE (ICNSS_EVENT_UNINTERRUPTIBLE | \
74 ICNSS_EVENT_SYNC)
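/*
 * Flag semantics, as implemented by icnss_driver_event_post() and
 * icnss_soc_wake_event_post() below: ICNSS_EVENT_SYNC makes the poster
 * block on the event's completion and return the handler's result, while
 * ICNSS_EVENT_UNINTERRUPTIBLE turns that wait into wait_for_completion()
 * instead of wait_for_completion_interruptible(). For example,
 * icnss_unregister_driver() posts UNREGISTER_DRIVER with
 * ICNSS_EVENT_SYNC_UNINTERRUPTIBLE so driver removal cannot be cut short
 * by a signal.
 */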
75
76
77enum icnss_pdr_cause_index {
78 ICNSS_FW_CRASH,
79 ICNSS_ROOT_PD_CRASH,
80 ICNSS_ROOT_PD_SHUTDOWN,
81 ICNSS_HOST_ERROR,
82};
83
84static const char * const icnss_pdr_cause[] = {
85 [ICNSS_FW_CRASH] = "FW crash",
86 [ICNSS_ROOT_PD_CRASH] = "Root PD crashed",
87 [ICNSS_ROOT_PD_SHUTDOWN] = "Root PD shutdown",
88 [ICNSS_HOST_ERROR] = "Host error",
89};
90
91static void icnss_set_plat_priv(struct icnss_priv *priv)
92{
93 penv = priv;
94}
95
96static struct icnss_priv *icnss_get_plat_priv(void)
97{
98 return penv;
99}
100
101static ssize_t icnss_sysfs_store(struct kobject *kobj,
102 struct kobj_attribute *attr,
103 const char *buf, size_t count)
104{
105 struct icnss_priv *priv = icnss_get_plat_priv();
106
107 atomic_set(&priv->is_shutdown, true);
108 icnss_pr_dbg("Received shutdown indication");
109 return count;
110}
111
112static struct kobj_attribute icnss_sysfs_attribute =
113__ATTR(shutdown, 0660, NULL, icnss_sysfs_store);
114
115static void icnss_pm_stay_awake(struct icnss_priv *priv)
116{
117 if (atomic_inc_return(&priv->pm_count) != 1)
118 return;
119
120 icnss_pr_vdbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
121 atomic_read(&priv->pm_count));
122
123 pm_stay_awake(&priv->pdev->dev);
124
125 priv->stats.pm_stay_awake++;
126}
127
128static void icnss_pm_relax(struct icnss_priv *priv)
129{
130 int r = atomic_dec_return(&priv->pm_count);
131
132 WARN_ON(r < 0);
133
134 if (r != 0)
135 return;
136
137 icnss_pr_vdbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
138 atomic_read(&priv->pm_count));
139
140 pm_relax(&priv->pdev->dev);
141 priv->stats.pm_relax++;
142}
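/*
 * icnss_pm_stay_awake() and icnss_pm_relax() are a reference-counted pair
 * around the platform device's wakeup source: only the 0 -> 1 transition
 * of pm_count calls pm_stay_awake() and only the final decrement back to
 * zero calls pm_relax(), so overlapping users (event posting and the event
 * work queues, for instance) stack safely.
 */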
143
144char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
145{
146 switch (type) {
147 case ICNSS_DRIVER_EVENT_SERVER_ARRIVE:
148 return "SERVER_ARRIVE";
149 case ICNSS_DRIVER_EVENT_SERVER_EXIT:
150 return "SERVER_EXIT";
151 case ICNSS_DRIVER_EVENT_FW_READY_IND:
152 return "FW_READY";
153 case ICNSS_DRIVER_EVENT_REGISTER_DRIVER:
154 return "REGISTER_DRIVER";
155 case ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
156 return "UNREGISTER_DRIVER";
157 case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
158 return "PD_SERVICE_DOWN";
159 case ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND:
160 return "FW_EARLY_CRASH_IND";
161 case ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
162 return "IDLE_SHUTDOWN";
163 case ICNSS_DRIVER_EVENT_IDLE_RESTART:
164 return "IDLE_RESTART";
165 case ICNSS_DRIVER_EVENT_FW_INIT_DONE_IND:
166 return "FW_INIT_DONE";
167 case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
168 return "QDSS_TRACE_REQ_MEM";
169 case ICNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
170 return "QDSS_TRACE_SAVE";
171 case ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
172 return "QDSS_TRACE_FREE";
173 case ICNSS_DRIVER_EVENT_MAX:
174 return "EVENT_MAX";
175 }
176
177 return "UNKNOWN";
178};
179
180char *icnss_soc_wake_event_to_str(enum icnss_soc_wake_event_type type)
181{
182 switch (type) {
183 case ICNSS_SOC_WAKE_REQUEST_EVENT:
184 return "SOC_WAKE_REQUEST";
185 case ICNSS_SOC_WAKE_RELEASE_EVENT:
186 return "SOC_WAKE_RELEASE";
187 case ICNSS_SOC_WAKE_EVENT_MAX:
188 return "SOC_EVENT_MAX";
189 }
190
191 return "UNKNOWN";
192};
193
194int icnss_driver_event_post(struct icnss_priv *priv,
195 enum icnss_driver_event_type type,
196 u32 flags, void *data)
197{
198 struct icnss_driver_event *event;
199 unsigned long irq_flags;
200 int gfp = GFP_KERNEL;
201 int ret = 0;
202
203 if (!priv)
204 return -ENODEV;
205
206 icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
207 icnss_driver_event_to_str(type), type, current->comm,
208 flags, priv->state);
209
210 if (type >= ICNSS_DRIVER_EVENT_MAX) {
211 icnss_pr_err("Invalid Event type: %d, can't post", type);
212 return -EINVAL;
213 }
214
215 if (in_interrupt() || irqs_disabled())
216 gfp = GFP_ATOMIC;
217
218 event = kzalloc(sizeof(*event), gfp);
219 if (event == NULL)
220 return -ENOMEM;
221
222 icnss_pm_stay_awake(priv);
223
224 event->type = type;
225 event->data = data;
226 init_completion(&event->complete);
227 event->ret = ICNSS_EVENT_PENDING;
228 event->sync = !!(flags & ICNSS_EVENT_SYNC);
229
230 spin_lock_irqsave(&priv->event_lock, irq_flags);
231 list_add_tail(&event->list, &priv->event_list);
232 spin_unlock_irqrestore(&priv->event_lock, irq_flags);
233
234 priv->stats.events[type].posted++;
235 queue_work(priv->event_wq, &priv->event_work);
236
237 if (!(flags & ICNSS_EVENT_SYNC))
238 goto out;
239
240 if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
241 wait_for_completion(&event->complete);
242 else
243 ret = wait_for_completion_interruptible(&event->complete);
244
245 icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
246 icnss_driver_event_to_str(type), type, priv->state, ret,
247 event->ret);
248
249 spin_lock_irqsave(&priv->event_lock, irq_flags);
250 if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
251 event->sync = false;
252 spin_unlock_irqrestore(&priv->event_lock, irq_flags);
253 ret = -EINTR;
254 goto out;
255 }
256 spin_unlock_irqrestore(&priv->event_lock, irq_flags);
257
258 ret = event->ret;
259 kfree(event);
260
261out:
262 icnss_pm_relax(priv);
263 return ret;
264}
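/*
 * Typical (illustrative) call pattern, mirroring icnss_modem_notifier_nb()
 * further down:
 *
 *   event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
 *   event_data->crashed = notif->crashed;
 *   icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
 *                           ICNSS_EVENT_SYNC, event_data);
 *
 * Ownership of event_data passes to the handler, which frees it once the
 * event is processed (see icnss_driver_event_pd_service_down()).
 */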
265
266int icnss_soc_wake_event_post(struct icnss_priv *priv,
267 enum icnss_soc_wake_event_type type,
268 u32 flags, void *data)
269{
270 struct icnss_soc_wake_event *event;
271 unsigned long irq_flags;
272 int gfp = GFP_KERNEL;
273 int ret = 0;
274
275 if (!priv)
276 return -ENODEV;
277
278 icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
279 icnss_soc_wake_event_to_str(type), type, current->comm,
280 flags, priv->state);
281
282 if (type >= ICNSS_SOC_WAKE_EVENT_MAX) {
283 icnss_pr_err("Invalid Event type: %d, can't post", type);
284 return -EINVAL;
285 }
286
287 if (in_interrupt() || irqs_disabled())
288 gfp = GFP_ATOMIC;
289
290 event = kzalloc(sizeof(*event), gfp);
291 if (!event)
292 return -ENOMEM;
293
294 icnss_pm_stay_awake(priv);
295
296 event->type = type;
297 event->data = data;
298 init_completion(&event->complete);
299 event->ret = ICNSS_EVENT_PENDING;
300 event->sync = !!(flags & ICNSS_EVENT_SYNC);
301
302 spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
303 list_add_tail(&event->list, &priv->soc_wake_msg_list);
304 spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
305
306 priv->stats.soc_wake_events[type].posted++;
307 queue_work(priv->soc_wake_wq, &priv->soc_wake_msg_work);
308
309 if (!(flags & ICNSS_EVENT_SYNC))
310 goto out;
311
312 if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
313 wait_for_completion(&event->complete);
314 else
315 ret = wait_for_completion_interruptible(&event->complete);
316
317 icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
318 icnss_soc_wake_event_to_str(type), type, priv->state, ret,
319 event->ret);
320
321 spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
322 if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
323 event->sync = false;
324 spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
325 ret = -EINTR;
326 goto out;
327 }
328 spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
329
330 ret = event->ret;
331 kfree(event);
332
333out:
334 icnss_pm_relax(priv);
335 return ret;
336}
337
338bool icnss_is_fw_ready(void)
339{
340 if (!penv)
341 return false;
342 else
343 return test_bit(ICNSS_FW_READY, &penv->state);
344}
345EXPORT_SYMBOL(icnss_is_fw_ready);
346
347void icnss_block_shutdown(bool status)
348{
349 if (!penv)
350 return;
351
352 if (status) {
353 set_bit(ICNSS_BLOCK_SHUTDOWN, &penv->state);
354 reinit_completion(&penv->unblock_shutdown);
355 } else {
356 clear_bit(ICNSS_BLOCK_SHUTDOWN, &penv->state);
357 complete(&penv->unblock_shutdown);
358 }
359}
360EXPORT_SYMBOL(icnss_block_shutdown);
361
362bool icnss_is_fw_down(void)
363{
364
365 struct icnss_priv *priv = icnss_get_plat_priv();
366
367 if (!priv)
368 return false;
369
370 return test_bit(ICNSS_FW_DOWN, &priv->state) ||
371 test_bit(ICNSS_PD_RESTART, &priv->state) ||
372 test_bit(ICNSS_REJUVENATE, &priv->state);
373}
374EXPORT_SYMBOL(icnss_is_fw_down);
375
376bool icnss_is_rejuvenate(void)
377{
378 if (!penv)
379 return false;
380 else
381 return test_bit(ICNSS_REJUVENATE, &penv->state);
382}
383EXPORT_SYMBOL(icnss_is_rejuvenate);
384
385bool icnss_is_pdr(void)
386{
387 if (!penv)
388 return false;
389 else
390 return test_bit(ICNSS_PDR, &penv->state);
391}
392EXPORT_SYMBOL(icnss_is_pdr);
393
394static irqreturn_t fw_error_fatal_handler(int irq, void *ctx)
395{
396 struct icnss_priv *priv = ctx;
397
398 if (priv)
399 priv->force_err_fatal = true;
400
401 icnss_pr_err("Received force error fatal request from FW\n");
402
403 return IRQ_HANDLED;
404}
405
406static irqreturn_t fw_crash_indication_handler(int irq, void *ctx)
407{
408 struct icnss_priv *priv = ctx;
409 struct icnss_uevent_fw_down_data fw_down_data = {0};
410
411 icnss_pr_err("Received early crash indication from FW\n");
412
413 if (priv) {
414 set_bit(ICNSS_FW_DOWN, &priv->state);
415 icnss_ignore_fw_timeout(true);
416
417 if (test_bit(ICNSS_FW_READY, &priv->state)) {
418 fw_down_data.crashed = true;
419 icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
420 &fw_down_data);
421 }
422 }
423
424 icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND,
425 0, NULL);
426
427 return IRQ_HANDLED;
428}
429
430static void register_fw_error_notifications(struct device *dev)
431{
432 struct icnss_priv *priv = dev_get_drvdata(dev);
433 struct device_node *dev_node;
434 int irq = 0, ret = 0;
435
436 if (!priv)
437 return;
438
439 dev_node = of_find_node_by_name(NULL, "qcom,smp2p_map_wlan_1_in");
440 if (!dev_node) {
441 icnss_pr_err("Failed to get smp2p node for force-fatal-error\n");
442 return;
443 }
444
445 icnss_pr_dbg("smp2p node->name=%s\n", dev_node->name);
446
447 if (strcmp("qcom,smp2p_map_wlan_1_in", dev_node->name) == 0) {
448 ret = irq = of_irq_get_byname(dev_node,
449 "qcom,smp2p-force-fatal-error");
450 if (ret < 0) {
451 icnss_pr_err("Unable to get force-fatal-error irq %d\n",
452 irq);
453 return;
454 }
455 }
456
457 ret = devm_request_threaded_irq(dev, irq, NULL, fw_error_fatal_handler,
458 IRQF_ONESHOT | IRQF_TRIGGER_RISING,
459 "wlanfw-err", priv);
460 if (ret < 0) {
461 icnss_pr_err("Unable to register for error fatal IRQ handler %d ret = %d",
462 irq, ret);
463 return;
464 }
465 icnss_pr_dbg("FW force error fatal handler registered irq = %d\n", irq);
466 priv->fw_error_fatal_irq = irq;
467}
468
469static void register_early_crash_notifications(struct device *dev)
470{
471 struct icnss_priv *priv = dev_get_drvdata(dev);
472 struct device_node *dev_node;
473 int irq = 0, ret = 0;
474
475 if (!priv)
476 return;
477
478 dev_node = of_find_node_by_name(NULL, "qcom,smp2p_map_wlan_1_in");
479 if (!dev_node) {
480 icnss_pr_err("Failed to get smp2p node for early-crash-ind\n");
481 return;
482 }
483
484 icnss_pr_dbg("smp2p node->name=%s\n", dev_node->name);
485
486 if (strcmp("qcom,smp2p_map_wlan_1_in", dev_node->name) == 0) {
487 ret = irq = of_irq_get_byname(dev_node,
488 "qcom,smp2p-early-crash-ind");
489 if (ret < 0) {
490 icnss_pr_err("Unable to get early-crash-ind irq %d\n",
491 irq);
492 return;
493 }
494 }
495
496 ret = devm_request_threaded_irq(dev, irq, NULL,
497 fw_crash_indication_handler,
498 IRQF_ONESHOT | IRQF_TRIGGER_RISING,
499 "wlanfw-early-crash-ind", priv);
500 if (ret < 0) {
501 icnss_pr_err("Unable to register for early crash indication IRQ handler %d ret = %d",
502 irq, ret);
503 return;
504 }
505 icnss_pr_dbg("FW crash indication handler registered irq = %d\n", irq);
506 priv->fw_early_crash_irq = irq;
507}
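/*
 * Both SMP2P helpers above resolve their interrupt from the same
 * "qcom,smp2p_map_wlan_1_in" device-tree node:
 * "qcom,smp2p-force-fatal-error" is wired to fw_error_fatal_handler(),
 * which only records force_err_fatal, while "qcom,smp2p-early-crash-ind"
 * is wired to fw_crash_indication_handler(), which marks the FW down,
 * sends ICNSS_UEVENT_FW_DOWN to a ready driver and posts
 * ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND for the work queue.
 */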
508
509int icnss_call_driver_uevent(struct icnss_priv *priv,
510 enum icnss_uevent uevent, void *data)
511{
512 struct icnss_uevent_data uevent_data;
513
514 if (!priv->ops || !priv->ops->uevent)
515 return 0;
516
517 icnss_pr_dbg("Calling driver uevent state: 0x%lx, uevent: %d\n",
518 priv->state, uevent);
519
520 uevent_data.uevent = uevent;
521 uevent_data.data = data;
522
523 return priv->ops->uevent(&priv->pdev->dev, &uevent_data);
524}
525
526static int icnss_driver_event_server_arrive(struct icnss_priv *priv,
527 void *data)
528{
529 int ret = 0;
530 bool ignore_assert = false;
531
532 if (!priv)
533 return -ENODEV;
534
535 set_bit(ICNSS_WLFW_EXISTS, &priv->state);
536 clear_bit(ICNSS_FW_DOWN, &priv->state);
537 clear_bit(ICNSS_FW_READY, &priv->state);
538
539 icnss_ignore_fw_timeout(false);
540
541 if (test_bit(ICNSS_WLFW_CONNECTED, &priv->state)) {
542 icnss_pr_err("QMI Server already in Connected State\n");
543 ICNSS_ASSERT(0);
544 }
545
546 ret = icnss_connect_to_fw_server(priv, data);
547 if (ret)
548 goto fail;
549
550 set_bit(ICNSS_WLFW_CONNECTED, &priv->state);
551
552 ret = icnss_hw_power_on(priv);
553 if (ret)
554 goto clear_server;
555
556 ret = wlfw_ind_register_send_sync_msg(priv);
557 if (ret < 0) {
558 if (ret == -EALREADY) {
559 ret = 0;
560 goto qmi_registered;
561 }
562 ignore_assert = true;
563 goto err_power_on;
564 }
565
566 if (priv->device_id == WCN6750_DEVICE_ID) {
567 ret = wlfw_host_cap_send_sync(priv);
568 if (ret < 0)
569 goto err_power_on;
570 }
571
572 if (priv->device_id == ADRASTEA_DEVICE_ID) {
573 if (!priv->msa_va) {
574 icnss_pr_err("Invalid MSA address\n");
575 ret = -EINVAL;
576 goto err_power_on;
577 }
578
579 ret = wlfw_msa_mem_info_send_sync_msg(priv);
580 if (ret < 0) {
581 ignore_assert = true;
582 goto err_power_on;
583 }
584
585 ret = wlfw_msa_ready_send_sync_msg(priv);
586 if (ret < 0) {
587 ignore_assert = true;
588 goto err_power_on;
589 }
590 }
591
592 ret = wlfw_cap_send_sync_msg(priv);
593 if (ret < 0) {
594 ignore_assert = true;
595 goto err_power_on;
596 }
597
598 if (priv->device_id == WCN6750_DEVICE_ID) {
599 ret = wlfw_device_info_send_msg(priv);
600 if (ret < 0) {
601 ignore_assert = true;
602 goto err_power_on;
603 }
604
605 priv->mem_base_va = devm_ioremap(&priv->pdev->dev,
606 priv->mem_base_pa,
607 priv->mem_base_size);
608 if (!priv->mem_base_va) {
609 icnss_pr_err("Ioremap failed for bar address\n");
610 goto err_power_on;
611 }
612
613 icnss_pr_dbg("MEM_BASE pa: %pa, va: 0x%pK\n",
614 &priv->mem_base_pa,
615 priv->mem_base_va);
616
617 icnss_wlfw_bdf_dnld_send_sync(priv, ICNSS_BDF_REGDB);
618
619 ret = icnss_wlfw_bdf_dnld_send_sync(priv,
620 priv->ctrl_params.bdf_type);
621
622 }
623
624 if (priv->device_id == ADRASTEA_DEVICE_ID) {
625 wlfw_dynamic_feature_mask_send_sync_msg(priv,
626 dynamic_feature_mask);
627 }
628
629 if (!priv->fw_error_fatal_irq)
630 register_fw_error_notifications(&priv->pdev->dev);
631
632 if (!priv->fw_early_crash_irq)
633 register_early_crash_notifications(&priv->pdev->dev);
634
635 if (priv->vbatt_supported)
636 icnss_init_vph_monitor(priv);
637
638 return ret;
639
640err_power_on:
641 icnss_hw_power_off(priv);
642clear_server:
643 icnss_clear_server(priv);
644fail:
645 ICNSS_ASSERT(ignore_assert);
646qmi_registered:
647 return ret;
648}
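/*
 * Summary of the server-arrive sequence above: connect to the wlfw QMI
 * server, power the hardware on and register for indications; then, per
 * target, either send host capabilities, query device info, ioremap the
 * reported BAR and download REGDB plus the configured BDF (WCN6750), or
 * hand over MSA memory and push the dynamic feature mask (ADRASTEA).
 * Any failure powers the hardware back off and asserts unless
 * ignore_assert was set for that step.
 */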
649
650static int icnss_driver_event_server_exit(struct icnss_priv *priv)
651{
652 if (!priv)
653 return -ENODEV;
654
655 icnss_pr_info("WLAN FW Service Disconnected: 0x%lx\n", priv->state);
656
657 icnss_clear_server(priv);
658
659 if (priv->adc_tm_dev && priv->vbatt_supported)
660 adc_tm5_disable_chan_meas(priv->adc_tm_dev,
661 &priv->vph_monitor_params);
662
663 return 0;
664}
665
666static int icnss_call_driver_probe(struct icnss_priv *priv)
667{
668 int ret = 0;
669 int probe_cnt = 0;
670
671 if (!priv->ops || !priv->ops->probe)
672 return 0;
673
674 if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
675 return -EINVAL;
676
677 icnss_pr_dbg("Calling driver probe state: 0x%lx\n", priv->state);
678
679 icnss_hw_power_on(priv);
680
681 icnss_block_shutdown(true);
682 while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
683 ret = priv->ops->probe(&priv->pdev->dev);
684 probe_cnt++;
685 if (ret != -EPROBE_DEFER)
686 break;
687 }
688 if (ret < 0) {
689 icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
690 ret, priv->state, probe_cnt);
691 icnss_block_shutdown(false);
692 goto out;
693 }
694
695 icnss_block_shutdown(false);
696 set_bit(ICNSS_DRIVER_PROBED, &priv->state);
697
698 return 0;
699
700out:
701 icnss_hw_power_off(priv);
702 return ret;
703}
704
705static int icnss_call_driver_shutdown(struct icnss_priv *priv)
706{
707 if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
708 goto out;
709
710 if (!priv->ops || !priv->ops->shutdown)
711 goto out;
712
713 if (test_bit(ICNSS_SHUTDOWN_DONE, &priv->state))
714 goto out;
715
716 icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
717
718 priv->ops->shutdown(&priv->pdev->dev);
719 set_bit(ICNSS_SHUTDOWN_DONE, &priv->state);
720
721out:
722 return 0;
723}
724
725static int icnss_pd_restart_complete(struct icnss_priv *priv)
726{
727 int ret;
728
729 icnss_pm_relax(priv);
730
731 icnss_call_driver_shutdown(priv);
732
733 clear_bit(ICNSS_PDR, &priv->state);
734 clear_bit(ICNSS_REJUVENATE, &priv->state);
735 clear_bit(ICNSS_PD_RESTART, &priv->state);
736 priv->early_crash_ind = false;
737 priv->is_ssr = false;
738
739 if (!priv->ops || !priv->ops->reinit)
740 goto out;
741
742 if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
743 icnss_pr_err("FW is in bad state, state: 0x%lx\n",
744 priv->state);
745 goto out;
746 }
747
748 if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
749 goto call_probe;
750
751 icnss_pr_dbg("Calling driver reinit state: 0x%lx\n", priv->state);
752
753 icnss_hw_power_on(priv);
754
755 icnss_block_shutdown(true);
756
757 ret = priv->ops->reinit(&priv->pdev->dev);
758 if (ret < 0) {
759 icnss_fatal_err("Driver reinit failed: %d, state: 0x%lx\n",
760 ret, priv->state);
761 if (!priv->allow_recursive_recovery)
762 ICNSS_ASSERT(false);
763 icnss_block_shutdown(false);
764 goto out_power_off;
765 }
766
767out:
768 icnss_block_shutdown(false);
769 clear_bit(ICNSS_SHUTDOWN_DONE, &priv->state);
770 return 0;
771
772call_probe:
773 return icnss_call_driver_probe(priv);
774
775out_power_off:
776 icnss_hw_power_off(priv);
777
778 return ret;
779}
780
781
782static int icnss_driver_event_fw_ready_ind(struct icnss_priv *priv, void *data)
783{
784 int ret = 0;
785
786 if (!priv)
787 return -ENODEV;
788
789 set_bit(ICNSS_FW_READY, &priv->state);
790 clear_bit(ICNSS_MODE_ON, &priv->state);
791
792 icnss_pr_info("WLAN FW is ready: 0x%lx\n", priv->state);
793
794 icnss_hw_power_off(priv);
795
796 if (!priv->pdev) {
797 icnss_pr_err("Device is not ready\n");
798 ret = -ENODEV;
799 goto out;
800 }
801
802 if (test_bit(ICNSS_PD_RESTART, &priv->state))
803 ret = icnss_pd_restart_complete(priv);
804 else
805 ret = icnss_call_driver_probe(priv);
806
807out:
808 return ret;
809}
810
811static int icnss_driver_event_fw_init_done(struct icnss_priv *priv, void *data)
812{
813 int ret = 0;
814
815 if (!priv)
816 return -ENODEV;
817
818 icnss_pr_info("WLAN FW Initialization done: 0x%lx\n", priv->state);
819
820 ret = wlfw_wlan_mode_send_sync_msg(priv,
821 (enum wlfw_driver_mode_enum_v01)ICNSS_CALIBRATION);
822
823 return ret;
824}
825
826int icnss_alloc_qdss_mem(struct icnss_priv *priv)
827{
828 struct platform_device *pdev = priv->pdev;
829 struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
830 int i, j;
831
832 for (i = 0; i < priv->qdss_mem_seg_len; i++) {
833 if (!qdss_mem[i].va && qdss_mem[i].size) {
834 qdss_mem[i].va =
835 dma_alloc_coherent(&pdev->dev,
836 qdss_mem[i].size,
837 &qdss_mem[i].pa,
838 GFP_KERNEL);
839 if (!qdss_mem[i].va) {
840 icnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chuck-ID: %d\n",
841 qdss_mem[i].size,
842 qdss_mem[i].type, i);
843 break;
844 }
845 }
846 }
847
848 /* Best-effort allocation for QDSS trace */
849 if (i < priv->qdss_mem_seg_len) {
850 for (j = i; j < priv->qdss_mem_seg_len; j++) {
851 qdss_mem[j].type = 0;
852 qdss_mem[j].size = 0;
853 }
854 priv->qdss_mem_seg_len = i;
855 }
856
857 return 0;
858}
859
860void icnss_free_qdss_mem(struct icnss_priv *priv)
861{
862 struct platform_device *pdev = priv->pdev;
863 struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
864 int i;
865
866 for (i = 0; i < priv->qdss_mem_seg_len; i++) {
867 if (qdss_mem[i].va && qdss_mem[i].size) {
868 icnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
869 &qdss_mem[i].pa, qdss_mem[i].size,
870 qdss_mem[i].type);
871 dma_free_coherent(&pdev->dev,
872 qdss_mem[i].size, qdss_mem[i].va,
873 qdss_mem[i].pa);
874 qdss_mem[i].va = NULL;
875 qdss_mem[i].pa = 0;
876 qdss_mem[i].size = 0;
877 qdss_mem[i].type = 0;
878 }
879 }
880 priv->qdss_mem_seg_len = 0;
881}
882
883static int icnss_qdss_trace_req_mem_hdlr(struct icnss_priv *priv)
884{
885 int ret = 0;
886
887 ret = icnss_alloc_qdss_mem(priv);
888 if (ret < 0)
889 return ret;
890
891 return wlfw_qdss_trace_mem_info_send_sync(priv);
892}
893
894static void *icnss_qdss_trace_pa_to_va(struct icnss_priv *priv,
895 u64 pa, u32 size, int *seg_id)
896{
897 int i = 0;
898 struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
899 u64 offset = 0;
900 void *va = NULL;
901 u64 local_pa;
902 u32 local_size;
903
904 for (i = 0; i < priv->qdss_mem_seg_len; i++) {
905 local_pa = (u64)qdss_mem[i].pa;
906 local_size = (u32)qdss_mem[i].size;
907 if (pa == local_pa && size <= local_size) {
908 va = qdss_mem[i].va;
909 break;
910 }
911 if (pa > local_pa &&
912 pa < local_pa + local_size &&
913 pa + size <= local_pa + local_size) {
914 offset = pa - local_pa;
915 va = qdss_mem[i].va + offset;
916 break;
917 }
918 }
919
920 *seg_id = i;
921 return va;
922}
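/*
 * Worked example (values purely illustrative): with qdss_mem[0].pa =
 * 0x8fd00000 and qdss_mem[0].size = 0x100000, a lookup of pa = 0x8fd80000,
 * size = 0x1000 takes the second branch and returns
 * qdss_mem[0].va + 0x80000 with *seg_id = 0; a pa outside every segment
 * returns NULL with *seg_id == qdss_mem_seg_len.
 */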
923
924static int icnss_qdss_trace_save_hdlr(struct icnss_priv *priv,
925 void *data)
926{
927 struct icnss_qmi_event_qdss_trace_save_data *event_data = data;
928 struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
929 int ret = 0;
930 int i;
931 void *va = NULL;
932 u64 pa;
933 u32 size;
934 int seg_id = 0;
935
936 if (!priv->qdss_mem_seg_len) {
937 icnss_pr_err("Memory for QDSS trace is not available\n");
938 return -ENOMEM;
939 }
940
941 if (event_data->mem_seg_len == 0) {
942 for (i = 0; i < priv->qdss_mem_seg_len; i++) {
943 ret = icnss_genl_send_msg(qdss_mem[i].va,
944 ICNSS_GENL_MSG_TYPE_QDSS,
945 event_data->file_name,
946 qdss_mem[i].size);
947 if (ret < 0) {
948 icnss_pr_err("Fail to save QDSS data: %d\n",
949 ret);
950 break;
951 }
952 }
953 } else {
954 for (i = 0; i < event_data->mem_seg_len; i++) {
955 pa = event_data->mem_seg[i].addr;
956 size = event_data->mem_seg[i].size;
957 va = icnss_qdss_trace_pa_to_va(priv, pa,
958 size, &seg_id);
959 if (!va) {
960 icnss_pr_err("Fail to find matching va for pa %pa\n",
961 &pa);
962 ret = -EINVAL;
963 break;
964 }
965 ret = icnss_genl_send_msg(va, ICNSS_GENL_MSG_TYPE_QDSS,
966 event_data->file_name, size);
967 if (ret < 0) {
968 icnss_pr_err("Fail to save QDSS data: %d\n",
969 ret);
970 break;
971 }
972 }
973 }
974
975 kfree(data);
976 return ret;
977}
978
979static int icnss_event_soc_wake_request(struct icnss_priv *priv, void *data)
980{
981 int ret = 0;
982
983 if (!priv)
984 return -ENODEV;
985
986 ret = wlfw_send_soc_wake_msg(priv, QMI_WLFW_WAKE_REQUEST_V01);
987 if (!ret)
988 atomic_inc(&priv->soc_wake_ref_count);
989
990 return ret;
991}
992
993static int icnss_event_soc_wake_release(struct icnss_priv *priv, void *data)
994{
995 int ret = 0;
996 int count = 0;
997
998 if (!priv)
999 return -ENODEV;
1000
1001 count = atomic_dec_return(&priv->soc_wake_ref_count);
1002
1003 if (count) {
1004 icnss_pr_dbg("Wake release not called. Ref count: %d",
1005 count);
1006 return 0;
1007 }
1008
1009 ret = wlfw_send_soc_wake_msg(priv, QMI_WLFW_WAKE_RELEASE_V01);
1010
1011 return ret;
1012}
1013
1014static int icnss_driver_event_register_driver(struct icnss_priv *priv,
1015 void *data)
1016{
1017 int ret = 0;
1018 int probe_cnt = 0;
1019
1020 if (priv->ops)
1021 return -EEXIST;
1022
1023 priv->ops = data;
1024
1025 if (test_bit(SKIP_QMI, &priv->ctrl_params.quirks))
1026 set_bit(ICNSS_FW_READY, &priv->state);
1027
1028 if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
1029 icnss_pr_err("FW is in bad state, state: 0x%lx\n",
1030 priv->state);
1031 return -ENODEV;
1032 }
1033
1034 if (!test_bit(ICNSS_FW_READY, &priv->state)) {
1035 icnss_pr_dbg("FW is not ready yet, state: 0x%lx\n",
1036 priv->state);
1037 goto out;
1038 }
1039
1040 ret = icnss_hw_power_on(priv);
1041 if (ret)
1042 goto out;
1043
1044 icnss_block_shutdown(true);
1045 while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
1046 ret = priv->ops->probe(&priv->pdev->dev);
1047 probe_cnt++;
1048 if (ret != -EPROBE_DEFER)
1049 break;
1050 }
1051 if (ret) {
1052 icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
1053 ret, priv->state, probe_cnt);
1054 icnss_block_shutdown(false);
1055 goto power_off;
1056 }
1057
1058 icnss_block_shutdown(false);
1059 set_bit(ICNSS_DRIVER_PROBED, &priv->state);
1060
1061 return 0;
1062
1063power_off:
1064 icnss_hw_power_off(priv);
1065out:
1066 return ret;
1067}
1068
1069static int icnss_driver_event_unregister_driver(struct icnss_priv *priv,
1070 void *data)
1071{
1072 if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state)) {
1073 priv->ops = NULL;
1074 goto out;
1075 }
1076
1077 set_bit(ICNSS_DRIVER_UNLOADING, &priv->state);
1078
1079 icnss_block_shutdown(true);
1080
1081 if (priv->ops)
1082 priv->ops->remove(&priv->pdev->dev);
1083
1084 icnss_block_shutdown(false);
1085
1086 clear_bit(ICNSS_DRIVER_UNLOADING, &priv->state);
1087 clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
1088
1089 priv->ops = NULL;
1090
1091 icnss_hw_power_off(priv);
1092
1093out:
1094 return 0;
1095}
1096
1097static int icnss_call_driver_remove(struct icnss_priv *priv)
1098{
1099 icnss_pr_dbg("Calling driver remove state: 0x%lx\n", priv->state);
1100
1101 clear_bit(ICNSS_FW_READY, &priv->state);
1102
1103 if (test_bit(ICNSS_DRIVER_UNLOADING, &priv->state))
1104 return 0;
1105
1106 if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
1107 return 0;
1108
1109 if (!priv->ops || !priv->ops->remove)
1110 return 0;
1111
1112 set_bit(ICNSS_DRIVER_UNLOADING, &priv->state);
1113 priv->ops->remove(&priv->pdev->dev);
1114
1115 clear_bit(ICNSS_DRIVER_UNLOADING, &priv->state);
1116 clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
1117
1118 icnss_hw_power_off(priv);
1119
1120 return 0;
1121}
1122
1123static int icnss_fw_crashed(struct icnss_priv *priv,
1124 struct icnss_event_pd_service_down_data *event_data)
1125{
1126 icnss_pr_dbg("FW crashed, state: 0x%lx\n", priv->state);
1127
1128 set_bit(ICNSS_PD_RESTART, &priv->state);
1129 clear_bit(ICNSS_FW_READY, &priv->state);
1130
1131 icnss_pm_stay_awake(priv);
1132
1133 if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
1134 icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_CRASHED, NULL);
1135
1136 if (event_data && event_data->fw_rejuvenate)
1137 wlfw_rejuvenate_ack_send_sync_msg(priv);
1138
1139 return 0;
1140}
1141
1142static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
1143 void *data)
1144{
1145 struct icnss_event_pd_service_down_data *event_data = data;
1146
1147 if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
1148 icnss_ignore_fw_timeout(false);
1149 goto out;
1150 }
1151
1152 if (priv->force_err_fatal)
1153 ICNSS_ASSERT(0);
1154
1155 if (priv->early_crash_ind) {
1156 icnss_pr_dbg("PD Down ignored as early indication is processed: %d, state: 0x%lx\n",
1157 event_data->crashed, priv->state);
1158 goto out;
1159 }
1160
1161 if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
1162 icnss_fatal_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
1163 event_data->crashed, priv->state);
1164 if (!priv->allow_recursive_recovery)
1165 ICNSS_ASSERT(0);
1166 goto out;
1167 }
1168
1169 if (!test_bit(ICNSS_PD_RESTART, &priv->state))
1170 icnss_fw_crashed(priv, event_data);
1171
1172out:
1173 kfree(data);
1174
1175 return 0;
1176}
1177
1178static int icnss_driver_event_early_crash_ind(struct icnss_priv *priv,
1179 void *data)
1180{
1181 if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
1182 icnss_ignore_fw_timeout(false);
1183 goto out;
1184 }
1185
1186 priv->early_crash_ind = true;
1187 icnss_fw_crashed(priv, NULL);
1188
1189out:
1190 kfree(data);
1191
1192 return 0;
1193}
1194
1195static int icnss_driver_event_idle_shutdown(struct icnss_priv *priv,
1196 void *data)
1197{
1198 int ret = 0;
1199
1200 if (!priv->ops || !priv->ops->idle_shutdown)
1201 return 0;
1202
1203 if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
1204 test_bit(ICNSS_REJUVENATE, &priv->state)) {
1205 icnss_pr_err("SSR/PDR is already in-progress during idle shutdown callback\n");
1206 ret = -EBUSY;
1207 } else {
1208 icnss_pr_dbg("Calling driver idle shutdown, state: 0x%lx\n",
1209 priv->state);
1210 icnss_block_shutdown(true);
1211 ret = priv->ops->idle_shutdown(&priv->pdev->dev);
1212 icnss_block_shutdown(false);
1213 }
1214
1215 return ret;
1216}
1217
1218static int icnss_driver_event_idle_restart(struct icnss_priv *priv,
1219 void *data)
1220{
1221 int ret = 0;
1222
1223 if (!priv->ops || !priv->ops->idle_restart)
1224 return 0;
1225
1226 if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
1227 test_bit(ICNSS_REJUVENATE, &priv->state)) {
1228 icnss_pr_err("SSR/PDR is already in-progress during idle restart callback\n");
1229 ret = -EBUSY;
1230 } else {
1231 icnss_pr_dbg("Calling driver idle restart, state: 0x%lx\n",
1232 priv->state);
1233 icnss_block_shutdown(true);
1234 ret = priv->ops->idle_restart(&priv->pdev->dev);
1235 icnss_block_shutdown(false);
1236 }
1237
1238 return ret;
1239}
1240
1241static int icnss_qdss_trace_free_hdlr(struct icnss_priv *priv)
1242{
1243 icnss_free_qdss_mem(priv);
1244
1245 return 0;
1246}
1247
1248static void icnss_driver_event_work(struct work_struct *work)
1249{
1250 struct icnss_priv *priv =
1251 container_of(work, struct icnss_priv, event_work);
1252 struct icnss_driver_event *event;
1253 unsigned long flags;
1254 int ret;
1255
1256 icnss_pm_stay_awake(priv);
1257
1258 spin_lock_irqsave(&priv->event_lock, flags);
1259
1260 while (!list_empty(&priv->event_list)) {
1261 event = list_first_entry(&priv->event_list,
1262 struct icnss_driver_event, list);
1263 list_del(&event->list);
1264 spin_unlock_irqrestore(&priv->event_lock, flags);
1265
1266 icnss_pr_dbg("Processing event: %s%s(%d), state: 0x%lx\n",
1267 icnss_driver_event_to_str(event->type),
1268 event->sync ? "-sync" : "", event->type,
1269 priv->state);
1270
1271 switch (event->type) {
1272 case ICNSS_DRIVER_EVENT_SERVER_ARRIVE:
1273 ret = icnss_driver_event_server_arrive(priv,
1274 event->data);
1275 break;
1276 case ICNSS_DRIVER_EVENT_SERVER_EXIT:
1277 ret = icnss_driver_event_server_exit(priv);
1278 break;
1279 case ICNSS_DRIVER_EVENT_FW_READY_IND:
1280 ret = icnss_driver_event_fw_ready_ind(priv,
1281 event->data);
1282 break;
1283 case ICNSS_DRIVER_EVENT_REGISTER_DRIVER:
1284 ret = icnss_driver_event_register_driver(priv,
1285 event->data);
1286 break;
1287 case ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
1288 ret = icnss_driver_event_unregister_driver(priv,
1289 event->data);
1290 break;
1291 case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
1292 ret = icnss_driver_event_pd_service_down(priv,
1293 event->data);
1294 break;
1295 case ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND:
1296 ret = icnss_driver_event_early_crash_ind(priv,
1297 event->data);
1298 break;
1299 case ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
1300 ret = icnss_driver_event_idle_shutdown(priv,
1301 event->data);
1302 break;
1303 case ICNSS_DRIVER_EVENT_IDLE_RESTART:
1304 ret = icnss_driver_event_idle_restart(priv,
1305 event->data);
1306 break;
1307 case ICNSS_DRIVER_EVENT_FW_INIT_DONE_IND:
1308 ret = icnss_driver_event_fw_init_done(priv,
1309 event->data);
1310 break;
1311 case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
1312 ret = icnss_qdss_trace_req_mem_hdlr(priv);
1313 break;
1314 case ICNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
1315 ret = icnss_qdss_trace_save_hdlr(priv,
1316 event->data);
1317 break;
1318 case ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
1319 ret = icnss_qdss_trace_free_hdlr(priv);
1320 break;
1321 default:
1322 icnss_pr_err("Invalid Event type: %d", event->type);
1323 kfree(event);
1324 continue;
1325 }
1326
1327 priv->stats.events[event->type].processed++;
1328
1329 icnss_pr_dbg("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
1330 icnss_driver_event_to_str(event->type),
1331 event->sync ? "-sync" : "", event->type, ret,
1332 priv->state);
1333
1334 spin_lock_irqsave(&priv->event_lock, flags);
1335 if (event->sync) {
1336 event->ret = ret;
1337 complete(&event->complete);
1338 continue;
1339 }
1340 spin_unlock_irqrestore(&priv->event_lock, flags);
1341
1342 kfree(event);
1343
1344 spin_lock_irqsave(&priv->event_lock, flags);
1345 }
1346 spin_unlock_irqrestore(&priv->event_lock, flags);
1347
1348 icnss_pm_relax(priv);
1349}
1350
1351static void icnss_soc_wake_msg_work(struct work_struct *work)
1352{
1353 struct icnss_priv *priv =
1354 container_of(work, struct icnss_priv, soc_wake_msg_work);
1355 struct icnss_soc_wake_event *event;
1356 unsigned long flags;
1357 int ret;
1358
1359 icnss_pm_stay_awake(priv);
1360
1361 spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
1362
1363 while (!list_empty(&priv->soc_wake_msg_list)) {
1364 event = list_first_entry(&priv->soc_wake_msg_list,
1365 struct icnss_soc_wake_event, list);
1366 list_del(&event->list);
1367 spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
1368
1369 icnss_pr_dbg("Processing event: %s%s(%d), state: 0x%lx\n",
1370 icnss_soc_wake_event_to_str(event->type),
1371 event->sync ? "-sync" : "", event->type,
1372 priv->state);
1373
1374 switch (event->type) {
1375 case ICNSS_SOC_WAKE_REQUEST_EVENT:
1376 ret = icnss_event_soc_wake_request(priv,
1377 event->data);
1378 break;
1379 case ICNSS_SOC_WAKE_RELEASE_EVENT:
1380 ret = icnss_event_soc_wake_release(priv,
1381 event->data);
1382 break;
1383 default:
1384 icnss_pr_err("Invalid Event type: %d", event->type);
1385 kfree(event);
1386 continue;
1387 }
1388
1389 priv->stats.soc_wake_events[event->type].processed++;
1390
1391 icnss_pr_dbg("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
1392 icnss_soc_wake_event_to_str(event->type),
1393 event->sync ? "-sync" : "", event->type, ret,
1394 priv->state);
1395
1396 spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
1397 if (event->sync) {
1398 event->ret = ret;
1399 complete(&event->complete);
1400 continue;
1401 }
1402 spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
1403
1404 kfree(event);
1405
1406 spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
1407 }
1408 spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
1409
1410 icnss_pm_relax(priv);
1411}
1412
1413static int icnss_msa0_ramdump(struct icnss_priv *priv)
1414{
1415 struct ramdump_segment segment;
1416
1417 memset(&segment, 0, sizeof(segment));
1418 segment.v_address = priv->msa_va;
1419 segment.size = priv->msa_mem_size;
1420 return do_ramdump(priv->msa0_dump_dev, &segment, 1);
1421}
1422
1423static void icnss_update_state_send_modem_shutdown(struct icnss_priv *priv,
1424 void *data)
1425{
1426 struct notif_data *notif = data;
1427 int ret = 0;
1428
1429 if (!notif->crashed) {
1430 if (atomic_read(&priv->is_shutdown)) {
1431 atomic_set(&priv->is_shutdown, false);
1432 if (!test_bit(ICNSS_PD_RESTART, &priv->state) &&
1433 !test_bit(ICNSS_SHUTDOWN_DONE, &priv->state)) {
1434 icnss_call_driver_remove(priv);
1435 }
1436 }
1437
1438 if (test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
1439 if (!wait_for_completion_timeout(
1440 &priv->unblock_shutdown,
1441 msecs_to_jiffies(PROBE_TIMEOUT)))
1442 icnss_pr_err("modem block shutdown timeout\n");
1443 }
1444
1445 ret = wlfw_send_modem_shutdown_msg(priv);
1446 if (ret < 0)
1447 icnss_pr_err("Fail to send modem shutdown Indication %d\n",
1448 ret);
1449 }
1450}
1451
1452static int icnss_modem_notifier_nb(struct notifier_block *nb,
1453 unsigned long code,
1454 void *data)
1455{
1456 struct icnss_event_pd_service_down_data *event_data;
1457 struct notif_data *notif = data;
1458 struct icnss_priv *priv = container_of(nb, struct icnss_priv,
1459 modem_ssr_nb);
1460 struct icnss_uevent_fw_down_data fw_down_data;
1461
1462 icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
1463
1464 if (code == SUBSYS_AFTER_SHUTDOWN) {
1465 icnss_pr_info("Collecting msa0 segment dump\n");
1466 icnss_msa0_ramdump(priv);
1467 return NOTIFY_OK;
1468 }
1469
1470 if (code != SUBSYS_BEFORE_SHUTDOWN)
1471 return NOTIFY_OK;
1472
1473 priv->is_ssr = true;
1474
1475 icnss_update_state_send_modem_shutdown(priv, data);
1476
1477 if (test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
1478 set_bit(ICNSS_FW_DOWN, &priv->state);
1479 icnss_ignore_fw_timeout(true);
1480
1481 fw_down_data.crashed = !!notif->crashed;
1482 if (test_bit(ICNSS_FW_READY, &priv->state))
1483 icnss_call_driver_uevent(priv,
1484 ICNSS_UEVENT_FW_DOWN,
1485 &fw_down_data);
1486 return NOTIFY_OK;
1487 }
1488
1489 icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
1490 priv->state, notif->crashed);
1491
1492 set_bit(ICNSS_FW_DOWN, &priv->state);
1493
1494 if (notif->crashed)
1495 priv->stats.recovery.root_pd_crash++;
1496 else
1497 priv->stats.recovery.root_pd_shutdown++;
1498
1499 icnss_ignore_fw_timeout(true);
1500
1501 event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
1502
1503 if (event_data == NULL)
1504 return notifier_from_errno(-ENOMEM);
1505
1506 event_data->crashed = notif->crashed;
1507
1508 fw_down_data.crashed = !!notif->crashed;
1509 if (test_bit(ICNSS_FW_READY, &priv->state))
1510 icnss_call_driver_uevent(priv,
1511 ICNSS_UEVENT_FW_DOWN,
1512 &fw_down_data);
1513
1514 icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
1515 ICNSS_EVENT_SYNC, event_data);
1516
1517 return NOTIFY_OK;
1518}
1519
1520static int icnss_modem_ssr_register_notifier(struct icnss_priv *priv)
1521{
1522 int ret = 0;
1523
1524 priv->modem_ssr_nb.notifier_call = icnss_modem_notifier_nb;
1525
1526 priv->modem_notify_handler =
1527 subsys_notif_register_notifier("modem", &priv->modem_ssr_nb);
1528
1529 if (IS_ERR(priv->modem_notify_handler)) {
1530 ret = PTR_ERR(priv->modem_notify_handler);
1531 icnss_pr_err("Modem register notifier failed: %d\n", ret);
1532 }
1533
1534 set_bit(ICNSS_SSR_REGISTERED, &priv->state);
1535
1536 return ret;
1537}
1538
1539static int icnss_modem_ssr_unregister_notifier(struct icnss_priv *priv)
1540{
1541 if (!test_and_clear_bit(ICNSS_SSR_REGISTERED, &priv->state))
1542 return 0;
1543
1544 subsys_notif_unregister_notifier(priv->modem_notify_handler,
1545 &priv->modem_ssr_nb);
1546 priv->modem_notify_handler = NULL;
1547
1548 return 0;
1549}
1550
1551static int icnss_pdr_unregister_notifier(struct icnss_priv *priv)
1552{
1553 int i;
1554
1555 if (!test_and_clear_bit(ICNSS_PDR_REGISTERED, &priv->state))
1556 return 0;
1557
1558 for (i = 0; i < priv->total_domains; i++)
1559 service_notif_unregister_notifier(
1560 priv->service_notifier[i].handle,
1561 &priv->service_notifier_nb);
1562
1563 kfree(priv->service_notifier);
1564
1565 priv->service_notifier = NULL;
1566
1567 return 0;
1568}
1569
1570static int icnss_service_notifier_notify(struct notifier_block *nb,
1571 unsigned long notification, void *data)
1572{
1573 struct icnss_priv *priv = container_of(nb, struct icnss_priv,
1574 service_notifier_nb);
1575 enum pd_subsys_state *state = data;
1576 struct icnss_event_pd_service_down_data *event_data;
1577 struct icnss_uevent_fw_down_data fw_down_data;
1578 enum icnss_pdr_cause_index cause = ICNSS_ROOT_PD_CRASH;
1579
1580 icnss_pr_dbg("PD service notification: 0x%lx state: 0x%lx\n",
1581 notification, priv->state);
1582
1583 if (notification != SERVREG_NOTIF_SERVICE_STATE_DOWN_V01)
1584 goto done;
1585
1586 if (!priv->is_ssr)
1587 set_bit(ICNSS_PDR, &priv->state);
1588
1589 event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
1590
1591 if (event_data == NULL)
1592 return notifier_from_errno(-ENOMEM);
1593
1594 event_data->crashed = true;
1595
1596 if (state == NULL) {
1597 priv->stats.recovery.root_pd_crash++;
1598 goto event_post;
1599 }
1600
1601 switch (*state) {
1602 case ROOT_PD_WDOG_BITE:
1603 priv->stats.recovery.root_pd_crash++;
1604 break;
1605 case ROOT_PD_SHUTDOWN:
1606 cause = ICNSS_ROOT_PD_SHUTDOWN;
1607 priv->stats.recovery.root_pd_shutdown++;
1608 event_data->crashed = false;
1609 break;
1610 case USER_PD_STATE_CHANGE:
1611 if (test_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state)) {
1612 cause = ICNSS_HOST_ERROR;
1613 priv->stats.recovery.pdr_host_error++;
1614 } else {
1615 cause = ICNSS_FW_CRASH;
1616 priv->stats.recovery.pdr_fw_crash++;
1617 }
1618 break;
1619 default:
1620 priv->stats.recovery.root_pd_crash++;
1621 break;
1622 }
1623 icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx: cause: %s\n",
1624 *state, priv->state, icnss_pdr_cause[cause]);
1625event_post:
1626 if (!test_bit(ICNSS_FW_DOWN, &priv->state)) {
1627 set_bit(ICNSS_FW_DOWN, &priv->state);
1628 icnss_ignore_fw_timeout(true);
1629
1630 fw_down_data.crashed = event_data->crashed;
1631 if (test_bit(ICNSS_FW_READY, &priv->state))
1632 icnss_call_driver_uevent(priv,
1633 ICNSS_UEVENT_FW_DOWN,
1634 &fw_down_data);
1635 }
1636
1637 clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
1638 icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
1639 ICNSS_EVENT_SYNC, event_data);
1640done:
1641 if (notification == SERVREG_NOTIF_SERVICE_STATE_UP_V01)
1642 clear_bit(ICNSS_FW_DOWN, &priv->state);
1643 return NOTIFY_OK;
1644}
1645
1646static int icnss_get_service_location_notify(struct notifier_block *nb,
1647 unsigned long opcode, void *data)
1648{
1649 struct icnss_priv *priv = container_of(nb, struct icnss_priv,
1650 get_service_nb);
1651 struct pd_qmi_client_data *pd = data;
1652 int curr_state;
1653 int ret;
1654 int i;
1655 int j;
1656 bool duplicate;
1657 struct service_notifier_context *notifier;
1658
1659 icnss_pr_dbg("Get service notify opcode: %lu, state: 0x%lx\n", opcode,
1660 priv->state);
1661
1662 if (opcode != LOCATOR_UP)
1663 return NOTIFY_DONE;
1664
1665 if (pd->total_domains == 0) {
1666 icnss_pr_err("Did not find any domains\n");
1667 ret = -ENOENT;
1668 goto out;
1669 }
1670
1671 notifier = kcalloc(pd->total_domains,
1672 sizeof(struct service_notifier_context),
1673 GFP_KERNEL);
1674 if (!notifier) {
1675 ret = -ENOMEM;
1676 goto out;
1677 }
1678
1679 priv->service_notifier_nb.notifier_call = icnss_service_notifier_notify;
1680
1681 for (i = 0; i < pd->total_domains; i++) {
1682 duplicate = false;
1683 for (j = i + 1; j < pd->total_domains; j++) {
1684 if (!strcmp(pd->domain_list[i].name,
1685 pd->domain_list[j].name))
1686 duplicate = true;
1687 }
1688
1689 if (duplicate)
1690 continue;
1691
1692 icnss_pr_dbg("%d: domain_name: %s, instance_id: %d\n", i,
1693 pd->domain_list[i].name,
1694 pd->domain_list[i].instance_id);
1695
1696 notifier[i].handle =
1697 service_notif_register_notifier(pd->domain_list[i].name,
1698 pd->domain_list[i].instance_id,
1699 &priv->service_notifier_nb, &curr_state);
1700 notifier[i].instance_id = pd->domain_list[i].instance_id;
1701 strlcpy(notifier[i].name, pd->domain_list[i].name,
1702 QMI_SERVREG_LOC_NAME_LENGTH_V01 + 1);
1703
1704 if (IS_ERR(notifier[i].handle)) {
1705 icnss_pr_err("%d: Unable to register notifier for %s(0x%x)\n",
1706 i, pd->domain_list->name,
1707 pd->domain_list->instance_id);
1708 ret = PTR_ERR(notifier[i].handle);
1709 goto free_handle;
1710 }
1711 }
1712
1713 priv->service_notifier = notifier;
1714 priv->total_domains = pd->total_domains;
1715
1716 set_bit(ICNSS_PDR_REGISTERED, &priv->state);
1717
1718 icnss_pr_dbg("PD notification registration happened, state: 0x%lx\n",
1719 priv->state);
1720
1721 return NOTIFY_OK;
1722
1723free_handle:
1724 for (i = 0; i < pd->total_domains; i++) {
1725 if (notifier[i].handle)
1726 service_notif_unregister_notifier(notifier[i].handle,
1727 &priv->service_notifier_nb);
1728 }
1729 kfree(notifier);
1730
1731out:
1732 icnss_pr_err("PD restart not enabled: %d, state: 0x%lx\n", ret,
1733 priv->state);
1734
1735 return NOTIFY_OK;
1736}
1737
1738
1739static int icnss_pd_restart_enable(struct icnss_priv *priv)
1740{
1741 int ret;
1742
1743 if (test_bit(SSR_ONLY, &priv->ctrl_params.quirks)) {
1744 icnss_pr_dbg("PDR disabled through module parameter\n");
1745 return 0;
1746 }
1747
1748 icnss_pr_dbg("Get service location, state: 0x%lx\n", priv->state);
1749
1750 priv->get_service_nb.notifier_call = icnss_get_service_location_notify;
1751 ret = get_service_location(ICNSS_SERVICE_LOCATION_CLIENT_NAME,
1752 ICNSS_WLAN_SERVICE_NAME,
1753 &priv->get_service_nb);
1754 if (ret) {
1755 icnss_pr_err("Get service location failed: %d\n", ret);
1756 goto out;
1757 }
1758
1759 return 0;
1760out:
1761 icnss_pr_err("Failed to enable PD restart: %d\n", ret);
1762 return ret;
1763
1764}
1765
1766
1767static int icnss_enable_recovery(struct icnss_priv *priv)
1768{
1769 int ret;
1770
1771 if (test_bit(RECOVERY_DISABLE, &priv->ctrl_params.quirks)) {
1772 icnss_pr_dbg("Recovery disabled through module parameter\n");
1773 return 0;
1774 }
1775
1776 if (test_bit(PDR_ONLY, &priv->ctrl_params.quirks)) {
1777 icnss_pr_dbg("SSR disabled through module parameter\n");
1778 goto enable_pdr;
1779 }
1780
1781 priv->msa0_dump_dev = create_ramdump_device("wcss_msa0",
1782 &priv->pdev->dev);
1783 if (!priv->msa0_dump_dev)
1784 return -ENOMEM;
1785
1786 icnss_modem_ssr_register_notifier(priv);
1787 if (test_bit(SSR_ONLY, &priv->ctrl_params.quirks)) {
1788 icnss_pr_dbg("PDR disabled through module parameter\n");
1789 return 0;
1790 }
1791
1792enable_pdr:
1793 ret = icnss_pd_restart_enable(priv);
1794
1795 if (ret)
1796 return ret;
1797
1798 return 0;
1799}
1800
1801int icnss_qmi_send(struct device *dev, int type, void *cmd,
1802 int cmd_len, void *cb_ctx,
1803 int (*cb)(void *ctx, void *event, int event_len))
1804{
1805 struct icnss_priv *priv = icnss_get_plat_priv();
1806 int ret;
1807
1808 if (!priv)
1809 return -ENODEV;
1810
1811 if (!test_bit(ICNSS_WLFW_CONNECTED, &priv->state))
1812 return -EINVAL;
1813
1814 priv->get_info_cb = cb;
1815 priv->get_info_cb_ctx = cb_ctx;
1816
1817 ret = icnss_wlfw_get_info_send_sync(priv, type, cmd, cmd_len);
1818 if (ret) {
1819 priv->get_info_cb = NULL;
1820 priv->get_info_cb_ctx = NULL;
1821 }
1822
1823 return ret;
1824}
1825EXPORT_SYMBOL(icnss_qmi_send);
1826
1827int __icnss_register_driver(struct icnss_driver_ops *ops,
1828 struct module *owner, const char *mod_name)
1829{
1830 int ret = 0;
1831 struct icnss_priv *priv = icnss_get_plat_priv();
1832
1833 if (!priv || !priv->pdev) {
1834 ret = -ENODEV;
1835 goto out;
1836 }
1837
1838 icnss_pr_dbg("Registering driver, state: 0x%lx\n", priv->state);
1839
1840 if (priv->ops) {
1841 icnss_pr_err("Driver already registered\n");
1842 ret = -EEXIST;
1843 goto out;
1844 }
1845
1846 if (!ops->probe || !ops->remove) {
1847 ret = -EINVAL;
1848 goto out;
1849 }
1850
1851 ret = icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
1852 0, ops);
1853
1854 if (ret == -EINTR)
1855 ret = 0;
1856
1857out:
1858 return ret;
1859}
1860EXPORT_SYMBOL(__icnss_register_driver);
1861
1862int icnss_unregister_driver(struct icnss_driver_ops *ops)
1863{
1864 int ret;
1865 struct icnss_priv *priv = icnss_get_plat_priv();
1866
1867 if (!priv || !priv->pdev) {
1868 ret = -ENODEV;
1869 goto out;
1870 }
1871
1872 icnss_pr_dbg("Unregistering driver, state: 0x%lx\n", priv->state);
1873
1874 if (!priv->ops) {
1875 icnss_pr_err("Driver not registered\n");
1876 ret = -ENOENT;
1877 goto out;
1878 }
1879
1880 ret = icnss_driver_event_post(priv,
1881 ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
1882 ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
1883out:
1884 return ret;
1885}
1886EXPORT_SYMBOL(icnss_unregister_driver);
1887
1888static struct icnss_msi_config msi_config = {
1889 .total_vectors = 28,
1890 .total_users = 2,
1891 .users = (struct icnss_msi_user[]) {
1892 { .name = "CE", .num_vectors = 10, .base_vector = 0 },
1893 { .name = "DP", .num_vectors = 18, .base_vector = 10 },
1894 },
1895};
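/*
 * icnss_get_user_msi_assignment() below resolves requests against this
 * table: asking for "DP" yields num_vectors = 18, base_vector = 10 and
 * user_base_data = 10 + priv->msi_base_data, while "CE" gets the first
 * ten vectors starting at 0.
 */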
1896
1897static int icnss_get_msi_assignment(struct icnss_priv *priv)
1898{
1899 priv->msi_config = &msi_config;
1900
1901 return 0;
1902}
1903
1904int icnss_get_user_msi_assignment(struct device *dev, char *user_name,
1905 int *num_vectors, u32 *user_base_data,
1906 u32 *base_vector)
1907{
1908 struct icnss_priv *priv = dev_get_drvdata(dev);
1909 struct icnss_msi_config *msi_config;
1910 int idx;
1911
1912 if (!priv)
1913 return -ENODEV;
1914
1915 msi_config = priv->msi_config;
1916 if (!msi_config) {
1917 icnss_pr_err("MSI is not supported.\n");
1918 return -EINVAL;
1919 }
1920
1921 for (idx = 0; idx < msi_config->total_users; idx++) {
1922 if (strcmp(user_name, msi_config->users[idx].name) == 0) {
1923 *num_vectors = msi_config->users[idx].num_vectors;
1924 *user_base_data = msi_config->users[idx].base_vector
1925 + priv->msi_base_data;
1926 *base_vector = msi_config->users[idx].base_vector;
1927
1928 icnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
1929 user_name, *num_vectors, *user_base_data,
1930 *base_vector);
1931
1932 return 0;
1933 }
1934 }
1935
1936 icnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
1937
1938 return -EINVAL;
1939}
1940EXPORT_SYMBOL(icnss_get_user_msi_assignment);
1941
1942int icnss_get_msi_irq(struct device *dev, unsigned int vector)
1943{
1944 struct icnss_priv *priv = dev_get_drvdata(dev);
1945 int irq_num;
1946
1947 irq_num = priv->srng_irqs[vector];
1948 icnss_pr_dbg("Get IRQ number %d for vector index %d\n",
1949 irq_num, vector);
1950
1951 return irq_num;
1952}
1953EXPORT_SYMBOL(icnss_get_msi_irq);
1954
1955void icnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
1956 u32 *msi_addr_high)
1957{
1958 struct icnss_priv *priv = dev_get_drvdata(dev);
1959
1960 *msi_addr_low = lower_32_bits(priv->msi_addr_iova);
1961 *msi_addr_high = upper_32_bits(priv->msi_addr_iova);
1962
1963}
1964EXPORT_SYMBOL(icnss_get_msi_address);
1965
1966int icnss_ce_request_irq(struct device *dev, unsigned int ce_id,
1967 irqreturn_t (*handler)(int, void *),
1968 unsigned long flags, const char *name, void *ctx)
1969{
1970 int ret = 0;
1971 unsigned int irq;
1972 struct ce_irq_list *irq_entry;
1973 struct icnss_priv *priv = dev_get_drvdata(dev);
1974
1975 if (!priv || !priv->pdev) {
1976 ret = -ENODEV;
1977 goto out;
1978 }
1979
1980 icnss_pr_vdbg("CE request IRQ: %d, state: 0x%lx\n", ce_id, priv->state);
1981
1982 if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
1983 icnss_pr_err("Invalid CE ID, ce_id: %d\n", ce_id);
1984 ret = -EINVAL;
1985 goto out;
1986 }
1987 irq = priv->ce_irqs[ce_id];
1988 irq_entry = &priv->ce_irq_list[ce_id];
1989
1990 if (irq_entry->handler || irq_entry->irq) {
1991 icnss_pr_err("IRQ already requested: %d, ce_id: %d\n",
1992 irq, ce_id);
1993 ret = -EEXIST;
1994 goto out;
1995 }
1996
1997 ret = request_irq(irq, handler, flags, name, ctx);
1998 if (ret) {
1999 icnss_pr_err("IRQ request failed: %d, ce_id: %d, ret: %d\n",
2000 irq, ce_id, ret);
2001 goto out;
2002 }
2003 irq_entry->irq = irq;
2004 irq_entry->handler = handler;
2005
2006 icnss_pr_vdbg("IRQ requested: %d, ce_id: %d\n", irq, ce_id);
2007
2008 penv->stats.ce_irqs[ce_id].request++;
2009out:
2010 return ret;
2011}
2012EXPORT_SYMBOL(icnss_ce_request_irq);
2013
2014int icnss_ce_free_irq(struct device *dev, unsigned int ce_id, void *ctx)
2015{
2016 int ret = 0;
2017 unsigned int irq;
2018 struct ce_irq_list *irq_entry;
2019
2020 if (!penv || !penv->pdev || !dev) {
2021 ret = -ENODEV;
2022 goto out;
2023 }
2024
2025 icnss_pr_vdbg("CE free IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
2026
2027 if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
2028 icnss_pr_err("Invalid CE ID to free, ce_id: %d\n", ce_id);
2029 ret = -EINVAL;
2030 goto out;
2031 }
2032
2033 irq = penv->ce_irqs[ce_id];
2034 irq_entry = &penv->ce_irq_list[ce_id];
2035 if (!irq_entry->handler || !irq_entry->irq) {
2036 icnss_pr_err("IRQ not requested: %d, ce_id: %d\n", irq, ce_id);
2037 ret = -EEXIST;
2038 goto out;
2039 }
2040 free_irq(irq, ctx);
2041 irq_entry->irq = 0;
2042 irq_entry->handler = NULL;
2043
2044 penv->stats.ce_irqs[ce_id].free++;
2045out:
2046 return ret;
2047}
2048EXPORT_SYMBOL(icnss_ce_free_irq);
2049
2050void icnss_enable_irq(struct device *dev, unsigned int ce_id)
2051{
2052 unsigned int irq;
2053
2054 if (!penv || !penv->pdev || !dev) {
2055 icnss_pr_err("Platform driver not initialized\n");
2056 return;
2057 }
2058
2059 icnss_pr_vdbg("Enable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
2060 penv->state);
2061
2062 if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
2063 icnss_pr_err("Invalid CE ID to enable IRQ, ce_id: %d\n", ce_id);
2064 return;
2065 }
2066
2067 penv->stats.ce_irqs[ce_id].enable++;
2068
2069 irq = penv->ce_irqs[ce_id];
2070 enable_irq(irq);
2071}
2072EXPORT_SYMBOL(icnss_enable_irq);
2073
2074void icnss_disable_irq(struct device *dev, unsigned int ce_id)
2075{
2076 unsigned int irq;
2077
2078 if (!penv || !penv->pdev || !dev) {
2079 icnss_pr_err("Platform driver not initialized\n");
2080 return;
2081 }
2082
2083 icnss_pr_vdbg("Disable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
2084 penv->state);
2085
2086 if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
2087 icnss_pr_err("Invalid CE ID to disable IRQ, ce_id: %d\n",
2088 ce_id);
2089 return;
2090 }
2091
2092 irq = penv->ce_irqs[ce_id];
2093 disable_irq(irq);
2094
2095 penv->stats.ce_irqs[ce_id].disable++;
2096}
2097EXPORT_SYMBOL(icnss_disable_irq);
2098
2099int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info)
2100{
2101 char *fw_build_timestamp = NULL;
2102 struct icnss_priv *priv = dev_get_drvdata(dev);
2103
2104 if (!priv) {
2105 icnss_pr_err("Platform driver not initialized\n");
2106 return -EINVAL;
2107 }
2108
2109 info->v_addr = priv->mem_base_va;
2110 info->p_addr = priv->mem_base_pa;
2111 info->chip_id = priv->chip_info.chip_id;
2112 info->chip_family = priv->chip_info.chip_family;
2113 info->board_id = priv->board_id;
2114 info->soc_id = priv->soc_id;
2115 info->fw_version = priv->fw_version_info.fw_version;
2116 fw_build_timestamp = priv->fw_version_info.fw_build_timestamp;
2117 fw_build_timestamp[WLFW_MAX_TIMESTAMP_LEN] = '\0';
2118 strlcpy(info->fw_build_timestamp,
2119 priv->fw_version_info.fw_build_timestamp,
2120 WLFW_MAX_TIMESTAMP_LEN + 1);
2121
2122 return 0;
2123}
2124EXPORT_SYMBOL(icnss_get_soc_info);
2125
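/*
 * Forward the requested FW log mode to firmware via the WLFW INI message;
 * rejected while firmware is down or not yet ready.
 */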
2126int icnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode)
2127{
2128 int ret;
2129 struct icnss_priv *priv = dev_get_drvdata(dev);
2130
2131 if (!dev)
2132 return -ENODEV;
2133
2134	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
2135	    !test_bit(ICNSS_FW_READY, &priv->state)) {
2136 icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n",
2137 priv->state);
2138 return -EINVAL;
2139 }
2140
2141 icnss_pr_dbg("FW log mode: %u\n", fw_log_mode);
2142
2143 ret = wlfw_ini_send_sync_msg(priv, fw_log_mode);
2144 if (ret)
2145		icnss_pr_err("Failed to send ini, ret = %d, fw_log_mode: %u\n",
2146 ret, fw_log_mode);
2147 return ret;
2148}
2149EXPORT_SYMBOL(icnss_set_fw_log_mode);
2150
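/*
 * Ask the platform driver to keep the SoC awake. The wake reference is
 * counted: if a reference is already held the count is simply bumped,
 * otherwise an ICNSS_SOC_WAKE_REQUEST_EVENT is posted to the SOC wake
 * work queue.
 */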
2151int icnss_force_wake_request(struct device *dev)
2152{
2153 struct icnss_priv *priv = dev_get_drvdata(dev);
2154 int count = 0;
2155
2156 if (!dev)
2157 return -ENODEV;
2158
2159 if (!priv) {
2160 icnss_pr_err("Platform driver not initialized\n");
2161 return -EINVAL;
2162 }
2163
2164	icnss_pr_dbg("Calling SOC Wake request\n");
2165
2166 if (atomic_read(&priv->soc_wake_ref_count)) {
2167 count = atomic_inc_return(&priv->soc_wake_ref_count);
2168		icnss_pr_dbg("SOC already awake, Ref count: %d\n", count);
2169 return 0;
2170 }
2171
2172 icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_REQUEST_EVENT,
2173 0, NULL);
2174
2175 return 0;
2176}
2177EXPORT_SYMBOL(icnss_force_wake_request);
2178
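/* Drop a SOC wake reference by posting ICNSS_SOC_WAKE_RELEASE_EVENT. */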
2179int icnss_force_wake_release(struct device *dev)
2180{
2181 struct icnss_priv *priv = dev_get_drvdata(dev);
2182
2183 if (!dev)
2184 return -ENODEV;
2185
2186 if (!priv) {
2187 icnss_pr_err("Platform driver not initialized\n");
2188 return -EINVAL;
2189 }
2190
2191	icnss_pr_dbg("Calling SOC Wake release\n");
2192
2193 icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_RELEASE_EVENT,
2194 0, NULL);
2195
2196 return 0;
2197}
2198EXPORT_SYMBOL(icnss_force_wake_release);
2199
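/* Return the current SOC wake reference count (non-zero means awake). */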
2200int icnss_is_device_awake(struct device *dev)
2201{
2202 struct icnss_priv *priv = dev_get_drvdata(dev);
2203
2204 if (!dev)
2205 return -ENODEV;
2206
2207 if (!priv) {
2208 icnss_pr_err("Platform driver not initialized\n");
2209 return -EINVAL;
2210 }
2211
2212 return atomic_read(&priv->soc_wake_ref_count);
2213}
2214EXPORT_SYMBOL(icnss_is_device_awake);
2215
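/*
 * Read @data_len bytes of diagnostic data of @mem_type at @offset into
 * @output via a synchronous WLFW message. Requires firmware to be ready
 * and the hardware to be powered on.
 */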
2216int icnss_athdiag_read(struct device *dev, uint32_t offset,
2217 uint32_t mem_type, uint32_t data_len,
2218 uint8_t *output)
2219{
2220 int ret = 0;
2221 struct icnss_priv *priv = dev_get_drvdata(dev);
2222
2223 if (priv->magic != ICNSS_MAGIC) {
2224 icnss_pr_err("Invalid drvdata for diag read: dev %pK, data %pK, magic 0x%x\n",
2225 dev, priv, priv->magic);
2226 return -EINVAL;
2227 }
2228
2229 if (!output || data_len == 0
2230 || data_len > WLFW_MAX_DATA_SIZE) {
2231 icnss_pr_err("Invalid parameters for diag read: output %pK, data_len %u\n",
2232 output, data_len);
2233 ret = -EINVAL;
2234 goto out;
2235 }
2236
2237 if (!test_bit(ICNSS_FW_READY, &priv->state) ||
2238 !test_bit(ICNSS_POWER_ON, &priv->state)) {
2239 icnss_pr_err("Invalid state for diag read: 0x%lx\n",
2240 priv->state);
2241 ret = -EINVAL;
2242 goto out;
2243 }
2244
2245 ret = wlfw_athdiag_read_send_sync_msg(priv, offset, mem_type,
2246 data_len, output);
2247out:
2248 return ret;
2249}
2250EXPORT_SYMBOL(icnss_athdiag_read);
2251
2252int icnss_athdiag_write(struct device *dev, uint32_t offset,
2253 uint32_t mem_type, uint32_t data_len,
2254 uint8_t *input)
2255{
2256 int ret = 0;
2257 struct icnss_priv *priv = dev_get_drvdata(dev);
2258
2259 if (priv->magic != ICNSS_MAGIC) {
2260 icnss_pr_err("Invalid drvdata for diag write: dev %pK, data %pK, magic 0x%x\n",
2261 dev, priv, priv->magic);
2262 return -EINVAL;
2263 }
2264
2265 if (!input || data_len == 0
2266 || data_len > WLFW_MAX_DATA_SIZE) {
2267 icnss_pr_err("Invalid parameters for diag write: input %pK, data_len %u\n",
2268 input, data_len);
2269 ret = -EINVAL;
2270 goto out;
2271 }
2272
2273 if (!test_bit(ICNSS_FW_READY, &priv->state) ||
2274 !test_bit(ICNSS_POWER_ON, &priv->state)) {
2275 icnss_pr_err("Invalid state for diag write: 0x%lx\n",
2276 priv->state);
2277 ret = -EINVAL;
2278 goto out;
2279 }
2280
2281 ret = wlfw_athdiag_write_send_sync_msg(priv, offset, mem_type,
2282 data_len, input);
2283out:
2284 return ret;
2285}
2286EXPORT_SYMBOL(icnss_athdiag_write);
2287
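/*
 * Push the WLAN enable configuration, driver mode and host version string
 * to firmware; rejected while firmware is down or a mode is already on.
 */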
2288int icnss_wlan_enable(struct device *dev, struct icnss_wlan_enable_cfg *config,
2289 enum icnss_driver_mode mode,
2290 const char *host_version)
2291{
2292 struct icnss_priv *priv = dev_get_drvdata(dev);
2293
2294 if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
2295 !test_bit(ICNSS_FW_READY, &priv->state)) {
2296 icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n",
2297 priv->state);
2298 return -EINVAL;
2299 }
2300
2301 if (test_bit(ICNSS_MODE_ON, &priv->state)) {
2302 icnss_pr_err("Already Mode on, ignoring wlan_enable state: 0x%lx\n",
2303 priv->state);
2304 return -EINVAL;
2305 }
2306
2307 return icnss_send_wlan_enable_to_fw(priv, config, mode, host_version);
2308}
2309EXPORT_SYMBOL(icnss_wlan_enable);
2310
2311int icnss_wlan_disable(struct device *dev, enum icnss_driver_mode mode)
2312{
2313 struct icnss_priv *priv = dev_get_drvdata(dev);
2314
2315 if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
2316 icnss_pr_dbg("FW down, ignoring wlan_disable state: 0x%lx\n",
2317 priv->state);
2318 return 0;
2319 }
2320
2321 return icnss_send_wlan_disable_to_fw(priv);
2322}
2323EXPORT_SYMBOL(icnss_wlan_disable);
2324
2325bool icnss_is_qmi_disable(struct device *dev)
2326{
2327	return test_bit(SKIP_QMI, &penv->ctrl_params.quirks);
2328}
2329EXPORT_SYMBOL(icnss_is_qmi_disable);
2330
2331int icnss_get_ce_id(struct device *dev, int irq)
2332{
2333 int i;
2334
2335 if (!penv || !penv->pdev || !dev)
2336 return -ENODEV;
2337
2338 for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
2339 if (penv->ce_irqs[i] == irq)
2340 return i;
2341 }
2342
2343 icnss_pr_err("No matching CE id for irq %d\n", irq);
2344
2345 return -EINVAL;
2346}
2347EXPORT_SYMBOL(icnss_get_ce_id);
2348
2349int icnss_get_irq(struct device *dev, int ce_id)
2350{
2351 int irq;
2352
2353 if (!penv || !penv->pdev || !dev)
2354 return -ENODEV;
2355
2356 if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS)
2357 return -EINVAL;
2358
2359 irq = penv->ce_irqs[ce_id];
2360
2361 return irq;
2362}
2363EXPORT_SYMBOL(icnss_get_irq);
2364
2365struct iommu_domain *icnss_smmu_get_domain(struct device *dev)
2366{
2367 struct icnss_priv *priv = dev_get_drvdata(dev);
2368
2369 if (!priv) {
2370 icnss_pr_err("Invalid drvdata: dev %pK\n", dev);
2371 return NULL;
2372 }
2373 return priv->iommu_domain;
2374}
2375EXPORT_SYMBOL(icnss_smmu_get_domain);
2376
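/*
 * Map @paddr into the device's IPA IOVA window. Allocation is a simple
 * bump scheme: the window start is advanced past each successful mapping
 * and is not reclaimed here.
 */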
2377int icnss_smmu_map(struct device *dev,
2378 phys_addr_t paddr, uint32_t *iova_addr, size_t size)
2379{
2380 struct icnss_priv *priv = dev_get_drvdata(dev);
2381 unsigned long iova;
2382 size_t len;
2383 int ret = 0;
2384
2385 if (!priv) {
2386 icnss_pr_err("Invalid drvdata: dev %pK, data %pK\n",
2387 dev, priv);
2388 return -EINVAL;
2389 }
2390
2391 if (!iova_addr) {
2392 icnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
2393 &paddr, size);
2394 return -EINVAL;
2395 }
2396
2397 len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
2398 iova = roundup(priv->smmu_iova_ipa_start, PAGE_SIZE);
2399
2400 if (iova >= priv->smmu_iova_ipa_start + priv->smmu_iova_ipa_len) {
2401 icnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
2402 iova,
2403 &priv->smmu_iova_ipa_start,
2404 priv->smmu_iova_ipa_len);
2405 return -ENOMEM;
2406 }
2407
2408 ret = iommu_map(priv->iommu_domain, iova,
2409 rounddown(paddr, PAGE_SIZE), len,
2410 IOMMU_READ | IOMMU_WRITE);
2411 if (ret) {
2412 icnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
2413 return ret;
2414 }
2415
2416 priv->smmu_iova_ipa_start = iova + len;
2417 *iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
2418
2419 return 0;
2420}
2421EXPORT_SYMBOL(icnss_smmu_map);
2422
2423unsigned int icnss_socinfo_get_serial_number(struct device *dev)
2424{
2425 return socinfo_get_serial_number();
2426}
2427EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
2428
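/*
 * Let the WLAN host driver force a PD restart of the WLAN firmware through
 * the service notifier; only valid when PDR is registered and no recovery
 * is already in progress.
 */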
2429int icnss_trigger_recovery(struct device *dev)
2430{
2431 int ret = 0;
2432 struct icnss_priv *priv = dev_get_drvdata(dev);
2433
2434 if (priv->magic != ICNSS_MAGIC) {
2435 icnss_pr_err("Invalid drvdata: magic 0x%x\n", priv->magic);
2436 ret = -EINVAL;
2437 goto out;
2438 }
2439
2440 if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
2441 icnss_pr_err("PD recovery already in progress: state: 0x%lx\n",
2442 priv->state);
2443 ret = -EPERM;
2444 goto out;
2445 }
2446
2447 if (!test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
2448 icnss_pr_err("PD restart not enabled to trigger recovery: state: 0x%lx\n",
2449 priv->state);
2450 ret = -EOPNOTSUPP;
2451 goto out;
2452 }
2453
2454 if (!priv->service_notifier || !priv->service_notifier[0].handle) {
2455 icnss_pr_err("Invalid handle during recovery, state: 0x%lx\n",
2456 priv->state);
2457 ret = -EINVAL;
2458 goto out;
2459 }
2460
2461 icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
2462 priv->state);
2463
2464 /*
2465 * Initiate PDR, required only for the first instance
2466 */
2467 ret = service_notif_pd_restart(priv->service_notifier[0].name,
2468 priv->service_notifier[0].instance_id);
2469
2470 if (!ret)
2471 set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
2472
2473out:
2474 return ret;
2475}
2476EXPORT_SYMBOL(icnss_trigger_recovery);
2477
2478int icnss_idle_shutdown(struct device *dev)
2479{
2480 struct icnss_priv *priv = dev_get_drvdata(dev);
2481
2482 if (!priv) {
2483		icnss_pr_err("Invalid drvdata: dev %pK\n", dev);
2484 return -EINVAL;
2485 }
2486
2487 if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
2488 test_bit(ICNSS_REJUVENATE, &priv->state)) {
2489		icnss_pr_err("SSR/PDR is already in progress during idle shutdown\n");
2490 return -EBUSY;
2491 }
2492
2493 return icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
2494 ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
2495}
2496EXPORT_SYMBOL(icnss_idle_shutdown);
2497
2498int icnss_idle_restart(struct device *dev)
2499{
2500 struct icnss_priv *priv = dev_get_drvdata(dev);
2501
2502 if (!priv) {
2503		icnss_pr_err("Invalid drvdata: dev %pK\n", dev);
2504 return -EINVAL;
2505 }
2506
2507 if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
2508 test_bit(ICNSS_REJUVENATE, &priv->state)) {
2509		icnss_pr_err("SSR/PDR is already in progress during idle restart\n");
2510 return -EBUSY;
2511 }
2512
2513 return icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_IDLE_RESTART,
2514 ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
2515}
2516EXPORT_SYMBOL(icnss_idle_restart);
2517
2518void icnss_allow_recursive_recovery(struct device *dev)
2519{
2520 struct icnss_priv *priv = dev_get_drvdata(dev);
2521
2522 priv->allow_recursive_recovery = true;
2523
2524 icnss_pr_info("Recursive recovery allowed for WLAN\n");
2525}
2526
2527void icnss_disallow_recursive_recovery(struct device *dev)
2528{
2529 struct icnss_priv *priv = dev_get_drvdata(dev);
2530
2531 priv->allow_recursive_recovery = false;
2532
2533 icnss_pr_info("Recursive recovery disallowed for WLAN\n");
2534}
2535
2536static void icnss_sysfs_create(struct icnss_priv *priv)
2537{
2538 struct kobject *icnss_kobject;
2539 int error = 0;
2540
2541 atomic_set(&priv->is_shutdown, false);
2542
2543 icnss_kobject = kobject_create_and_add("shutdown_wlan", kernel_kobj);
2544 if (!icnss_kobject) {
2545		icnss_pr_err("Unable to create kernel object\n");
2546 return;
2547 }
2548
2549 priv->icnss_kobject = icnss_kobject;
2550
2551 error = sysfs_create_file(icnss_kobject, &icnss_sysfs_attribute.attr);
2552 if (error)
2553		icnss_pr_err("Unable to create icnss sysfs file\n");
2554}
2555
2556static void icnss_sysfs_destroy(struct icnss_priv *priv)
2557{
2558 struct kobject *icnss_kobject;
2559
2560 icnss_kobject = priv->icnss_kobject;
2561 if (icnss_kobject)
2562 kobject_put(icnss_kobject);
2563}
2564
2565static int icnss_get_vbatt_info(struct icnss_priv *priv)
2566{
2567 struct adc_tm_chip *adc_tm_dev = NULL;
2568 struct iio_channel *channel = NULL;
2569 int ret = 0;
2570
2571 adc_tm_dev = get_adc_tm(&priv->pdev->dev, "icnss");
2572 if (PTR_ERR(adc_tm_dev) == -EPROBE_DEFER) {
2573 icnss_pr_err("adc_tm_dev probe defer\n");
2574 return -EPROBE_DEFER;
2575 }
2576
2577 if (IS_ERR(adc_tm_dev)) {
2578 ret = PTR_ERR(adc_tm_dev);
2579 icnss_pr_err("Not able to get ADC dev, VBATT monitoring is disabled: %d\n",
2580 ret);
2581 return ret;
2582 }
2583
2584 channel = iio_channel_get(&priv->pdev->dev, "icnss");
2585 if (PTR_ERR(channel) == -EPROBE_DEFER) {
2586 icnss_pr_err("channel probe defer\n");
2587 return -EPROBE_DEFER;
2588 }
2589
2590 if (IS_ERR(channel)) {
2591 ret = PTR_ERR(channel);
2592 icnss_pr_err("Not able to get VADC dev, VBATT monitoring is disabled: %d\n",
2593 ret);
2594 return ret;
2595 }
2596
2597 priv->adc_tm_dev = adc_tm_dev;
2598 priv->channel = channel;
2599
2600 return 0;
2601}
2602
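/*
 * Parse platform resources from DT: optional VBATT ADC channels,
 * regulators and clocks, then the device specific register space and
 * interrupts (CE IRQs for ADRASTEA, MSI address/vectors for WCN6750).
 */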
2603static int icnss_resource_parse(struct icnss_priv *priv)
2604{
2605 int ret = 0, i = 0;
2606 struct platform_device *pdev = priv->pdev;
2607 struct device *dev = &pdev->dev;
2608 struct resource *res;
2609 u32 int_prop;
2610
2611 if (of_property_read_bool(pdev->dev.of_node, "qcom,icnss-adc_tm")) {
2612 ret = icnss_get_vbatt_info(priv);
2613 if (ret == -EPROBE_DEFER)
2614 goto out;
2615 priv->vbatt_supported = true;
2616 }
2617
2618 ret = icnss_get_vreg(priv);
2619 if (ret) {
2620 icnss_pr_err("Failed to get vreg, err = %d\n", ret);
2621 goto out;
2622 }
2623
2624 ret = icnss_get_clk(priv);
2625 if (ret) {
2626 icnss_pr_err("Failed to get clocks, err = %d\n", ret);
2627 goto put_vreg;
2628 }
2629
2630 if (priv->device_id == ADRASTEA_DEVICE_ID) {
2631 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2632 "membase");
2633 if (!res) {
2634 icnss_pr_err("Memory base not found in DT\n");
2635 ret = -EINVAL;
2636 goto put_clk;
2637 }
2638
2639 priv->mem_base_pa = res->start;
2640 priv->mem_base_va = devm_ioremap(dev, priv->mem_base_pa,
2641 resource_size(res));
2642 if (!priv->mem_base_va) {
2643 icnss_pr_err("Memory base ioremap failed: phy addr: %pa\n",
2644 &priv->mem_base_pa);
2645 ret = -EINVAL;
2646 goto put_clk;
2647 }
2648 icnss_pr_dbg("MEM_BASE pa: %pa, va: 0x%pK\n",
2649 &priv->mem_base_pa,
2650 priv->mem_base_va);
2651
2652 for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
2653 res = platform_get_resource(priv->pdev,
2654 IORESOURCE_IRQ, i);
2655 if (!res) {
2656 icnss_pr_err("Fail to get IRQ-%d\n", i);
2657 ret = -ENODEV;
2658 goto put_clk;
2659 } else {
2660 priv->ce_irqs[i] = res->start;
2661 }
2662 }
2663 } else if (priv->device_id == WCN6750_DEVICE_ID) {
2664 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2665 "msi_addr");
2666 if (!res) {
2667 icnss_pr_err("MSI address not found in DT\n");
2668 ret = -EINVAL;
2669 goto put_clk;
2670 }
2671
2672 priv->msi_addr_pa = res->start;
2673 priv->msi_addr_iova = dma_map_resource(dev, priv->msi_addr_pa,
2674 PAGE_SIZE,
2675 DMA_FROM_DEVICE, 0);
2676 if (dma_mapping_error(dev, priv->msi_addr_iova)) {
2677 icnss_pr_err("MSI: failed to map msi address\n");
2678 priv->msi_addr_iova = 0;
2679 ret = -ENOMEM;
2680 goto put_clk;
2681 }
2682 icnss_pr_dbg("MSI Addr pa: %pa, iova: 0x%pK\n",
2683 &priv->msi_addr_pa,
2684 priv->msi_addr_iova);
2685
2686 ret = of_property_read_u32_index(dev->of_node,
2687 "interrupts",
2688 1,
2689 &int_prop);
2690 if (ret) {
2691			icnss_pr_dbg("Read interrupt prop failed\n");
2692 goto put_clk;
2693 }
2694
2695 priv->msi_base_data = int_prop + 32;
2696		icnss_pr_dbg("MSI Base Data: %d, IRQ Index: %d\n",
2697 priv->msi_base_data, int_prop);
2698
2699 icnss_get_msi_assignment(priv);
2700 for (i = 0; i < msi_config.total_vectors; i++) {
2701 res = platform_get_resource(priv->pdev,
2702 IORESOURCE_IRQ, i);
2703 if (!res) {
2704 icnss_pr_err("Fail to get IRQ-%d\n", i);
2705 ret = -ENODEV;
2706 goto put_clk;
2707 } else {
2708 priv->srng_irqs[i] = res->start;
2709 }
2710 }
2711 }
2712
2713 return 0;
2714
2715put_clk:
2716 icnss_put_clk(priv);
2717put_vreg:
2718 icnss_put_vreg(priv);
2719out:
2720 return ret;
2721}
2722
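/*
 * Set up the MSA region used by the WLAN firmware: map the fixed region
 * described by "qcom,wlan-msa-fixed-region" when present, otherwise
 * allocate "qcom,wlan-msa-memory" bytes of DMA-coherent memory.
 */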
2723static int icnss_msa_dt_parse(struct icnss_priv *priv)
2724{
2725 int ret = 0;
2726 struct platform_device *pdev = priv->pdev;
2727 struct device *dev = &pdev->dev;
2728 struct device_node *np = NULL;
2729 u64 prop_size = 0;
2730 const __be32 *addrp = NULL;
2731
2732 np = of_parse_phandle(dev->of_node,
2733 "qcom,wlan-msa-fixed-region", 0);
2734 if (np) {
2735 addrp = of_get_address(np, 0, &prop_size, NULL);
2736 if (!addrp) {
2737 icnss_pr_err("Failed to get assigned-addresses or property\n");
2738 ret = -EINVAL;
2739 of_node_put(np);
2740 goto out;
2741 }
2742
2743 priv->msa_pa = of_translate_address(np, addrp);
2744 if (priv->msa_pa == OF_BAD_ADDR) {
2745 icnss_pr_err("Failed to translate MSA PA from device-tree\n");
2746 ret = -EINVAL;
2747 of_node_put(np);
2748 goto out;
2749 }
2750
2751 of_node_put(np);
2752
2753 priv->msa_va = memremap(priv->msa_pa,
2754 (unsigned long)prop_size, MEMREMAP_WT);
2755 if (!priv->msa_va) {
2756 icnss_pr_err("MSA PA ioremap failed: phy addr: %pa\n",
2757 &priv->msa_pa);
2758 ret = -EINVAL;
2759 goto out;
2760 }
2761 priv->msa_mem_size = prop_size;
2762 } else {
2763 ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory",
2764 &priv->msa_mem_size);
2765 if (ret || priv->msa_mem_size == 0) {
2766 icnss_pr_err("Fail to get MSA Memory Size: %u ret: %d\n",
2767 priv->msa_mem_size, ret);
2768 goto out;
2769 }
2770
2771 priv->msa_va = dmam_alloc_coherent(&pdev->dev,
2772 priv->msa_mem_size, &priv->msa_pa, GFP_KERNEL);
2773
2774 if (!priv->msa_va) {
2775 icnss_pr_err("DMA alloc failed for MSA\n");
2776 ret = -ENOMEM;
2777 goto out;
2778 }
2779 }
2780
2781 icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%pK MSA Memory Size: 0x%x\n",
2782 &priv->msa_pa, (void *)priv->msa_va, priv->msa_mem_size);
2783
2784 return 0;
2785
2786out:
2787 return ret;
2788}
2789
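/*
 * Pick up the SMMU IOVA configuration from DT: the DMA address pool and
 * the optional "smmu_iova_ipa" window used by icnss_smmu_map(). Missing
 * properties are logged but not treated as fatal.
 */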
2790static int icnss_smmu_dt_parse(struct icnss_priv *priv)
2791{
2792 int ret = 0;
2793 struct platform_device *pdev = priv->pdev;
2794 struct device *dev = &pdev->dev;
2795 struct resource *res;
2796 u32 addr_win[2];
2797
2798 ret = of_property_read_u32_array(dev->of_node,
2799 "qcom,iommu-dma-addr-pool",
2800 addr_win,
2801 ARRAY_SIZE(addr_win));
2802
2803 if (ret) {
2804 icnss_pr_err("SMMU IOVA base not found\n");
2805 } else {
2806 priv->iommu_domain =
2807 iommu_get_domain_for_dev(&pdev->dev);
2808
2809 res = platform_get_resource_byname(pdev,
2810 IORESOURCE_MEM,
2811 "smmu_iova_ipa");
2812 if (!res) {
2813 icnss_pr_err("SMMU IOVA IPA not found\n");
2814 } else {
2815 priv->smmu_iova_ipa_start = res->start;
2816 priv->smmu_iova_ipa_len = resource_size(res);
2817 icnss_pr_dbg("SMMU IOVA IPA start: %pa, len: %zx\n",
2818 &priv->smmu_iova_ipa_start,
2819 priv->smmu_iova_ipa_len);
2820 }
2821 }
2822
2823 return 0;
2824}
2825
2826static const struct platform_device_id icnss_platform_id_table[] = {
2827 { .name = "wcn6750", .driver_data = WCN6750_DEVICE_ID, },
2828 { .name = "adrastea", .driver_data = ADRASTEA_DEVICE_ID, },
2829 { },
2830};
2831
2832static const struct of_device_id icnss_dt_match[] = {
2833 {
2834 .compatible = "qcom,wcn6750",
2835 .data = (void *)&icnss_platform_id_table[0]},
2836 {
2837 .compatible = "qcom,icnss",
2838 .data = (void *)&icnss_platform_id_table[1]},
2839 { },
2840};
2841
2842MODULE_DEVICE_TABLE(of, icnss_dt_match);
2843
2844static void icnss_init_control_params(struct icnss_priv *priv)
2845{
2846 priv->ctrl_params.qmi_timeout = WLFW_TIMEOUT;
2847 priv->ctrl_params.quirks = ICNSS_QUIRKS_DEFAULT;
2848 priv->ctrl_params.bdf_type = ICNSS_BDF_TYPE_DEFAULT;
2849
2850 if (of_property_read_bool(priv->pdev->dev.of_node,
2851 "cnss-daemon-support")) {
2852 priv->ctrl_params.quirks |= BIT(ENABLE_DAEMON_SUPPORT);
2853 }
2854}
2855
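/*
 * Platform driver probe: parse DT resources, set up the event and SOC wake
 * work queues, register the WLFW service, and expose debugfs, sysfs and
 * generic netlink interfaces.
 */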
2856static int icnss_probe(struct platform_device *pdev)
2857{
2858 int ret = 0;
2859 struct device *dev = &pdev->dev;
2860 struct icnss_priv *priv;
2861 const struct of_device_id *of_id;
2862 const struct platform_device_id *device_id;
2863
2864 if (dev_get_drvdata(dev)) {
2865 icnss_pr_err("Driver is already initialized\n");
2866 return -EEXIST;
2867 }
2868
2869 of_id = of_match_device(icnss_dt_match, &pdev->dev);
2870 if (!of_id || !of_id->data) {
2871 icnss_pr_err("Failed to find of match device!\n");
2872 ret = -ENODEV;
2873 goto out;
2874 }
2875
2876 device_id = of_id->data;
2877
2878 icnss_pr_dbg("Platform driver probe\n");
2879
2880 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
2881 if (!priv)
2882 return -ENOMEM;
2883
2884 priv->magic = ICNSS_MAGIC;
2885 dev_set_drvdata(dev, priv);
2886
2887 priv->pdev = pdev;
2888 priv->device_id = device_id->driver_data;
2889 INIT_LIST_HEAD(&priv->vreg_list);
2890 INIT_LIST_HEAD(&priv->clk_list);
2891 icnss_allow_recursive_recovery(dev);
2892
2893 icnss_init_control_params(priv);
2894
2895 ret = icnss_resource_parse(priv);
2896 if (ret)
2897 goto out;
2898
2899 ret = icnss_msa_dt_parse(priv);
2900 if (ret)
2901 goto out;
2902
2903 ret = icnss_smmu_dt_parse(priv);
2904 if (ret)
2905 goto out;
2906
2907 spin_lock_init(&priv->event_lock);
2908 spin_lock_init(&priv->on_off_lock);
2909	spin_lock_init(&priv->soc_wake_msg_lock);
2910	mutex_init(&priv->dev_lock);
2911
2912 priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
2913 if (!priv->event_wq) {
2914 icnss_pr_err("Workqueue creation failed\n");
2915 ret = -EFAULT;
2916 goto smmu_cleanup;
2917 }
2918
2919 INIT_WORK(&priv->event_work, icnss_driver_event_work);
2920 INIT_LIST_HEAD(&priv->event_list);
2921
2922	priv->soc_wake_wq = alloc_workqueue("icnss_soc_wake_event",
2923 WQ_UNBOUND, 1);
2924 if (!priv->soc_wake_wq) {
2925 icnss_pr_err("Soc wake Workqueue creation failed\n");
2926 ret = -EFAULT;
2927 goto out_destroy_wq;
2928 }
2929
2930 INIT_WORK(&priv->soc_wake_msg_work, icnss_soc_wake_msg_work);
2931 INIT_LIST_HEAD(&priv->soc_wake_msg_list);
2932
2933	ret = icnss_register_fw_service(priv);
2934 if (ret < 0) {
2935 icnss_pr_err("fw service registration failed: %d\n", ret);
2936		goto out_destroy_soc_wq;
2937	}
2938
2939 icnss_enable_recovery(priv);
2940
2941 icnss_debugfs_create(priv);
2942
2943 icnss_sysfs_create(priv);
2944
2945 ret = device_init_wakeup(&priv->pdev->dev, true);
2946 if (ret)
2947 icnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
2948 ret);
2949
2950 icnss_set_plat_priv(priv);
2951
2952 init_completion(&priv->unblock_shutdown);
2953
2954	ret = icnss_genl_init();
2955 if (ret < 0)
2956 icnss_pr_err("ICNSS genl init failed %d\n", ret);
2957
2958	icnss_pr_info("Platform driver probed successfully\n");
2959
2960 return 0;
2961
2962out_destroy_soc_wq:
2963 destroy_workqueue(priv->soc_wake_wq);
2964out_destroy_wq:
2965 destroy_workqueue(priv->event_wq);
2966smmu_cleanup:
2967 priv->iommu_domain = NULL;
2968out:
2969 dev_set_drvdata(dev, NULL);
2970
2971 return ret;
2972}
2973
2974static int icnss_remove(struct platform_device *pdev)
2975{
2976 struct icnss_priv *priv = dev_get_drvdata(&pdev->dev);
2977
2978 icnss_pr_info("Removing driver: state: 0x%lx\n", priv->state);
2979
2980	icnss_genl_exit();
2981
2982	device_init_wakeup(&priv->pdev->dev, false);
2983
2984 icnss_debugfs_destroy(priv);
2985
2986 icnss_sysfs_destroy(priv);
2987
2988 complete_all(&priv->unblock_shutdown);
2989
2990 icnss_modem_ssr_unregister_notifier(priv);
2991
2992 destroy_ramdump_device(priv->msa0_dump_dev);
2993
2994 icnss_pdr_unregister_notifier(priv);
2995
2996 icnss_unregister_fw_service(priv);
2997 if (priv->event_wq)
2998 destroy_workqueue(priv->event_wq);
2999
3000	if (priv->soc_wake_wq)
3001 destroy_workqueue(priv->soc_wake_wq);
3002
3003	priv->iommu_domain = NULL;
3004
3005 icnss_hw_power_off(priv);
3006
3007 dev_set_drvdata(&pdev->dev, NULL);
3008
3009 return 0;
3010}
3011
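/*
 * System PM callbacks: each hook delegates to the registered WLAN driver's
 * corresponding op once that driver has probed, and updates the suspend/
 * resume statistics accordingly.
 */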
3012#ifdef CONFIG_PM_SLEEP
3013static int icnss_pm_suspend(struct device *dev)
3014{
3015 struct icnss_priv *priv = dev_get_drvdata(dev);
3016 int ret = 0;
3017
3018 if (priv->magic != ICNSS_MAGIC) {
3019 icnss_pr_err("Invalid drvdata for pm suspend: dev %pK, data %pK, magic 0x%x\n",
3020 dev, priv, priv->magic);
3021 return -EINVAL;
3022 }
3023
3024 icnss_pr_vdbg("PM Suspend, state: 0x%lx\n", priv->state);
3025
3026 if (!priv->ops || !priv->ops->pm_suspend ||
3027 !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
3028 goto out;
3029
3030 ret = priv->ops->pm_suspend(dev);
3031
3032out:
3033 if (ret == 0) {
3034 priv->stats.pm_suspend++;
3035 set_bit(ICNSS_PM_SUSPEND, &priv->state);
3036 } else {
3037 priv->stats.pm_suspend_err++;
3038 }
3039 return ret;
3040}
3041
3042static int icnss_pm_resume(struct device *dev)
3043{
3044 struct icnss_priv *priv = dev_get_drvdata(dev);
3045 int ret = 0;
3046
3047 if (priv->magic != ICNSS_MAGIC) {
3048 icnss_pr_err("Invalid drvdata for pm resume: dev %pK, data %pK, magic 0x%x\n",
3049 dev, priv, priv->magic);
3050 return -EINVAL;
3051 }
3052
3053 icnss_pr_vdbg("PM resume, state: 0x%lx\n", priv->state);
3054
3055 if (!priv->ops || !priv->ops->pm_resume ||
3056 !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
3057 goto out;
3058
3059	if (priv->device_id == WCN6750_DEVICE_ID) {
3060 ret = wlfw_exit_power_save_send_msg(priv);
3061 if (ret) {
3062 priv->stats.pm_resume_err++;
3063 return ret;
3064 }
3065 }
3066
3067	ret = priv->ops->pm_resume(dev);
3068
3069out:
3070 if (ret == 0) {
3071 priv->stats.pm_resume++;
3072 clear_bit(ICNSS_PM_SUSPEND, &priv->state);
3073 } else {
3074 priv->stats.pm_resume_err++;
3075 }
3076 return ret;
3077}
3078
3079static int icnss_pm_suspend_noirq(struct device *dev)
3080{
3081 struct icnss_priv *priv = dev_get_drvdata(dev);
3082 int ret = 0;
3083
3084 if (priv->magic != ICNSS_MAGIC) {
3085 icnss_pr_err("Invalid drvdata for pm suspend_noirq: dev %pK, data %pK, magic 0x%x\n",
3086 dev, priv, priv->magic);
3087 return -EINVAL;
3088 }
3089
3090 icnss_pr_vdbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
3091
3092 if (!priv->ops || !priv->ops->suspend_noirq ||
3093 !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
3094 goto out;
3095
3096 ret = priv->ops->suspend_noirq(dev);
3097
3098out:
3099 if (ret == 0) {
3100 priv->stats.pm_suspend_noirq++;
3101 set_bit(ICNSS_PM_SUSPEND_NOIRQ, &priv->state);
3102 } else {
3103 priv->stats.pm_suspend_noirq_err++;
3104 }
3105 return ret;
3106}
3107
3108static int icnss_pm_resume_noirq(struct device *dev)
3109{
3110 struct icnss_priv *priv = dev_get_drvdata(dev);
3111 int ret = 0;
3112
3113 if (priv->magic != ICNSS_MAGIC) {
3114 icnss_pr_err("Invalid drvdata for pm resume_noirq: dev %pK, data %pK, magic 0x%x\n",
3115 dev, priv, priv->magic);
3116 return -EINVAL;
3117 }
3118
3119 icnss_pr_vdbg("PM resume_noirq, state: 0x%lx\n", priv->state);
3120
3121 if (!priv->ops || !priv->ops->resume_noirq ||
3122 !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
3123 goto out;
3124
3125 ret = priv->ops->resume_noirq(dev);
3126
3127out:
3128 if (ret == 0) {
3129 priv->stats.pm_resume_noirq++;
3130 clear_bit(ICNSS_PM_SUSPEND_NOIRQ, &priv->state);
3131 } else {
3132 priv->stats.pm_resume_noirq_err++;
3133 }
3134 return ret;
3135}
3136#endif
3137
3138static const struct dev_pm_ops icnss_pm_ops = {
3139 SET_SYSTEM_SLEEP_PM_OPS(icnss_pm_suspend,
3140 icnss_pm_resume)
3141 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(icnss_pm_suspend_noirq,
3142 icnss_pm_resume_noirq)
3143};
3144
3145
3146static struct platform_driver icnss_driver = {
3147 .probe = icnss_probe,
3148 .remove = icnss_remove,
3149 .driver = {
3150 .name = "icnss2",
3151 .pm = &icnss_pm_ops,
3152 .of_match_table = icnss_dt_match,
3153 },
3154};
3155
3156static int __init icnss_initialize(void)
3157{
3158 icnss_debug_init();
3159 return platform_driver_register(&icnss_driver);
3160}
3161
3162static void __exit icnss_exit(void)
3163{
3164 platform_driver_unregister(&icnss_driver);
3165 icnss_debug_deinit();
3166}
3167
3168
3169module_init(icnss_initialize);
3170module_exit(icnss_exit);
3171
3172MODULE_LICENSE("GPL v2");
3173MODULE_DESCRIPTION(DEVICE "iWCN CORE platform driver");