/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/coresight.h>
#include <linux/coresight-cti.h>
#include <linux/workqueue.h>
#include <soc/qcom/sysmon.h>
#include "esoc-mdm.h"

enum gpio_update_config {
	GPIO_UPDATE_BOOTING_CONFIG = 1,
	GPIO_UPDATE_RUNNING_CONFIG,
};

enum irq_mask {
	IRQ_ERRFATAL = 0x1,
	IRQ_STATUS = 0x2,
	IRQ_PBLRDY = 0x4,
};

static struct gpio_map {
	const char *name;
	int index;
} gpio_map[] = {
	{"qcom,mdm2ap-errfatal-gpio", MDM2AP_ERRFATAL},
	{"qcom,ap2mdm-errfatal-gpio", AP2MDM_ERRFATAL},
	{"qcom,mdm2ap-status-gpio", MDM2AP_STATUS},
	{"qcom,ap2mdm-status-gpio", AP2MDM_STATUS},
	{"qcom,mdm2ap-pblrdy-gpio", MDM2AP_PBLRDY},
	{"qcom,ap2mdm-wakeup-gpio", AP2MDM_WAKEUP},
	{"qcom,ap2mdm-chnlrdy-gpio", AP2MDM_CHNLRDY},
	{"qcom,mdm2ap-wakeup-gpio", MDM2AP_WAKEUP},
	{"qcom,ap2mdm-vddmin-gpio", AP2MDM_VDDMIN},
	{"qcom,mdm2ap-vddmin-gpio", MDM2AP_VDDMIN},
	{"qcom,ap2mdm-pmic-pwr-en-gpio", AP2MDM_PMIC_PWR_EN},
	{"qcom,mdm-link-detect-gpio", MDM_LINK_DETECT},
};

/* Required gpios */
static const int required_gpios[] = {
	MDM2AP_ERRFATAL,
	AP2MDM_ERRFATAL,
	MDM2AP_STATUS,
	AP2MDM_STATUS,
};

static void mdm_debug_gpio_show(struct mdm_ctrl *mdm)
{
	struct device *dev = mdm->dev;

	dev_dbg(dev, "%s: MDM2AP_ERRFATAL gpio = %d\n",
			__func__, MDM_GPIO(mdm, MDM2AP_ERRFATAL));
	dev_dbg(dev, "%s: AP2MDM_ERRFATAL gpio = %d\n",
			__func__, MDM_GPIO(mdm, AP2MDM_ERRFATAL));
	dev_dbg(dev, "%s: MDM2AP_STATUS gpio = %d\n",
			__func__, MDM_GPIO(mdm, MDM2AP_STATUS));
	dev_dbg(dev, "%s: AP2MDM_STATUS gpio = %d\n",
			__func__, MDM_GPIO(mdm, AP2MDM_STATUS));
	dev_dbg(dev, "%s: AP2MDM_SOFT_RESET gpio = %d\n",
			__func__, MDM_GPIO(mdm, AP2MDM_SOFT_RESET));
	dev_dbg(dev, "%s: MDM2AP_WAKEUP gpio = %d\n",
			__func__, MDM_GPIO(mdm, MDM2AP_WAKEUP));
	dev_dbg(dev, "%s: AP2MDM_WAKEUP gpio = %d\n",
			__func__, MDM_GPIO(mdm, AP2MDM_WAKEUP));
	dev_dbg(dev, "%s: AP2MDM_PMIC_PWR_EN gpio = %d\n",
			__func__, MDM_GPIO(mdm, AP2MDM_PMIC_PWR_EN));
	dev_dbg(dev, "%s: MDM2AP_PBLRDY gpio = %d\n",
			__func__, MDM_GPIO(mdm, MDM2AP_PBLRDY));
	dev_dbg(dev, "%s: AP2MDM_VDDMIN gpio = %d\n",
			__func__, MDM_GPIO(mdm, AP2MDM_VDDMIN));
	dev_dbg(dev, "%s: MDM2AP_VDDMIN gpio = %d\n",
			__func__, MDM_GPIO(mdm, MDM2AP_VDDMIN));
}

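/* Re-enable the modem IRQs that are currently masked and let errfatal/status wake the AP. */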
static void mdm_enable_irqs(struct mdm_ctrl *mdm)
{
	if (!mdm)
		return;
	if (mdm->irq_mask & IRQ_ERRFATAL) {
		enable_irq(mdm->errfatal_irq);
		irq_set_irq_wake(mdm->errfatal_irq, 1);
		mdm->irq_mask &= ~IRQ_ERRFATAL;
	}
	if (mdm->irq_mask & IRQ_STATUS) {
		enable_irq(mdm->status_irq);
		irq_set_irq_wake(mdm->status_irq, 1);
		mdm->irq_mask &= ~IRQ_STATUS;
	}
	if (mdm->irq_mask & IRQ_PBLRDY) {
		enable_irq(mdm->pblrdy_irq);
		mdm->irq_mask &= ~IRQ_PBLRDY;
	}
}

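/* Mask the modem IRQs and drop their wakeup capability. */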
static void mdm_disable_irqs(struct mdm_ctrl *mdm)
{
	if (!mdm)
		return;
	if (!(mdm->irq_mask & IRQ_ERRFATAL)) {
		irq_set_irq_wake(mdm->errfatal_irq, 0);
		disable_irq_nosync(mdm->errfatal_irq);
		mdm->irq_mask |= IRQ_ERRFATAL;
	}
	if (!(mdm->irq_mask & IRQ_STATUS)) {
		irq_set_irq_wake(mdm->status_irq, 0);
		disable_irq_nosync(mdm->status_irq);
		mdm->irq_mask |= IRQ_STATUS;
	}
	if (!(mdm->irq_mask & IRQ_PBLRDY)) {
		disable_irq_nosync(mdm->pblrdy_irq);
		mdm->irq_mask |= IRQ_PBLRDY;
	}
}

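/* Free all requested GPIOs and destroy the mdm workqueue. */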
static void mdm_deconfigure_ipc(struct mdm_ctrl *mdm)
{
	int i;

	for (i = 0; i < NUM_GPIOS; ++i) {
		if (gpio_is_valid(MDM_GPIO(mdm, i)))
			gpio_free(MDM_GPIO(mdm, i));
	}
	if (mdm->mdm_queue) {
		destroy_workqueue(mdm->mdm_queue);
		mdm->mdm_queue = NULL;
	}
}

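/* Select the pinctrl state for either the booting or the running GPIO configuration. */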
static void mdm_update_gpio_configs(struct mdm_ctrl *mdm,
				enum gpio_update_config gpio_config)
{
	struct pinctrl_state *pins_state = NULL;

	/* Some gpio configuration may need updating after modem bootup. */
	switch (gpio_config) {
	case GPIO_UPDATE_RUNNING_CONFIG:
		pins_state = mdm->gpio_state_running;
		break;
	case GPIO_UPDATE_BOOTING_CONFIG:
		pins_state = mdm->gpio_state_booting;
		break;
	default:
		pins_state = NULL;
		dev_err(mdm->dev, "%s: called with no config\n", __func__);
		break;
	}
	if (pins_state != NULL) {
		if (pinctrl_select_state(mdm->pinctrl, pins_state))
			dev_err(mdm->dev, "switching gpio config failed\n");
	}
}

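/* Pulse the CTI trigger once per boot cycle when debug mode is enabled. */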
static void mdm_trigger_dbg(struct mdm_ctrl *mdm)
{
	int ret;

	if (mdm->dbg_mode && !mdm->trig_cnt) {
		ret = coresight_cti_pulse_trig(mdm->cti, MDM_CTI_CH);
		mdm->trig_cnt++;
		if (ret)
			dev_err(mdm->dev, "unable to trigger cti pulse on\n");
	}
}

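/* Execute power and debug commands issued by the esoc framework. */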
static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
{
	unsigned long end_time;
	bool status_down = false;
	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
	struct device *dev = mdm->dev;
	int ret;
	bool graceful_shutdown = false;

	switch (cmd) {
	case ESOC_PWR_ON:
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
		mdm_enable_irqs(mdm);
		mdm->init = 1;
		mdm_do_first_power_on(mdm);
		break;
	case ESOC_PWR_OFF:
		mdm_disable_irqs(mdm);
		mdm->debug = 0;
		mdm->ready = false;
		mdm->trig_cnt = 0;
		graceful_shutdown = true;
		ret = sysmon_send_shutdown(&esoc->subsys);
		if (ret) {
			dev_err(mdm->dev, "sysmon shutdown fail, ret = %d\n",
									ret);
			graceful_shutdown = false;
			goto force_poff;
		}
		dev_dbg(mdm->dev, "Waiting for status gpio to go low\n");
		status_down = false;
		end_time = jiffies + msecs_to_jiffies(10000);
		while (time_before(jiffies, end_time)) {
			if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS))
									== 0) {
				dev_dbg(dev, "Status went low\n");
				status_down = true;
				break;
			}
			msleep(100);
		}
		if (status_down)
			dev_dbg(dev, "shutdown successful\n");
		else
			dev_err(mdm->dev, "graceful poff ipc fail\n");
		break;
force_poff:
	case ESOC_FORCE_PWR_OFF:
		if (!graceful_shutdown) {
			mdm_disable_irqs(mdm);
			mdm->debug = 0;
			mdm->ready = false;
			mdm->trig_cnt = 0;

			dev_err(mdm->dev, "Graceful shutdown fail, ret = %d\n",
					esoc->subsys.sysmon_shutdown_ret);
		}

		/*
		 * Force a shutdown of the mdm. This is required in order
		 * to prevent the mdm from immediately powering back on
		 * after the shutdown.
		 */
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
		esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
		mdm_power_down(mdm);
		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
		break;
	case ESOC_RESET:
		mdm_toggle_soft_reset(mdm, false);
		break;
	case ESOC_PREPARE_DEBUG:
		/*
		 * Disable all irqs except the request irq (pblrdy),
		 * force a reset of the mdm by signaling an APQ crash,
		 * and wait till the mdm is ready for ramdumps.
		 */
		mdm->ready = false;
		cancel_delayed_work(&mdm->mdm2ap_status_check_work);
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		dev_dbg(mdm->dev, "set ap2mdm errfatal to force reset\n");
		msleep(mdm->ramdump_delay_ms);
		break;
	case ESOC_EXE_DEBUG:
		mdm->debug = 1;
		mdm->trig_cnt = 0;
		mdm_toggle_soft_reset(mdm, false);
		/*
		 * Wait for ramdumps to be collected, then power down
		 * the mdm and switch the gpios to the booting config.
		 */
		wait_for_completion(&mdm->debug_done);
		if (mdm->debug_fail) {
			dev_err(mdm->dev, "unable to collect ramdumps\n");
			mdm->debug = 0;
			return -EIO;
		}
		dev_dbg(mdm->dev, "ramdump collection done\n");
		mdm->debug = 0;
		init_completion(&mdm->debug_done);
		break;
	case ESOC_EXIT_DEBUG:
		/*
		 * Deassert the APQ to mdm err fatal signal and
		 * power on the mdm.
		 */
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
		dev_dbg(mdm->dev, "exiting debug state after power on\n");
		mdm->get_restart_reason = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

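/* Delayed work: flag an unexpected reset if MDM2AP_STATUS never went high after image transfer. */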
static void mdm2ap_status_check(struct work_struct *work)
{
	struct mdm_ctrl *mdm =
		container_of(work, struct mdm_ctrl,
				mdm2ap_status_check_work.work);
	struct device *dev = mdm->dev;
	struct esoc_clink *esoc = mdm->esoc;

	if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0) {
		dev_dbg(dev, "MDM2AP_STATUS did not go high\n");
		esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
	}
}

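/* Work item run when MDM2AP_STATUS goes high: move the GPIOs to the running configuration. */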
static void mdm_status_fn(struct work_struct *work)
{
	struct mdm_ctrl *mdm =
		container_of(work, struct mdm_ctrl, mdm_status_work);
	struct device *dev = mdm->dev;
	int value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));

	dev_dbg(dev, "%s: status:%d\n", __func__, value);
	/* Update gpio configuration to "running" config. */
	mdm_update_gpio_configs(mdm, GPIO_UPDATE_RUNNING_CONFIG);
}

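/* Query the restart reason from the modem over sysmon, retrying a bounded number of times. */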
static void mdm_get_restart_reason(struct work_struct *work)
{
	int ret, ntries = 0;
	char sfr_buf[RD_BUF_SIZE];
	struct mdm_ctrl *mdm =
		container_of(work, struct mdm_ctrl, restart_reason_work);
	struct device *dev = mdm->dev;

	do {
		ret = sysmon_get_reason(&mdm->esoc->subsys, sfr_buf,
							sizeof(sfr_buf));
		if (!ret) {
			dev_err(dev, "mdm restart reason is %s\n", sfr_buf);
			break;
		}
		msleep(SFR_RETRY_INTERVAL);
	} while (++ntries < SFR_MAX_RETRIES);
	if (ntries == SFR_MAX_RETRIES)
		dev_dbg(dev, "%s: Error retrieving restart reason: %d\n",
							__func__, ret);
	mdm->get_restart_reason = false;
}

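/* Handle state notifications from the esoc framework (boot, image transfer, debug and crash events). */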
static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
{
	bool status_down;
	uint64_t timeout;
	uint64_t now;
	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
	struct device *dev = mdm->dev;

	switch (notify) {
	case ESOC_IMG_XFER_DONE:
		if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0)
			schedule_delayed_work(&mdm->mdm2ap_status_check_work,
				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
		break;
	case ESOC_BOOT_DONE:
		esoc_clink_evt_notify(ESOC_RUN_STATE, esoc);
		break;
	case ESOC_IMG_XFER_RETRY:
		mdm->init = 1;
		mdm_toggle_soft_reset(mdm, false);
		break;
	case ESOC_IMG_XFER_FAIL:
		esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
		break;
	case ESOC_BOOT_FAIL:
		esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
		break;
	case ESOC_UPGRADE_AVAILABLE:
		break;
	case ESOC_DEBUG_DONE:
		mdm->debug_fail = false;
		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
		complete(&mdm->debug_done);
		break;
	case ESOC_DEBUG_FAIL:
		mdm->debug_fail = true;
		complete(&mdm->debug_done);
		break;
	case ESOC_PRIMARY_CRASH:
		mdm_disable_irqs(mdm);
		status_down = false;
		dev_dbg(dev, "signal apq err fatal for graceful restart\n");
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		timeout = local_clock();
		do_div(timeout, NSEC_PER_MSEC);
		timeout += MDM_MODEM_TIMEOUT;
		do {
			if (gpio_get_value(MDM_GPIO(mdm,
						MDM2AP_STATUS)) == 0) {
				status_down = true;
				break;
			}
			now = local_clock();
			do_div(now, NSEC_PER_MSEC);
		} while (!time_after64(now, timeout));

		if (!status_down) {
			dev_err(mdm->dev, "%s MDM2AP status did not go low\n",
								__func__);
			mdm_toggle_soft_reset(mdm, true);
		}
		break;
	case ESOC_PRIMARY_REBOOT:
		mdm_disable_irqs(mdm);
		mdm->debug = 0;
		mdm->ready = false;
		mdm_cold_reset(mdm);
		break;
	}
}

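/* IRQ handler for MDM2AP_ERRFATAL: report a fatal modem error to the esoc framework. */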
static irqreturn_t mdm_errfatal(int irq, void *dev_id)
{
	struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
	struct esoc_clink *esoc;
	struct device *dev;

	if (!mdm)
		goto no_mdm_irq;
	dev = mdm->dev;
	if (!mdm->ready)
		goto mdm_pwroff_irq;
	esoc = mdm->esoc;
	dev_err(dev, "%s: mdm sent errfatal interrupt\n",
							__func__);
	/* disable irq ?*/
	esoc_clink_evt_notify(ESOC_ERR_FATAL, esoc);
	return IRQ_HANDLED;
mdm_pwroff_irq:
	dev_info(dev, "errfatal irq when in pwroff\n");
no_mdm_irq:
	return IRQ_HANDLED;
}

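/* IRQ handler for MDM2AP_STATUS edges: flag unexpected resets, or mark the modem ready when status goes high. */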
static irqreturn_t mdm_status_change(int irq, void *dev_id)
{
	int value;
	struct esoc_clink *esoc;
	struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
	struct device *dev;

	if (!mdm)
		return IRQ_HANDLED;
	dev = mdm->dev;
	esoc = mdm->esoc;
	value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
	if (value == 0 && mdm->ready) {
		dev_err(dev, "unexpected reset external modem\n");
		esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
	} else if (value == 1) {
		cancel_delayed_work(&mdm->mdm2ap_status_check_work);
		dev_dbg(dev, "status = 1: mdm is now ready\n");
		mdm->ready = true;
		mdm_trigger_dbg(mdm);
		queue_work(mdm->mdm_queue, &mdm->mdm_status_work);
		if (mdm->get_restart_reason)
			queue_work(mdm->mdm_queue, &mdm->restart_reason_work);
	}
	return IRQ_HANDLED;
}

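/* IRQ handler for MDM2AP_PBLRDY: kick off image transfer on first boot, or ramdump collection in debug mode. */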
static irqreturn_t mdm_pblrdy_change(int irq, void *dev_id)
{
	struct mdm_ctrl *mdm;
	struct device *dev;
	struct esoc_clink *esoc;

	mdm = (struct mdm_ctrl *)dev_id;
	if (!mdm)
		return IRQ_HANDLED;
	esoc = mdm->esoc;
	dev = mdm->dev;
	dev_dbg(dev, "pbl ready %d:\n",
			gpio_get_value(MDM_GPIO(mdm, MDM2AP_PBLRDY)));
	if (mdm->init) {
		mdm->init = 0;
		mdm_trigger_dbg(mdm);
		esoc_clink_queue_request(ESOC_REQ_IMG, esoc);
		return IRQ_HANDLED;
	}
	if (mdm->debug)
		esoc_clink_queue_request(ESOC_REQ_DEBUG, esoc);
	return IRQ_HANDLED;
}

static int mdm_get_status(u32 *status, struct esoc_clink *esoc)
{
	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);

	if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0)
		*status = 0;
	else
		*status = 1;
	return 0;
}

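/* Check the debug cookie in the mapped debug register; when set, grab the coresight CTI handle and map its trigger to the mdm channel. */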
static void mdm_configure_debug(struct mdm_ctrl *mdm)
{
	void __iomem *addr;
	unsigned int val;
	int ret;
	struct device_node *node = mdm->dev->of_node;

	addr = of_iomap(node, 0);
	/* of_iomap() returns NULL on failure, not an ERR_PTR value. */
	if (!addr) {
		dev_err(mdm->dev, "failed to get debug base address\n");
		return;
	}
	mdm->dbg_addr = addr + MDM_DBG_OFFSET;
	val = readl_relaxed(mdm->dbg_addr);
	if (val == MDM_DBG_MODE) {
		mdm->dbg_mode = true;
		mdm->cti = coresight_cti_get(MDM_CTI_NAME);
		if (IS_ERR(mdm->cti)) {
			dev_err(mdm->dev, "unable to get cti handle\n");
			goto cti_get_err;
		}
		ret = coresight_cti_map_trigout(mdm->cti, MDM_CTI_TRIG,
						MDM_CTI_CH);
		if (ret) {
			dev_err(mdm->dev, "unable to map trig to channel\n");
			goto cti_map_err;
		}
		mdm->trig_cnt = 0;
	} else {
		dev_dbg(mdm->dev, "Not in debug mode. debug mode = %u\n", val);
		mdm->dbg_mode = false;
	}
	return;
cti_map_err:
	coresight_cti_put(mdm->cti);
cti_get_err:
	mdm->dbg_mode = false;
}

/* Fail if any of the required gpios is absent. */
static int mdm_dt_parse_gpios(struct mdm_ctrl *mdm)
{
	int i, val, rc = 0;
	struct device_node *node = mdm->dev->of_node;

	for (i = 0; i < NUM_GPIOS; i++)
		mdm->gpios[i] = INVALID_GPIO;

	for (i = 0; i < ARRAY_SIZE(gpio_map); i++) {
		val = of_get_named_gpio(node, gpio_map[i].name, 0);
		if (val >= 0)
			MDM_GPIO(mdm, gpio_map[i].index) = val;
	}
	/* These two are special because they can be inverted. */
	/* Verify that the required gpios have valid values. */
	for (i = 0; i < ARRAY_SIZE(required_gpios); i++) {
		if (MDM_GPIO(mdm, required_gpios[i]) == INVALID_GPIO) {
			rc = -ENXIO;
			break;
		}
	}
	mdm_debug_gpio_show(mdm);
	return rc;
}

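/* Request the AP/MDM handshake GPIOs, set their directions, and install the errfatal, status and pblrdy IRQ handlers. */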
static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
{
	int ret = -1;
	int irq;
	struct device *dev = mdm->dev;
	struct device_node *node = pdev->dev.of_node;

	ret = of_property_read_u32(node, "qcom,ramdump-timeout-ms",
						&mdm->dump_timeout_ms);
	if (ret)
		mdm->dump_timeout_ms = DEF_RAMDUMP_TIMEOUT;
	ret = of_property_read_u32(node, "qcom,ramdump-delay-ms",
						&mdm->ramdump_delay_ms);
	if (ret)
		mdm->ramdump_delay_ms = DEF_RAMDUMP_DELAY;
	/* Multiple gpio_request calls are allowed */
	if (gpio_request(MDM_GPIO(mdm, AP2MDM_STATUS), "AP2MDM_STATUS"))
		dev_err(dev, "Failed to configure AP2MDM_STATUS gpio\n");
	/* Multiple gpio_request calls are allowed */
	if (gpio_request(MDM_GPIO(mdm, AP2MDM_ERRFATAL), "AP2MDM_ERRFATAL"))
		dev_err(dev, "%s Failed to configure AP2MDM_ERRFATAL gpio\n",
								__func__);
	if (gpio_request(MDM_GPIO(mdm, MDM2AP_STATUS), "MDM2AP_STATUS")) {
		dev_err(dev, "%s Failed to configure MDM2AP_STATUS gpio\n",
								__func__);
		goto fatal_err;
	}
	if (gpio_request(MDM_GPIO(mdm, MDM2AP_ERRFATAL), "MDM2AP_ERRFATAL")) {
		dev_err(dev, "%s Failed to configure MDM2AP_ERRFATAL gpio\n",
								__func__);
		goto fatal_err;
	}
	if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
		if (gpio_request(MDM_GPIO(mdm, MDM2AP_PBLRDY),
						"MDM2AP_PBLRDY")) {
			dev_err(dev, "Cannot configure MDM2AP_PBLRDY gpio\n");
			goto fatal_err;
		}
	}
	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_WAKEUP))) {
		if (gpio_request(MDM_GPIO(mdm, AP2MDM_WAKEUP),
						"AP2MDM_WAKEUP")) {
			dev_err(dev, "Cannot configure AP2MDM_WAKEUP gpio\n");
			goto fatal_err;
		}
	}
	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY))) {
		if (gpio_request(MDM_GPIO(mdm, AP2MDM_CHNLRDY),
						"AP2MDM_CHNLRDY")) {
			dev_err(dev, "Cannot configure AP2MDM_CHNLRDY gpio\n");
			goto fatal_err;
		}
	}

	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);

	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY)))
		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_CHNLRDY), 0);

	gpio_direction_input(MDM_GPIO(mdm, MDM2AP_STATUS));
	gpio_direction_input(MDM_GPIO(mdm, MDM2AP_ERRFATAL));

	/* ERR_FATAL irq. */
	irq = gpio_to_irq(MDM_GPIO(mdm, MDM2AP_ERRFATAL));
	if (irq < 0) {
		dev_err(dev, "bad MDM2AP_ERRFATAL IRQ resource\n");
		goto errfatal_err;
	}
	ret = request_irq(irq, mdm_errfatal,
			IRQF_TRIGGER_RISING, "mdm errfatal", mdm);
	if (ret < 0) {
		dev_err(dev, "%s: MDM2AP_ERRFATAL IRQ#%d request failed\n",
							__func__, irq);
		goto errfatal_err;
	}
	mdm->errfatal_irq = irq;

errfatal_err:
	/* status irq */
	irq = gpio_to_irq(MDM_GPIO(mdm, MDM2AP_STATUS));
	if (irq < 0) {
		dev_err(dev, "%s: bad MDM2AP_STATUS IRQ resource, err = %d\n",
							__func__, irq);
		goto status_err;
	}
	ret = request_threaded_irq(irq, NULL, mdm_status_change,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"mdm status", mdm);
	if (ret < 0) {
		dev_err(dev, "%s: MDM2AP_STATUS IRQ#%d request failed, err=%d",
							__func__, irq, ret);
		goto status_err;
	}
	mdm->status_irq = irq;

status_err:
	if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
		irq = platform_get_irq_byname(pdev, "plbrdy_irq");
		if (irq < 0) {
			dev_err(dev, "%s: MDM2AP_PBLRDY IRQ request failed\n",
								__func__);
			goto pblrdy_err;
		}

		ret = request_threaded_irq(irq, NULL, mdm_pblrdy_change,
				IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				"mdm pbl ready", mdm);
		if (ret < 0) {
			dev_err(dev, "MDM2AP_PBL IRQ#%d request failed %d\n",
								irq, ret);
			goto pblrdy_err;
		}
		mdm->pblrdy_irq = irq;
	}
	mdm_disable_irqs(mdm);

pblrdy_err:
	return 0;

fatal_err:
	mdm_deconfigure_ipc(mdm);
	return ret;
}

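/* Look up the pinctrl states used by the driver and switch the pins to the active configuration. */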
static int mdm_pinctrl_init(struct mdm_ctrl *mdm)
{
	int retval = 0;

	mdm->pinctrl = devm_pinctrl_get(mdm->dev);
	if (IS_ERR_OR_NULL(mdm->pinctrl)) {
		retval = PTR_ERR(mdm->pinctrl);
		goto err_state_suspend;
	}
	mdm->gpio_state_booting =
		pinctrl_lookup_state(mdm->pinctrl, "mdm_booting");
	if (IS_ERR_OR_NULL(mdm->gpio_state_booting)) {
		mdm->gpio_state_running = NULL;
		mdm->gpio_state_booting = NULL;
	} else {
		mdm->gpio_state_running =
			pinctrl_lookup_state(mdm->pinctrl, "mdm_running");
		if (IS_ERR_OR_NULL(mdm->gpio_state_running)) {
			mdm->gpio_state_booting = NULL;
			mdm->gpio_state_running = NULL;
		}
	}
	mdm->gpio_state_active =
		pinctrl_lookup_state(mdm->pinctrl, "mdm_active");
	if (IS_ERR_OR_NULL(mdm->gpio_state_active)) {
		retval = PTR_ERR(mdm->gpio_state_active);
		goto err_state_active;
	}
	mdm->gpio_state_suspend =
		pinctrl_lookup_state(mdm->pinctrl, "mdm_suspend");
	if (IS_ERR_OR_NULL(mdm->gpio_state_suspend)) {
		retval = PTR_ERR(mdm->gpio_state_suspend);
		goto err_state_suspend;
	}
	retval = pinctrl_select_state(mdm->pinctrl, mdm->gpio_state_active);
	return retval;

err_state_suspend:
	mdm->gpio_state_active = NULL;
err_state_active:
	mdm->gpio_state_suspend = NULL;
	mdm->gpio_state_booting = NULL;
	mdm->gpio_state_running = NULL;
	return retval;
}

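/* Probe-time setup for MDM9x25: parse DT, set up PON, pinctrl and IPC, then register the esoc link over HSIC. */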
static int mdm9x25_setup_hw(struct mdm_ctrl *mdm,
					const struct mdm_ops *ops,
					struct platform_device *pdev)
{
	int ret;
	struct esoc_clink *esoc;
	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
	const struct mdm_pon_ops *pon_ops = ops->pon_ops;

	mdm->dev = &pdev->dev;
	mdm->pon_ops = pon_ops;
	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
	/* devm_kzalloc() returns NULL on failure, not an ERR_PTR value. */
	if (!esoc) {
		dev_err(mdm->dev, "cannot allocate esoc device\n");
		return -ENOMEM;
	}
	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
	if (!mdm->mdm_queue) {
		dev_err(mdm->dev, "could not create mdm_queue\n");
		return -ENOMEM;
	}
	mdm->irq_mask = 0;
	mdm->ready = false;
	ret = mdm_dt_parse_gpios(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "parsing gpio done\n");
	ret = mdm_pon_dt_init(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "pon dt init done\n");
	ret = mdm_pinctrl_init(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "pinctrl init done\n");
	ret = mdm_pon_setup(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "pon setup done\n");
	ret = mdm_configure_ipc(mdm, pdev);
	if (ret)
		return ret;
	mdm_configure_debug(mdm);
	dev_dbg(mdm->dev, "ipc configure done\n");
	esoc->name = MDM9x25_LABEL;
	esoc->link_name = MDM9x25_HSIC;
	esoc->clink_ops = clink_ops;
	esoc->parent = mdm->dev;
	esoc->owner = THIS_MODULE;
	esoc->np = pdev->dev.of_node;
	set_esoc_clink_data(esoc, mdm);
	ret = esoc_clink_register(esoc);
	if (ret) {
		dev_err(mdm->dev, "esoc registration failed\n");
		return ret;
	}
	dev_dbg(mdm->dev, "esoc registration done\n");
	init_completion(&mdm->debug_done);
	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
	mdm->get_restart_reason = false;
	mdm->debug_fail = false;
	mdm->esoc = esoc;
	mdm->init = 0;
	return 0;
}

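/* Probe-time setup for MDM9x35: same flow as MDM9x25, plus link detection to pick HSIC, PCIe or dual-link operation. */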
static int mdm9x35_setup_hw(struct mdm_ctrl *mdm,
					const struct mdm_ops *ops,
					struct platform_device *pdev)
{
	int ret;
	struct device_node *node;
	struct esoc_clink *esoc;
	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
	const struct mdm_pon_ops *pon_ops = ops->pon_ops;

	mdm->dev = &pdev->dev;
	mdm->pon_ops = pon_ops;
	node = pdev->dev.of_node;
	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
	/* devm_kzalloc() returns NULL on failure, not an ERR_PTR value. */
	if (!esoc) {
		dev_err(mdm->dev, "cannot allocate esoc device\n");
		return -ENOMEM;
	}
	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
	if (!mdm->mdm_queue) {
		dev_err(mdm->dev, "could not create mdm_queue\n");
		return -ENOMEM;
	}
	mdm->irq_mask = 0;
	mdm->ready = false;
	ret = mdm_dt_parse_gpios(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "parsing gpio done\n");
	ret = mdm_pon_dt_init(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "pon dt init done\n");
	ret = mdm_pinctrl_init(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "pinctrl init done\n");
	ret = mdm_pon_setup(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "pon setup done\n");
	ret = mdm_configure_ipc(mdm, pdev);
	if (ret)
		return ret;
	mdm_configure_debug(mdm);
	dev_dbg(mdm->dev, "ipc configure done\n");
	esoc->name = MDM9x35_LABEL;
	mdm->dual_interface = of_property_read_bool(node,
						"qcom,mdm-dual-link");
	/* Check if the link detect gpio is available. */
	if (gpio_is_valid(MDM_GPIO(mdm, MDM_LINK_DETECT))) {
		if (mdm->dual_interface) {
			if (gpio_get_value(MDM_GPIO(mdm, MDM_LINK_DETECT)))
				esoc->link_name = MDM9x35_DUAL_LINK;
			else
				esoc->link_name = MDM9x35_PCIE;
		} else {
			if (gpio_get_value(MDM_GPIO(mdm, MDM_LINK_DETECT)))
				esoc->link_name = MDM9x35_HSIC;
			else
				esoc->link_name = MDM9x35_PCIE;
		}
	} else if (mdm->dual_interface) {
		esoc->link_name = MDM9x35_DUAL_LINK;
	} else {
		esoc->link_name = MDM9x35_HSIC;
	}
	esoc->clink_ops = clink_ops;
	esoc->parent = mdm->dev;
	esoc->owner = THIS_MODULE;
	esoc->np = pdev->dev.of_node;
	set_esoc_clink_data(esoc, mdm);
	ret = esoc_clink_register(esoc);
	if (ret) {
		dev_err(mdm->dev, "esoc registration failed\n");
		return ret;
	}
	dev_dbg(mdm->dev, "esoc registration done\n");
	init_completion(&mdm->debug_done);
	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
	mdm->get_restart_reason = false;
	mdm->debug_fail = false;
	mdm->esoc = esoc;
	mdm->init = 0;
	return 0;
}

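/* Probe-time setup for MDM9x55: same flow as above, with the esoc link fixed to PCIe. */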
static int mdm9x55_setup_hw(struct mdm_ctrl *mdm,
					const struct mdm_ops *ops,
					struct platform_device *pdev)
{
	int ret;
	struct device_node *node;
	struct esoc_clink *esoc;
	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
	const struct mdm_pon_ops *pon_ops = ops->pon_ops;

	mdm->dev = &pdev->dev;
	mdm->pon_ops = pon_ops;
	node = pdev->dev.of_node;
	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
	/* devm_kzalloc() returns NULL on failure, not an ERR_PTR value. */
	if (!esoc) {
		dev_err(mdm->dev, "cannot allocate esoc device\n");
		return -ENOMEM;
	}
	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
	if (!mdm->mdm_queue) {
		dev_err(mdm->dev, "could not create mdm_queue\n");
		return -ENOMEM;
	}
	mdm->irq_mask = 0;
	mdm->ready = false;
	ret = mdm_dt_parse_gpios(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "parsing gpio done\n");
	ret = mdm_pon_dt_init(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "pon dt init done\n");
	ret = mdm_pinctrl_init(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "pinctrl init done\n");
	ret = mdm_pon_setup(mdm);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "pon setup done\n");
	ret = mdm_configure_ipc(mdm, pdev);
	if (ret)
		return ret;
	dev_dbg(mdm->dev, "ipc configure done\n");
	esoc->name = MDM9x55_LABEL;
	mdm->dual_interface = of_property_read_bool(node,
						"qcom,mdm-dual-link");
	esoc->link_name = MDM9x55_PCIE;
	esoc->clink_ops = clink_ops;
	esoc->parent = mdm->dev;
	esoc->owner = THIS_MODULE;
	esoc->np = pdev->dev.of_node;
	set_esoc_clink_data(esoc, mdm);
	ret = esoc_clink_register(esoc);
	if (ret) {
		dev_err(mdm->dev, "esoc registration failed\n");
		return ret;
	}
	dev_dbg(mdm->dev, "esoc registration done\n");
	init_completion(&mdm->debug_done);
	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
	mdm->get_restart_reason = false;
	mdm->debug_fail = false;
	mdm->esoc = esoc;
	mdm->init = 0;
	return 0;
}

static struct esoc_clink_ops mdm_cops = {
	.cmd_exe = mdm_cmd_exe,
	.get_status = mdm_get_status,
	.notify = mdm_notify,
};

static struct mdm_ops mdm9x25_ops = {
	.clink_ops = &mdm_cops,
	.config_hw = mdm9x25_setup_hw,
	.pon_ops = &mdm9x25_pon_ops,
};

static struct mdm_ops mdm9x35_ops = {
	.clink_ops = &mdm_cops,
	.config_hw = mdm9x35_setup_hw,
	.pon_ops = &mdm9x35_pon_ops,
};

static struct mdm_ops mdm9x55_ops = {
	.clink_ops = &mdm_cops,
	.config_hw = mdm9x55_setup_hw,
	.pon_ops = &mdm9x55_pon_ops,
};

static const struct of_device_id mdm_dt_match[] = {
	{ .compatible = "qcom,ext-mdm9x25", .data = &mdm9x25_ops, },
	{ .compatible = "qcom,ext-mdm9x35", .data = &mdm9x35_ops, },
	{ .compatible = "qcom,ext-mdm9x55", .data = &mdm9x55_ops, },
	{},
};
MODULE_DEVICE_TABLE(of, mdm_dt_match);

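/* Match the modem variant from device tree and run its hardware setup routine. */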
static int mdm_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct mdm_ops *mdm_ops;
	struct device_node *node = pdev->dev.of_node;
	struct mdm_ctrl *mdm;

	match = of_match_node(mdm_dt_match, node);
	/* of_match_node() returns NULL when nothing matches, not an ERR_PTR value. */
	if (!match)
		return -ENODEV;
	mdm_ops = match->data;
	mdm = devm_kzalloc(&pdev->dev, sizeof(*mdm), GFP_KERNEL);
	if (!mdm)
		return -ENOMEM;
	return mdm_ops->config_hw(mdm, mdm_ops, pdev);
}

static struct platform_driver mdm_driver = {
	.probe = mdm_probe,
	.driver = {
		.name = "ext-mdm",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(mdm_dt_match),
	},
};

static int __init mdm_register(void)
{
	return platform_driver_register(&mdm_driver);
}
module_init(mdm_register);

static void __exit mdm_unregister(void)
{
	platform_driver_unregister(&mdm_driver);
}
module_exit(mdm_unregister);
MODULE_LICENSE("GPL v2");