blob: e148868a696511abbc836f8bb3623713ebc79f6b [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmberg6275b602012-11-19 13:05:04 -07004 * Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f9412012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Eric Holmberg144c2de2012-10-04 13:37:28 -060037#include <linux/suspend.h>
Jeff Hugo412356e2012-09-27 17:14:23 -060038#include <linux/of.h>
39#include <linux/of_irq.h>
Seemanta Dutta4e2d49c2013-04-05 16:28:11 -070040
Brian Swetland2eb44eb2008-09-29 16:00:48 -070041#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070042#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070043#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070044#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053045#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070046#include <mach/proc_comm.h>
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +053047#include <mach/msm_ipc_logging.h>
Seemanta Dutta4e2d49c2013-04-05 16:28:11 -070048#include <mach/ramdump.h>
Eric Holmberg51edef72013-04-11 14:28:33 -060049#include <mach/board.h>
Jeff Hugo5ba15fe2013-05-06 14:24:24 -060050#include <mach/msm_smem.h>
Seemanta Dutta4e2d49c2013-04-05 16:28:11 -070051
Ram Somani8b9589f2012-04-03 12:07:18 +053052#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070053
54#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055#include "modem_notifier.h"
Jeff Hugo5ba15fe2013-05-06 14:24:24 -060056#include "smem_private.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070057
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070058#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060059 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070061#define CONFIG_QDSP6 1
62#endif
63
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060064#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
65 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070066#define CONFIG_DSPS 1
67#endif
68
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060069#if defined(CONFIG_ARCH_MSM8960) \
70 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060072#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
75#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070077#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060078#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Eric Holmberge5266d32013-02-25 18:29:27 -070079#define RSPIN_INIT_WAIT_MS 1000
Eric Holmberg424d9552013-04-05 15:23:25 -060080#define SMD_FIFO_FULL_RESERVE 4
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081
82uint32_t SMSM_NUM_ENTRIES = 8;
83uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070084
Eric Holmberge8a39322012-04-03 15:14:02 -060085/* Legacy SMSM interrupt notifications */
86#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
87 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070088
89enum {
90 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070091 MSM_SMSM_DEBUG = 1U << 1,
92 MSM_SMD_INFO = 1U << 2,
93 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070094 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095};
96
/*
 * Pointers into the shared-memory SMSM region.  intr_mask may be NULL
 * on older protocol versions (see notify_other_smsm(), which checks it
 * before use).
 */
struct smsm_shared_info {
	uint32_t *state;	/* per-entry SMSM state words */
	uint32_t *intr_mask;	/* per-entry, per-host notification masks */
	uint32_t *intr_mux;	/* legacy mux counters (used on QSD8x50) */
};
102
103static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f9412012-03-19 10:04:22 -0600104static struct kfifo smsm_snapshot_fifo;
105static struct wake_lock smsm_snapshot_wakelock;
106static int smsm_snapshot_count;
107static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700108
/*
 * Shared-memory item describing SMSM table dimensions.
 * NOTE(review): presumably mirrors SMSM_NUM_HOSTS/SMSM_NUM_ENTRIES from
 * the remote side — confirm against the smem layout documentation.
 */
struct smsm_size_info_type {
	uint32_t num_hosts;	/* number of hosts sharing SMSM state */
	uint32_t num_entries;	/* number of SMSM state entries */
	uint32_t reserved0;
	uint32_t reserved1;
};
115
/* Registration record for one local SMSM state-change callback client. */
struct smsm_state_cb_info {
	struct list_head cb_list;	/* link in the per-entry callback list */
	uint32_t mask;			/* state bits this client cares about */
	void *data;			/* opaque cookie passed back to notify() */
	/* called with the previous and current state values */
	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
};
122
/* Per-SMSM-entry bookkeeping for local callback clients. */
struct smsm_state_info {
	struct list_head callbacks;	/* list of smsm_state_cb_info */
	uint32_t last_value;		/* last state value seen/delivered */
	/* NOTE(review): appear to accumulate interrupt-mask set/clear
	 * requests for this entry — confirm against the mask update path */
	uint32_t intr_mask_set;
	uint32_t intr_mask_clear;
};
129
/*
 * One direction of interrupt configuration for an edge (SMD data path
 * or SMSM state path).  When out_base is NULL the notify_* helpers fall
 * back to the legacy hardcoded MSM_TRIG_* triggers.
 */
struct interrupt_config_item {
	/* must be initialized */
	irqreturn_t (*irq_handler)(int req, void *data);
	/* outgoing interrupt config (set from platform data) */
	uint32_t out_bit_pos;	/* bit value written to raise the interrupt */
	void __iomem *out_base;	/* interrupt register base; NULL = legacy */
	uint32_t out_offset;	/* register offset from out_base */
	int irq_id;
};
139
/* Per-subsystem pair of interrupt configs: SMD data and SMSM state. */
struct interrupt_config {
	struct interrupt_config_item smd;
	struct interrupt_config_item smsm;
};
144
145static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700146static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530147static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700148static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530149static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700150static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530151static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700152static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600153static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530154static irqreturn_t smsm_irq_handler(int irq, void *data);
155
156static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
157 [SMD_MODEM] = {
158 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700159 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530160 },
161 [SMD_Q6] = {
162 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700163 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530164 },
165 [SMD_DSPS] = {
166 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700167 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530168 },
169 [SMD_WCNSS] = {
170 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700171 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530172 },
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600173 [SMD_RPM] = {
174 .smd.irq_handler = smd_rpm_irq_handler,
175 .smsm.irq_handler = NULL, /* does not support smsm */
176 },
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530177};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600178
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700179struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530180
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700181#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
182#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
183 entry * SMSM_NUM_HOSTS + host)
184#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
185
186/* Internal definitions which are not exported in some targets */
187enum {
188 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700189};
190
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530191static int msm_smd_debug_mask = MSM_SMx_POWER_INFO;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700192module_param_named(debug_mask, msm_smd_debug_mask,
193 int, S_IRUGO | S_IWUSR | S_IWGRP);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530194static void *smd_log_ctx;
195#define NUM_LOG_PAGES 4
196
197#define IPC_LOG(level, x...) do { \
198 if (smd_log_ctx) \
199 ipc_log_string(smd_log_ctx, x); \
200 else \
201 printk(level x); \
202 } while (0)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700203
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700204#if defined(CONFIG_MSM_SMD_DEBUG)
205#define SMD_DBG(x...) do { \
206 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530207 IPC_LOG(KERN_DEBUG, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700208 } while (0)
209
210#define SMSM_DBG(x...) do { \
211 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530212 IPC_LOG(KERN_DEBUG, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700213 } while (0)
214
215#define SMD_INFO(x...) do { \
216 if (msm_smd_debug_mask & MSM_SMD_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530217 IPC_LOG(KERN_INFO, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218 } while (0)
219
220#define SMSM_INFO(x...) do { \
221 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530222 IPC_LOG(KERN_INFO, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700223 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700224#define SMx_POWER_INFO(x...) do { \
225 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530226 IPC_LOG(KERN_INFO, x); \
Eric Holmberg98c6c642012-02-24 11:29:35 -0700227 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228#else
229#define SMD_DBG(x...) do { } while (0)
230#define SMSM_DBG(x...) do { } while (0)
231#define SMD_INFO(x...) do { } while (0)
232#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700233#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700234#endif
235
Eric Holmberg51edef72013-04-11 14:28:33 -0600236/**
237 * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
238 *
239 * @type: type to check for overflow
240 * @a: left value to use
241 * @b: right value to use
242 * @returns: true if a + b will result in overflow; false otherwise
243 */
244#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
245 (((type)~0 - (a)) < (b) ? true : false)
246
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700247static unsigned last_heap_free = 0xffffffff;
248
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700249static inline void smd_write_intr(unsigned int val,
250 const void __iomem *addr);
251
252#if defined(CONFIG_ARCH_MSM7X30)
253#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530254 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530256 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700257#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530258 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700259#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530260 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700261#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600262#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700263#define MSM_TRIG_A2WCNSS_SMD_INT
264#define MSM_TRIG_A2WCNSS_SMSM_INT
265#elif defined(CONFIG_ARCH_MSM8X60)
266#define MSM_TRIG_A2M_SMD_INT \
267 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
268#define MSM_TRIG_A2Q6_SMD_INT \
269 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
270#define MSM_TRIG_A2M_SMSM_INT \
271 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
272#define MSM_TRIG_A2Q6_SMSM_INT \
273 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
274#define MSM_TRIG_A2DSPS_SMD_INT \
275 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600276#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700277#define MSM_TRIG_A2WCNSS_SMD_INT
278#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600279#elif defined(CONFIG_ARCH_MSM9615)
280#define MSM_TRIG_A2M_SMD_INT \
281 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
282#define MSM_TRIG_A2Q6_SMD_INT \
283 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
284#define MSM_TRIG_A2M_SMSM_INT \
285 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
286#define MSM_TRIG_A2Q6_SMSM_INT \
287 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
288#define MSM_TRIG_A2DSPS_SMD_INT
289#define MSM_TRIG_A2DSPS_SMSM_INT
290#define MSM_TRIG_A2WCNSS_SMD_INT
291#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700292#elif defined(CONFIG_ARCH_FSM9XXX)
293#define MSM_TRIG_A2Q6_SMD_INT \
294 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
295#define MSM_TRIG_A2Q6_SMSM_INT \
296 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
297#define MSM_TRIG_A2M_SMD_INT \
298 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
299#define MSM_TRIG_A2M_SMSM_INT \
300 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
301#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600302#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700303#define MSM_TRIG_A2WCNSS_SMD_INT
304#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700305#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700306#define MSM_TRIG_A2M_SMD_INT \
307 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700308#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700309#define MSM_TRIG_A2M_SMSM_INT \
310 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700311#define MSM_TRIG_A2Q6_SMSM_INT
312#define MSM_TRIG_A2DSPS_SMD_INT
313#define MSM_TRIG_A2DSPS_SMSM_INT
314#define MSM_TRIG_A2WCNSS_SMD_INT
315#define MSM_TRIG_A2WCNSS_SMSM_INT
316#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
317#define MSM_TRIG_A2M_SMD_INT \
318 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
319#define MSM_TRIG_A2Q6_SMD_INT
320#define MSM_TRIG_A2M_SMSM_INT \
321 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
322#define MSM_TRIG_A2Q6_SMSM_INT
323#define MSM_TRIG_A2DSPS_SMD_INT
324#define MSM_TRIG_A2DSPS_SMSM_INT
325#define MSM_TRIG_A2WCNSS_SMD_INT
326#define MSM_TRIG_A2WCNSS_SMSM_INT
327#else /* use platform device / device tree configuration */
328#define MSM_TRIG_A2M_SMD_INT
329#define MSM_TRIG_A2Q6_SMD_INT
330#define MSM_TRIG_A2M_SMSM_INT
331#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700332#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600333#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700334#define MSM_TRIG_A2WCNSS_SMD_INT
335#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700336#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700337
Jeff Hugoee40b152012-02-09 17:39:47 -0700338/*
339 * stub out legacy macros if they are not being used so that the legacy
340 * code compiles even though it is not used
341 *
342 * these definitions should not be used in active code and will cause
343 * an early failure
344 */
345#ifndef INT_A9_M2A_0
346#define INT_A9_M2A_0 -1
347#endif
348#ifndef INT_A9_M2A_5
349#define INT_A9_M2A_5 -1
350#endif
351#ifndef INT_ADSP_A11
352#define INT_ADSP_A11 -1
353#endif
354#ifndef INT_ADSP_A11_SMSM
355#define INT_ADSP_A11_SMSM -1
356#endif
357#ifndef INT_DSPS_A11
358#define INT_DSPS_A11 -1
359#endif
360#ifndef INT_DSPS_A11_SMSM
361#define INT_DSPS_A11_SMSM -1
362#endif
363#ifndef INT_WCNSS_A11
364#define INT_WCNSS_A11 -1
365#endif
366#ifndef INT_WCNSS_A11_SMSM
367#define INT_WCNSS_A11_SMSM -1
368#endif
369
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700370#define SMD_LOOPBACK_CID 100
371
372static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600374static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700375
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600376static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700377static void notify_smsm_cb_clients_worker(struct work_struct *work);
378static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600379static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700380static struct smsm_state_info *smsm_states;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -0600381
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530382static int smd_stream_write_avail(struct smd_channel *ch);
383static int smd_stream_read_avail(struct smd_channel *ch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700384
/**
 * smd_write_intr() - ordered register write used to raise an interrupt
 * @val: value to write into the interrupt register
 * @addr: interrupt register address
 *
 * The wmb() ensures all prior shared-memory updates are visible to the
 * remote processor before it is interrupted; do not reorder these two
 * statements.
 */
static inline void smd_write_intr(unsigned int val,
		const void __iomem *addr)
{
	wmb(); /* commit shared-memory state before triggering remote side */
	__raw_writel(val, addr);
}
391
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530392static inline void log_notify(uint32_t subsystem, smd_channel_t *ch)
393{
394 const char *subsys = smd_edge_to_subsystem(subsystem);
395
Jay Chokshi83b4f6132013-02-14 16:20:56 -0800396 (void) subsys;
397
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530398 if (!ch)
399 SMx_POWER_INFO("Apps->%s\n", subsys);
400 else
401 SMx_POWER_INFO(
402 "Apps->%s ch%d '%s': tx%d/rx%d %dr/%dw : %dr/%dw\n",
403 subsys, ch->n, ch->name,
404 ch->fifo_size -
405 (smd_stream_write_avail(ch) + 1),
406 smd_stream_read_avail(ch),
407 ch->half_ch->get_tail(ch->send),
408 ch->half_ch->get_head(ch->send),
409 ch->half_ch->get_tail(ch->recv),
410 ch->half_ch->get_head(ch->recv)
411 );
412}
413
414static inline void notify_modem_smd(smd_channel_t *ch)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700415{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530416 static const struct interrupt_config_item *intr
417 = &private_intr_config[SMD_MODEM].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530418
419 log_notify(SMD_APPS_MODEM, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700420 if (intr->out_base) {
421 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530422 smd_write_intr(intr->out_bit_pos,
423 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700424 } else {
425 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530426 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700427 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700428}
429
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530430static inline void notify_dsp_smd(smd_channel_t *ch)
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700431{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530432 static const struct interrupt_config_item *intr
433 = &private_intr_config[SMD_Q6].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530434
435 log_notify(SMD_APPS_QDSP, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700436 if (intr->out_base) {
437 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530438 smd_write_intr(intr->out_bit_pos,
439 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700440 } else {
441 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530442 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700443 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700444}
445
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530446static inline void notify_dsps_smd(smd_channel_t *ch)
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530447{
448 static const struct interrupt_config_item *intr
449 = &private_intr_config[SMD_DSPS].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530450
451 log_notify(SMD_APPS_DSPS, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700452 if (intr->out_base) {
453 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530454 smd_write_intr(intr->out_bit_pos,
455 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700456 } else {
457 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530458 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700459 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530460}
461
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530462static inline void notify_wcnss_smd(struct smd_channel *ch)
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530463{
464 static const struct interrupt_config_item *intr
465 = &private_intr_config[SMD_WCNSS].smd;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530466
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530467 log_notify(SMD_APPS_WCNSS, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700468 if (intr->out_base) {
469 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530470 smd_write_intr(intr->out_bit_pos,
471 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700472 } else {
473 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530474 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700475 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530476}
477
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530478static inline void notify_rpm_smd(smd_channel_t *ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600479{
480 static const struct interrupt_config_item *intr
481 = &private_intr_config[SMD_RPM].smd;
482
483 if (intr->out_base) {
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530484 log_notify(SMD_APPS_RPM, ch);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600485 ++interrupt_stats[SMD_RPM].smd_out_config_count;
486 smd_write_intr(intr->out_bit_pos,
487 intr->out_base + intr->out_offset);
488 }
489}
490
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530491static inline void notify_modem_smsm(void)
492{
493 static const struct interrupt_config_item *intr
494 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700495 if (intr->out_base) {
496 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530497 smd_write_intr(intr->out_bit_pos,
498 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700499 } else {
500 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530501 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700502 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530503}
504
505static inline void notify_dsp_smsm(void)
506{
507 static const struct interrupt_config_item *intr
508 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700509 if (intr->out_base) {
510 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530511 smd_write_intr(intr->out_bit_pos,
512 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700513 } else {
514 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530515 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700516 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530517}
518
519static inline void notify_dsps_smsm(void)
520{
521 static const struct interrupt_config_item *intr
522 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700523 if (intr->out_base) {
524 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530525 smd_write_intr(intr->out_bit_pos,
526 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700527 } else {
528 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530529 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700530 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530531}
532
533static inline void notify_wcnss_smsm(void)
534{
535 static const struct interrupt_config_item *intr
536 = &private_intr_config[SMD_WCNSS].smsm;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530537
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700538 if (intr->out_base) {
539 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530540 smd_write_intr(intr->out_bit_pos,
541 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700542 } else {
543 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530544 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700545 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530546}
547
/**
 * notify_other_smsm() - interrupt every host interested in a state change
 * @smsm_entry: index of the SMSM state entry that changed
 * @notify_mask: bits of the entry that changed
 *
 * For each remote host whose interrupt mask for @smsm_entry overlaps
 * @notify_mask, raise that host's SMSM interrupt.  The modem is also
 * notified unconditionally when no intr_mask exists (older protocol).
 * Local apps callback clients are handled last via a snapshot taken
 * without a wakelock (see comment below).
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/* QSD8x50 also bumps the legacy apps->Q6 mux counter */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
	     & notify_mask)) {
		smsm_cb_snapshot(0);
	}
}
596
Eric Holmberg144c2de2012-10-04 13:37:28 -0600597static int smsm_pm_notifier(struct notifier_block *nb,
598 unsigned long event, void *unused)
599{
600 switch (event) {
601 case PM_SUSPEND_PREPARE:
602 smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
603 break;
604
605 case PM_POST_SUSPEND:
606 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
607 break;
608 }
609 return NOTIFY_DONE;
610}
611
/* PM notifier hooking smsm_pm_notifier() into suspend/resume;
 * NOTE(review): registered elsewhere (likely via register_pm_notifier). */
static struct notifier_block smsm_pm_nb = {
	.notifier_call = smsm_pm_notifier,
	.priority = 0,
};
616
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700617void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700618{
619 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700620 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700621
622 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
623 if (x != 0) {
624 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700625 SMD_INFO("smem: DIAG '%s'\n", x);
626 }
627
628 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
629 if (x != 0) {
630 x[size - 1] = 0;
631 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700632 }
633}
634
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700635
/*
 * handle_modem_crash() - terminal handler for a detected modem crash
 *
 * Logs the crash, dumps shared-memory diagnostics, then spins forever.
 * The infinite loop is intentional: the modem or a watchdog is expected
 * to reset the system from here.  Never returns.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
650
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700651int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700652{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700653 /* if the modem's not ready yet, we have to hope for the best */
654 if (!smsm_info.state)
655 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700656
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700657 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700658 handle_modem_crash();
659 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700660 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700661 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700662}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700663EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700664
/* the spinlock is used to synchronize between the
 * irq handler and code that mutates the channel
 * list or fiddles with channel state
 */
static DEFINE_SPINLOCK(smd_lock);
/* non-static: shared with other SMEM users in this subsystem */
DEFINE_SPINLOCK(smem_lock);

/* the mutex is used during open() and close()
 * operations to avoid races while creating or
 * destroying smd_channel structures
 */
static DEFINE_MUTEX(smd_creation_mutex);

/* nonzero once SMD core initialization has completed (gates channel probe) */
static int smd_initialized;
/*
 * Legacy (v1) shared-memory channel layout: both half-channel control
 * structures with their data FIFOs allocated inline.  Layout is shared
 * with remote processors — do not reorder or resize fields.
 */
struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

/* v2 layout: control structures only; FIFOs are allocated separately */
struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

/* v2 layout for edges that require word-aligned accesses */
struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};
696
/* describes one SMD edge: the two processors on it and its subsystem */
struct edge_to_pid {
	uint32_t local_pid;	/* processor ID of the local side */
	uint32_t remote_pid;	/* processor ID of the remote side */
	char subsys_name[SMD_MAX_CH_NAME_LEN];	/* "" when no name configured */
	bool initialized;	/* set once this edge has been initialized */
};
703
704/**
705 * Maps edge type to local and remote processor ID's.
706 */
707static struct edge_to_pid edge_to_pids[] = {
Eric Holmberg5a55b4a2012-03-01 14:41:54 -0700708 [SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
Stephen Boyd77db8bb2012-06-27 15:15:16 -0700709 [SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "adsp"},
Eric Holmberg5a55b4a2012-03-01 14:41:54 -0700710 [SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
711 [SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
712 [SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
713 [SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
714 [SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
715 [SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
716 [SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
717 [SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
718 [SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
719 [SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
720 [SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
721 [SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
722 [SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600723 [SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
724 [SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
725 [SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
726 [SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
FNU Ramendrae2570d12013-07-18 14:12:03 -0600727 [SMD_TZ_RPM] = {SMD_TZ, SMD_RPM},
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700728};
729
/* associates a restart notifier with the remote processor it watches */
struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};

/* NOTE(review): presumably disables the SMSM reset handshake during
 * subsystem restart — set/used outside this chunk, confirm at callers */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* per-edge channel lists; iterated under smd_lock (see handle_smd_irq) */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one flag per slot of the 64-entry SMEM channel allocation table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);
756
Jeff Hugo7cc06b12013-06-17 16:13:18 -0600757static bool smd_edge_inited(int edge)
758{
759 return edge_to_pids[edge].initialized;
760}
761
/*
 * On SMP systems the probe might get called from multiple cores;
 * smd_probe_lock serializes scans of the channel allocation table.
 */
static DEFINE_MUTEX(smd_probe_lock);
765
766static void smd_channel_probe_worker(struct work_struct *work)
767{
768 struct smd_alloc_elm *shared;
769 unsigned n;
770 uint32_t type;
771
772 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
773
774 if (!shared) {
775 pr_err("%s: allocation table not initialized\n", __func__);
776 return;
777 }
778
779 mutex_lock(&smd_probe_lock);
780 for (n = 0; n < 64; n++) {
781 if (smd_ch_allocated[n])
782 continue;
783
784 /* channel should be allocated only if APPS
785 processor is involved */
786 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600787 if (type >= ARRAY_SIZE(edge_to_pids) ||
788 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700789 continue;
790 if (!shared[n].ref_count)
791 continue;
792 if (!shared[n].name[0])
793 continue;
794
Jeff Hugo7cc06b12013-06-17 16:13:18 -0600795 if (!smd_initialized && !smd_edge_inited(type)) {
796 SMD_INFO("Probe skipping ch %d, edge not inited\n", n);
797 continue;
798 }
799
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700800 if (!smd_alloc_channel(&shared[n]))
801 smd_ch_allocated[n] = 1;
802 else
803 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
804 }
805 mutex_unlock(&smd_probe_lock);
806}
807
808/**
809 * Lookup processor ID and determine if it belongs to the proved edge
810 * type.
811 *
812 * @shared2: Pointer to v2 shared channel structure
813 * @type: Edge type
814 * @pid: Processor ID of processor on edge
815 * @local_ch: Channel that belongs to processor @pid
816 * @remote_ch: Other side of edge contained @pid
Jeff Hugo00be6282012-09-07 11:24:32 -0600817 * @is_word_access_ch: Bool, is this a word aligned access channel
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700818 *
819 * Returns 0 for not on edge, 1 for found on edge
820 */
Jeff Hugo00be6282012-09-07 11:24:32 -0600821static int pid_is_on_edge(void *shared2,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700822 uint32_t type, uint32_t pid,
Jeff Hugo00be6282012-09-07 11:24:32 -0600823 void **local_ch,
824 void **remote_ch,
825 int is_word_access_ch
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700826 )
827{
828 int ret = 0;
829 struct edge_to_pid *edge;
Jeff Hugo00be6282012-09-07 11:24:32 -0600830 void *ch0;
831 void *ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700832
833 *local_ch = 0;
834 *remote_ch = 0;
835
836 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
837 return 0;
838
Jeff Hugo00be6282012-09-07 11:24:32 -0600839 if (is_word_access_ch) {
840 ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
841 ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
842 } else {
843 ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
844 ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
845 }
846
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700847 edge = &edge_to_pids[type];
848 if (edge->local_pid != edge->remote_pid) {
849 if (pid == edge->local_pid) {
Jeff Hugo00be6282012-09-07 11:24:32 -0600850 *local_ch = ch0;
851 *remote_ch = ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700852 ret = 1;
853 } else if (pid == edge->remote_pid) {
Jeff Hugo00be6282012-09-07 11:24:32 -0600854 *local_ch = ch1;
855 *remote_ch = ch0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700856 ret = 1;
857 }
858 }
859
860 return ret;
861}
862
Eric Holmberg17992c12012-02-29 12:54:44 -0700863/*
864 * Returns a pointer to the subsystem name or NULL if no
865 * subsystem name is available.
866 *
867 * @type - Edge definition
868 */
869const char *smd_edge_to_subsystem(uint32_t type)
870{
871 const char *subsys = NULL;
872
873 if (type < ARRAY_SIZE(edge_to_pids)) {
874 subsys = edge_to_pids[type].subsys_name;
875 if (subsys[0] == 0x0)
876 subsys = NULL;
877 }
878 return subsys;
879}
880EXPORT_SYMBOL(smd_edge_to_subsystem);
881
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700882/*
883 * Returns a pointer to the subsystem name given the
884 * remote processor ID.
Arun Kumar Neelakantama35c7a72012-10-18 15:45:15 +0530885 * subsystem is not necessarily PIL-loadable
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700886 *
887 * @pid Remote processor ID
888 * @returns Pointer to subsystem name or NULL if not found
889 */
890const char *smd_pid_to_subsystem(uint32_t pid)
891{
892 const char *subsys = NULL;
893 int i;
894
895 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
Arun Kumar Neelakantama35c7a72012-10-18 15:45:15 +0530896 if (pid == edge_to_pids[i].remote_pid) {
897 if (edge_to_pids[i].subsys_name[0] != 0x0) {
898 subsys = edge_to_pids[i].subsys_name;
899 break;
900 } else if (pid == SMD_RPM) {
901 subsys = "rpm";
902 break;
903 }
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700904 }
905 }
906
907 return subsys;
908}
909EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700910
Jeff Hugo00be6282012-09-07 11:24:32 -0600911static void smd_reset_edge(void *void_ch, unsigned new_state,
912 int is_word_access_ch)
Eric Holmberg2a563c32011-10-05 14:51:43 -0600913{
Jeff Hugo00be6282012-09-07 11:24:32 -0600914 if (is_word_access_ch) {
915 struct smd_half_channel_word_access *ch =
916 (struct smd_half_channel_word_access *)(void_ch);
917 if (ch->state != SMD_SS_CLOSED) {
918 ch->state = new_state;
919 ch->fDSR = 0;
920 ch->fCTS = 0;
921 ch->fCD = 0;
922 ch->fSTATE = 1;
923 }
924 } else {
925 struct smd_half_channel *ch =
926 (struct smd_half_channel *)(void_ch);
927 if (ch->state != SMD_SS_CLOSED) {
928 ch->state = new_state;
929 ch->fDSR = 0;
930 ch->fCTS = 0;
931 ch->fCD = 0;
932 ch->fSTATE = 1;
933 }
Eric Holmberg2a563c32011-10-05 14:51:43 -0600934 }
935}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700936
/*
 * Walk the SMEM channel allocation table and force processor @pid's
 * half-channel to @new_state on every edge it participates in.
 * Used during subsystem restart; caller holds smd_lock/smd_probe_lock
 * (see smd_channel_reset()).
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *local_ch;
	void *remote_ch;
	int is_word_access;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip slots never allocated or with no channel name */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		/* fetch the correctly-sized shared structure for this edge */
		if (is_word_access)
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2_word_access));
		else
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
							is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch, is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);
	}
}
978
979
/*
 * Reset all SMD/SMSM state shared with processor @restart_pid after it
 * restarts: clear its SMSM entry, walk every edge it is on through
 * CLOSING and then CLOSED, and notify all peers at each step.  The
 * sequence (state write, mb(), fake irq, remote notifications) is
 * order-sensitive — do not reorder.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMx_POWER_INFO("%s: starting reset\n", __func__);

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors; mb() makes state visible before the irq */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd(NULL);
	notify_dsp_smd(NULL);
	notify_dsps_smd(NULL);
	notify_wcnss_smd(NULL);
	notify_rpm_smd(NULL);

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd(NULL);
	notify_dsp_smd(NULL);
	notify_dsps_smd(NULL);
	notify_wcnss_smd(NULL);
	notify_rpm_smd(NULL);

	SMx_POWER_INFO("%s: finished reset\n", __func__);
}
1046
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001047/* how many bytes are available for reading */
1048static int smd_stream_read_avail(struct smd_channel *ch)
1049{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001050 return (ch->half_ch->get_head(ch->recv) -
1051 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001052}
1053
1054/* how many bytes we are free to write */
1055static int smd_stream_write_avail(struct smd_channel *ch)
1056{
Eric Holmberg424d9552013-04-05 15:23:25 -06001057 int bytes_avail;
1058
1059 bytes_avail = ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1060 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask) + 1;
1061
1062 if (bytes_avail < SMD_FIFO_FULL_RESERVE)
1063 bytes_avail = 0;
1064 else
1065 bytes_avail -= SMD_FIFO_FULL_RESERVE;
1066 return bytes_avail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001067}
1068
1069static int smd_packet_read_avail(struct smd_channel *ch)
1070{
1071 if (ch->current_packet) {
1072 int n = smd_stream_read_avail(ch);
1073 if (n > ch->current_packet)
1074 n = ch->current_packet;
1075 return n;
1076 } else {
1077 return 0;
1078 }
1079}
1080
1081static int smd_packet_write_avail(struct smd_channel *ch)
1082{
1083 int n = smd_stream_write_avail(ch);
1084 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1085}
1086
1087static int ch_is_open(struct smd_channel *ch)
1088{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001089 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1090 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1091 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001092}
1093
1094/* provide a pointer and length to readable data in the fifo */
1095static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1096{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001097 unsigned head = ch->half_ch->get_head(ch->recv);
1098 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001099 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001100
1101 if (tail <= head)
1102 return head - tail;
1103 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001104 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001105}
1106
/* returns non-zero when the peer has asked us to suppress read
 * interrupts (fBLOCKREADINTR set on the receive half-channel)
 */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1111
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001112/* advance the fifo read pointer after data from ch_read_buffer is consumed */
1113static void ch_read_done(struct smd_channel *ch, unsigned count)
1114{
1115 BUG_ON(count > smd_stream_read_avail(ch));
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001116 ch->half_ch->set_tail(ch->recv,
1117 (ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001118 wmb();
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001119 ch->half_ch->set_fTAIL(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001120}
1121
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state();
 * will read-and-discard if the _data pointer is null
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	unsigned char *dst = _data;
	int remaining = len;

	while (remaining > 0) {
		void *src;
		unsigned chunk;
		int uncopied;

		chunk = ch_read_buffer(ch, &src);
		if (chunk == 0)
			break;
		if (chunk > remaining)
			chunk = remaining;

		if (_data) {
			if (user_buf) {
				uncopied = copy_to_user(dst, src, chunk);
				if (uncopied > 0) {
					pr_err("%s: copy_to_user could not copy %i bytes.\n",
							__func__, uncopied);
				}
			} else {
				memcpy(dst, src, chunk);
			}
		}

		dst += chunk;
		remaining -= chunk;
		ch_read_done(ch, chunk);
	}

	return len - remaining;
}
1162
/* update_state callback for streaming channels: intentionally a no-op */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1167
1168static void update_packet_state(struct smd_channel *ch)
1169{
1170 unsigned hdr[5];
1171 int r;
1172
1173 /* can't do anything if we're in the middle of a packet */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001174 while (ch->current_packet == 0) {
1175 /* discard 0 length packets if any */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001176
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001177 /* don't bother unless we can get the full header */
1178 if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
1179 return;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001180
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001181 r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
1182 BUG_ON(r != SMD_HEADER_SIZE);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001183
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001184 ch->current_packet = hdr[0];
1185 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001186}
1187
Eric Holmberg424d9552013-04-05 15:23:25 -06001188/**
1189 * ch_write_buffer() - Provide a pointer and length for the next segment of
1190 * free space in the FIFO.
1191 * @ch: channel
1192 * @ptr: Address to pointer for the next segment write
1193 * @returns: Maximum size that can be written until the FIFO is either full
1194 * or the end of the FIFO has been reached.
1195 *
1196 * The returned pointer and length are passed to memcpy, so the next segment is
1197 * defined as either the space available between the read index (tail) and the
1198 * write index (head) or the space available to the end of the FIFO.
1199 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001200static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1201{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001202 unsigned head = ch->half_ch->get_head(ch->send);
1203 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001204 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001205
1206 if (head < tail) {
Eric Holmberg424d9552013-04-05 15:23:25 -06001207 return tail - head - SMD_FIFO_FULL_RESERVE;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001208 } else {
Eric Holmberg424d9552013-04-05 15:23:25 -06001209 if (tail < SMD_FIFO_FULL_RESERVE)
1210 return ch->fifo_size + tail - head
1211 - SMD_FIFO_FULL_RESERVE;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001212 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001213 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001214 }
1215}
1216
1217/* advace the fifo write pointer after freespace
1218 * from ch_write_buffer is filled
1219 */
1220static void ch_write_done(struct smd_channel *ch, unsigned count)
1221{
1222 BUG_ON(count > smd_stream_write_avail(ch));
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001223 ch->half_ch->set_head(ch->send,
1224 (ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001225 wmb();
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001226 ch->half_ch->set_fHEAD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001227}
1228
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001229static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001230{
1231 if (n == SMD_SS_OPENED) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001232 ch->half_ch->set_fDSR(ch->send, 1);
1233 ch->half_ch->set_fCTS(ch->send, 1);
1234 ch->half_ch->set_fCD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001235 } else {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001236 ch->half_ch->set_fDSR(ch->send, 0);
1237 ch->half_ch->set_fCTS(ch->send, 0);
1238 ch->half_ch->set_fCD(ch->send, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001239 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001240 ch->half_ch->set_state(ch->send, n);
1241 ch->half_ch->set_fSTATE(ch->send, 1);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301242 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001243}
1244
1245static void do_smd_probe(void)
1246{
1247 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1248 if (shared->heap_info.free_offset != last_heap_free) {
1249 last_heap_free = shared->heap_info.free_offset;
1250 schedule_work(&probe_work);
1251 }
1252}
1253
/*
 * React to a remote half-channel state transition (@last -> @next),
 * advancing our own half-channel state machine and delivering
 * OPEN/CLOSE events to the channel owner.  Called with smd_lock held.
 */
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* peer is opening: reset indices and follow into OPENING */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		/* handshake complete: report the channel as open */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* peer closed while we were open: begin our close */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closing: hand off to the close workqueue */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001300static void handle_smd_irq_closing_list(void)
1301{
1302 unsigned long flags;
1303 struct smd_channel *ch;
1304 struct smd_channel *index;
1305 unsigned tmp;
1306
1307 spin_lock_irqsave(&smd_lock, flags);
1308 list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001309 if (ch->half_ch->get_fSTATE(ch->recv))
1310 ch->half_ch->set_fSTATE(ch->recv, 0);
1311 tmp = ch->half_ch->get_state(ch->recv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001312 if (tmp != ch->last_state)
1313 smd_state_change(ch, ch->last_state, tmp);
1314 }
1315 spin_unlock_irqrestore(&smd_lock, flags);
1316}
1317
/*
 * Core inbound-interrupt service for one edge: for every channel on
 * @list, harvest and clear the peer's fHEAD/fTAIL/fSTATE flags, apply
 * any remote state change, and deliver DATA/STATUS events via the
 * channel's notify callback.  @notify identifies the edge (passed to
 * state-change processing indirectly); runs under smd_lock.
 */
static void handle_smd_irq(struct list_head *list,
		void (*notify)(smd_channel_t *ch))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* ch_flags: 1 = new data, 2 = space freed, 4 = state */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO(
				"SMD ch%d '%s' Data event 0x%x tx%d/rx%d %dr/%dw : %dr/%dw\n",
				ch->n, ch->name,
				ch_flags,
				ch->fifo_size -
					(smd_stream_write_avail(ch) + 1),
				smd_stream_read_avail(ch),
				ch->half_ch->get_tail(ch->send),
				ch->half_ch->get_head(ch->send),
				ch->half_ch->get_tail(ch->recv),
				ch->half_ch->get_head(ch->recv)
				);
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* avoid a duplicate event when the state change already fired */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1377
/* log an inbound SMD interrupt from the given edge's remote subsystem */
static inline void log_irq(uint32_t subsystem)
{
	const char *subsys = smd_edge_to_subsystem(subsystem);

	/* NOTE(review): the cast presumably silences an unused-variable
	 * warning when SMx_POWER_INFO compiles to nothing — macro body
	 * is not visible in this chunk */
	(void) subsys;

	SMx_POWER_INFO("SMD Int %s->Apps\n", subsys);
}
1386
Brian Swetland37521a32009-07-01 18:30:47 -07001387static irqreturn_t smd_modem_irq_handler(int irq, void *data)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001388{
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301389 log_irq(SMD_APPS_MODEM);
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001390 ++interrupt_stats[SMD_MODEM].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001391 handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001392 handle_smd_irq_closing_list();
Brian Swetland37521a32009-07-01 18:30:47 -07001393 return IRQ_HANDLED;
1394}
1395
1396static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
1397{
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301398 log_irq(SMD_APPS_QDSP);
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001399 ++interrupt_stats[SMD_Q6].smd_in_count;
Brian Swetland37521a32009-07-01 18:30:47 -07001400 handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001401 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001402 return IRQ_HANDLED;
1403}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001404
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001405static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
1406{
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301407 log_irq(SMD_APPS_DSPS);
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001408 ++interrupt_stats[SMD_DSPS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001409 handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
1410 handle_smd_irq_closing_list();
1411 return IRQ_HANDLED;
1412}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001413
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001414static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
1415{
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301416 log_irq(SMD_APPS_WCNSS);
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001417 ++interrupt_stats[SMD_WCNSS].smd_in_count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001418 handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
1419 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001420 return IRQ_HANDLED;
1421}
1422
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001423static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
1424{
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301425 log_irq(SMD_APPS_RPM);
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001426 ++interrupt_stats[SMD_RPM].smd_in_count;
1427 handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
1428 handle_smd_irq_closing_list();
1429 return IRQ_HANDLED;
1430}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001431
/*
 * Service every edge's channel list as if an inbound SMD interrupt had
 * fired; invoked from the smd_fake_irq_tasklet and directly by
 * smd_channel_reset() to flush state changes without a real interrupt.
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}
1441
/* Tasklet scheduled by smd_sleep_exit() to emulate a missed SMD interrupt. */
static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1443
Brian Swetland37521a32009-07-01 18:30:47 -07001444static inline int smd_need_int(struct smd_channel *ch)
1445{
1446 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001447 if (ch->half_ch->get_fHEAD(ch->recv) ||
1448 ch->half_ch->get_fTAIL(ch->recv) ||
1449 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001450 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001451 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001452 return 1;
1453 }
1454 return 0;
1455}
1456
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001457void smd_sleep_exit(void)
1458{
1459 unsigned long flags;
1460 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001461 int need_int = 0;
1462
1463 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001464 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1465 if (smd_need_int(ch)) {
1466 need_int = 1;
1467 break;
1468 }
1469 }
1470 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1471 if (smd_need_int(ch)) {
1472 need_int = 1;
1473 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001474 }
1475 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001476 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1477 if (smd_need_int(ch)) {
1478 need_int = 1;
1479 break;
1480 }
1481 }
1482 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1483 if (smd_need_int(ch)) {
1484 need_int = 1;
1485 break;
1486 }
1487 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001488 spin_unlock_irqrestore(&smd_lock, flags);
1489 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001490
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001491 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001492 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001493 tasklet_schedule(&smd_fake_irq_tasklet);
1494 }
1495}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001496EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001497
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001498static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001499{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001500 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1501 return 0;
1502 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001503 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001504
1505 /* for cases where xfer type is 0 */
1506 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001507 return 0;
1508
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001509 /* for cases where xfer type is 0 */
1510 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1511 return 0;
1512
1513 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001514 return 1;
1515 else
1516 return 0;
1517}
1518
/*
 * Write up to @len bytes of @_data into the channel's shared-memory send
 * FIFO in stream mode.  Copies as much as currently fits; returns the
 * number of bytes actually written (possibly < len), 0 if nothing fit,
 * or -EINVAL for a negative length.  When @user_buf is set, @_data is a
 * userspace pointer and is copied with copy_from_user().
 * Interrupts the remote CPU only if at least one byte was committed.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
					int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	/* ch_write_buffer() yields the next contiguous free span of the FIFO */
	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			/* channel closed under us: report zero bytes written */
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				/*
				 * NOTE(review): on a partial copy_from_user the
				 * full xfer is still committed below, so up to r
				 * stale bytes reach the remote side -- confirm
				 * this best-effort behavior is intended.
				 */
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		/* publish the bytes to the remote side (advances head index) */
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* only wake the remote CPU if something was actually written */
	if (orig_len - len)
		ch->notify_other_cpu(ch);

	return orig_len - len;
}
1564
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001565static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1566 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001567{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001568 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001569 unsigned hdr[5];
1570
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001571 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001572 if (len < 0)
1573 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001574 else if (len == 0)
1575 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001576
1577 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1578 return -ENOMEM;
1579
1580 hdr[0] = len;
1581 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1582
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001583
1584 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1585 if (ret < 0 || ret != sizeof(hdr)) {
1586 SMD_DBG("%s failed to write pkt header: "
1587 "%d returned\n", __func__, ret);
1588 return -1;
1589 }
1590
1591
1592 ret = smd_stream_write(ch, _data, len, user_buf);
1593 if (ret < 0 || ret != len) {
1594 SMD_DBG("%s failed to write pkt data: "
1595 "%d returned\n", __func__, ret);
1596 return ret;
1597 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001598
1599 return len;
1600}
1601
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001602static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001603{
1604 int r;
1605
1606 if (len < 0)
1607 return -EINVAL;
1608
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001609 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001610 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001611 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301612 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001613
1614 return r;
1615}
1616
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001617static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001618{
1619 unsigned long flags;
1620 int r;
1621
1622 if (len < 0)
1623 return -EINVAL;
1624
1625 if (len > ch->current_packet)
1626 len = ch->current_packet;
1627
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001628 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001629 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001630 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301631 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001632
1633 spin_lock_irqsave(&smd_lock, flags);
1634 ch->current_packet -= r;
1635 update_packet_state(ch);
1636 spin_unlock_irqrestore(&smd_lock, flags);
1637
1638 return r;
1639}
1640
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001641static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1642 int user_buf)
1643{
1644 int r;
1645
1646 if (len < 0)
1647 return -EINVAL;
1648
1649 if (len > ch->current_packet)
1650 len = ch->current_packet;
1651
1652 r = ch_read(ch, data, len, user_buf);
1653 if (r > 0)
1654 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301655 ch->notify_other_cpu(ch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001656
1657 ch->current_packet -= r;
1658 update_packet_state(ch);
1659
1660 return r;
1661}
1662
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301663#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
/*
 * Bind a channel to its protocol-v2 shared-memory resources: the pair of
 * half-channel control structures (SMEM_SMD_BASE_ID + cid) and the data
 * FIFO region (SMEM_SMD_FIFO_BASE_ID + cid).  The FIFO region is split in
 * half: the first half is our send FIFO, the second our receive FIFO.
 * Returns 0 on success, -EINVAL if either SMEM item is missing or the
 * FIFO size is not a power of two.
 */
static int smd_alloc_v2(struct smd_channel *ch)
{
	void *buffer;
	unsigned buffer_sz;

	/* word-access edges use a layout with word-aligned flag fields */
	if (is_word_access_ch(ch->type)) {
		struct smd_shared_v2_word_access *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	} else {
		struct smd_shared_v2 *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	}
	/* accessor vtable matching the byte- vs word-access layout */
	ch->half_ch = get_half_ch_funcs(ch->type);

	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	/* split the region evenly: [send fifo][recv fifo] */
	buffer_sz /= 2;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;

	return 0;
}
1710
/* Protocol v1 is not supported on PKG3/PKG4 targets; always fails. */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}
1715
1716#else /* define v1 for older targets */
/* Protocol v2 is not supported on older (pre-PKG3) targets; always fails. */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}
1721
/*
 * Bind a channel to its protocol-v1 shared-memory resources: a single
 * SMEM item (ID_SMD_CHANNELS + cid) holding both half-channel control
 * blocks and both fixed-size (SMD_BUF_SIZE) data FIFOs.
 * Returns 0 on success, -EINVAL if the SMEM item does not exist.
 */
static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	/* accessor vtable matching the byte- vs word-access layout */
	ch->half_ch = get_half_ch_funcs(ch->type);
	return 0;
}
1738
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301739#endif
1740
/*
 * Create the local bookkeeping for a channel described by an allocation
 * table entry: allocate the smd_channel struct, attach its shared-memory
 * resources (v2 first, v1 as fallback), select per-edge notify and
 * packet/stream operations, add it to the closed-channel pool, and
 * register a platform device so clients can bind by name.
 * Returns 0 on success, -1 on allocation or SMEM-binding failure.
 * The channel struct is owned by the closed list after this returns.
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	/* try protocol v2, fall back to v1; both failing is fatal */
	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two, so this mask wraps FIFO indices */
	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarentees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* select packet- or stream-mode operations for this channel */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	/* copy the name and force NUL termination */
	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		 ch->name, ch->n);

	/* park the new channel in the closed pool until a client opens it */
	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1812
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301813static inline void notify_loopback_smd(smd_channel_t *ch_notif)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001814{
1815 unsigned long flags;
1816 struct smd_channel *ch;
1817
1818 spin_lock_irqsave(&smd_lock, flags);
1819 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1820 ch->notify(ch->priv, SMD_EVENT_DATA);
1821 }
1822 spin_unlock_irqrestore(&smd_lock, flags);
1823}
1824
1825static int smd_alloc_loopback_channel(void)
1826{
1827 static struct smd_half_channel smd_loopback_ctl;
1828 static char smd_loopback_data[SMD_BUF_SIZE];
1829 struct smd_channel *ch;
1830
1831 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1832 if (ch == 0) {
1833 pr_err("%s: out of memory\n", __func__);
1834 return -1;
1835 }
1836 ch->n = SMD_LOOPBACK_CID;
1837
1838 ch->send = &smd_loopback_ctl;
1839 ch->recv = &smd_loopback_ctl;
1840 ch->send_data = smd_loopback_data;
1841 ch->recv_data = smd_loopback_data;
1842 ch->fifo_size = SMD_BUF_SIZE;
1843
1844 ch->fifo_mask = ch->fifo_size - 1;
1845 ch->type = SMD_LOOPBACK_TYPE;
1846 ch->notify_other_cpu = notify_loopback_smd;
1847
1848 ch->read = smd_stream_read;
1849 ch->write = smd_stream_write;
1850 ch->read_avail = smd_stream_read_avail;
1851 ch->write_avail = smd_stream_write_avail;
1852 ch->update_state = update_stream_state;
1853 ch->read_from_cb = smd_stream_read;
1854
1855 memset(ch->name, 0, 20);
1856 memcpy(ch->name, "local_loopback", 14);
1857
1858 ch->pdev.name = ch->name;
1859 ch->pdev.id = ch->type;
1860
1861 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001862
1863 mutex_lock(&smd_creation_mutex);
1864 list_add(&ch->ch_list, &smd_ch_closed_list);
1865 mutex_unlock(&smd_creation_mutex);
1866
1867 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001868 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001869}
1870
/* Default notify callback installed on closed channels: discards all events. */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1874
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001875static void finalize_channel_close_fn(struct work_struct *work)
1876{
1877 unsigned long flags;
1878 struct smd_channel *ch;
1879 struct smd_channel *index;
1880
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001881 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001882 spin_lock_irqsave(&smd_lock, flags);
1883 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1884 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001885 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001886 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1887 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001888 }
1889 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001890 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001891}
1892
1893struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001894{
1895 struct smd_channel *ch;
1896
1897 mutex_lock(&smd_creation_mutex);
1898 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001899 if (!strcmp(name, ch->name) &&
1900 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001901 list_del(&ch->ch_list);
1902 mutex_unlock(&smd_creation_mutex);
1903 return ch;
1904 }
1905 }
1906 mutex_unlock(&smd_creation_mutex);
1907
1908 return NULL;
1909}
1910
/*
 * Open the channel named @name on processor edge @edge and begin the
 * OPENING handshake.  @notify (may be NULL) is called with channel
 * events; @priv is passed back to it.  On success *_ch receives the
 * channel handle and 0 is returned.  Returns -ENODEV if SMD (or the
 * edge) is not initialized or the channel does not exist, and -EAGAIN
 * if the channel exists but is still being closed.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0 && !smd_edge_inited(edge)) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side: mark it opened immediately */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on its edge's active list so IRQs service it */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1999
2000
2001int smd_open(const char *name, smd_channel_t **_ch,
2002 void *priv, void (*notify)(void *, unsigned))
2003{
2004 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
2005 notify);
2006}
2007EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002008
/*
 * Close a channel.  Signals CLOSED to the remote side and removes the
 * channel from its active edge list.  If the remote side is still OPENED
 * the channel is parked on the closing list until the remote acknowledges
 * (serviced via handle_smd_irq_closing_list); otherwise it goes straight
 * back to the closed pool.  Returns 0, or -1 for a NULL channel.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* loopback: clear modem-control flags by hand, no remote side */
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* remote still open: finish the close when it acks */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
2042
2043int smd_write_start(smd_channel_t *ch, int len)
2044{
2045 int ret;
2046 unsigned hdr[5];
2047
2048 if (!ch) {
2049 pr_err("%s: Invalid channel specified\n", __func__);
2050 return -ENODEV;
2051 }
2052 if (!ch->is_pkt_ch) {
2053 pr_err("%s: non-packet channel specified\n", __func__);
2054 return -EACCES;
2055 }
2056 if (len < 1) {
2057 pr_err("%s: invalid length: %d\n", __func__, len);
2058 return -EINVAL;
2059 }
2060
2061 if (ch->pending_pkt_sz) {
2062 pr_err("%s: packet of size: %d in progress\n", __func__,
2063 ch->pending_pkt_sz);
2064 return -EBUSY;
2065 }
2066 ch->pending_pkt_sz = len;
2067
2068 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
2069 ch->pending_pkt_sz = 0;
2070 SMD_DBG("%s: no space to write packet header\n", __func__);
2071 return -EAGAIN;
2072 }
2073
2074 hdr[0] = len;
2075 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
2076
2077
2078 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
2079 if (ret < 0 || ret != sizeof(hdr)) {
2080 ch->pending_pkt_sz = 0;
2081 pr_err("%s: packet header failed to write\n", __func__);
2082 return -EPERM;
2083 }
2084 return 0;
2085}
2086EXPORT_SYMBOL(smd_write_start);
2087
2088int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2089{
2090 int bytes_written;
2091
2092 if (!ch) {
2093 pr_err("%s: Invalid channel specified\n", __func__);
2094 return -ENODEV;
2095 }
2096 if (len < 1) {
2097 pr_err("%s: invalid length: %d\n", __func__, len);
2098 return -EINVAL;
2099 }
2100
2101 if (!ch->pending_pkt_sz) {
2102 pr_err("%s: no transaction in progress\n", __func__);
2103 return -ENOEXEC;
2104 }
2105 if (ch->pending_pkt_sz - len < 0) {
2106 pr_err("%s: segment of size: %d will make packet go over "
2107 "length\n", __func__, len);
2108 return -EINVAL;
2109 }
2110
2111 bytes_written = smd_stream_write(ch, data, len, user_buf);
2112
2113 ch->pending_pkt_sz -= bytes_written;
2114
2115 return bytes_written;
2116}
2117EXPORT_SYMBOL(smd_write_segment);
2118
2119int smd_write_end(smd_channel_t *ch)
2120{
2121
2122 if (!ch) {
2123 pr_err("%s: Invalid channel specified\n", __func__);
2124 return -ENODEV;
2125 }
2126 if (ch->pending_pkt_sz) {
2127 pr_err("%s: current packet not completely written\n", __func__);
2128 return -E2BIG;
2129 }
2130
2131 return 0;
2132}
2133EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002134
Jeff Hugo44fd9832013-04-04 15:56:21 -06002135int smd_write_segment_avail(smd_channel_t *ch)
2136{
2137 int n;
2138
2139 if (!ch) {
2140 pr_err("%s: Invalid channel specified\n", __func__);
2141 return -ENODEV;
2142 }
2143 if (!ch->is_pkt_ch) {
2144 pr_err("%s: non-packet channel specified\n", __func__);
2145 return -ENODEV;
2146 }
2147
2148 n = smd_stream_write_avail(ch);
2149
2150 /* pkt hdr already written, no need to reserve space for it */
2151 if (ch->pending_pkt_sz)
2152 return n;
2153
2154 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
2155}
2156EXPORT_SYMBOL(smd_write_segment_avail);
2157
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002158int smd_read(smd_channel_t *ch, void *data, int len)
2159{
Jack Pham1b236d12012-03-19 15:27:18 -07002160 if (!ch) {
2161 pr_err("%s: Invalid channel specified\n", __func__);
2162 return -ENODEV;
2163 }
2164
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002166}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002167EXPORT_SYMBOL(smd_read);
2168
2169int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2170{
Jack Pham1b236d12012-03-19 15:27:18 -07002171 if (!ch) {
2172 pr_err("%s: Invalid channel specified\n", __func__);
2173 return -ENODEV;
2174 }
2175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002176 return ch->read(ch, data, len, 1);
2177}
2178EXPORT_SYMBOL(smd_read_user_buffer);
2179
2180int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2181{
Jack Pham1b236d12012-03-19 15:27:18 -07002182 if (!ch) {
2183 pr_err("%s: Invalid channel specified\n", __func__);
2184 return -ENODEV;
2185 }
2186
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002187 return ch->read_from_cb(ch, data, len, 0);
2188}
2189EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002190
2191int smd_write(smd_channel_t *ch, const void *data, int len)
2192{
Jack Pham1b236d12012-03-19 15:27:18 -07002193 if (!ch) {
2194 pr_err("%s: Invalid channel specified\n", __func__);
2195 return -ENODEV;
2196 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002197
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002198 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002199}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002200EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002202int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002203{
Jack Pham1b236d12012-03-19 15:27:18 -07002204 if (!ch) {
2205 pr_err("%s: Invalid channel specified\n", __func__);
2206 return -ENODEV;
2207 }
2208
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002209 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002210}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002211EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002212
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002213int smd_read_avail(smd_channel_t *ch)
2214{
Jack Pham1b236d12012-03-19 15:27:18 -07002215 if (!ch) {
2216 pr_err("%s: Invalid channel specified\n", __func__);
2217 return -ENODEV;
2218 }
2219
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002220 return ch->read_avail(ch);
2221}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002222EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002223
2224int smd_write_avail(smd_channel_t *ch)
2225{
Jack Pham1b236d12012-03-19 15:27:18 -07002226 if (!ch) {
2227 pr_err("%s: Invalid channel specified\n", __func__);
2228 return -ENODEV;
2229 }
2230
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002231 return ch->write_avail(ch);
2232}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002233EXPORT_SYMBOL(smd_write_avail);
2234
2235void smd_enable_read_intr(smd_channel_t *ch)
2236{
2237 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002238 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002239}
2240EXPORT_SYMBOL(smd_enable_read_intr);
2241
2242void smd_disable_read_intr(smd_channel_t *ch)
2243{
2244 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002245 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002246}
2247EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002248
/**
 * Enable/disable receive interrupts for the remote processor used by a
 * particular channel.
 * @ch: open channel handle to use for the edge
 * @mask: 1 = mask interrupts; 0 = unmask interrupts
 * @returns: 0 for success; < 0 for failure
 *
 * Note that this enables/disables all interrupts from the remote subsystem for
 * all channels. As such, it should be used with care and only for specific
 * use cases such as power-collapse sequencing.
 */
int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
{
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;
	struct interrupt_config_item *int_cfg;

	if (!ch)
		return -EINVAL;

	/* channel type indexes the edge table; reject out-of-range types */
	if (ch->type >= ARRAY_SIZE(edge_to_pids))
		return -ENODEV;

	/* look up the per-remote-processor SMD interrupt configuration */
	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;

	if (int_cfg->irq_id < 0)
		return -ENODEV;

	irq_chip = irq_get_chip(int_cfg->irq_id);
	if (!irq_chip)
		return -ENODEV;

	irq_data = irq_get_irq_data(int_cfg->irq_id);
	if (!irq_data)
		return -ENODEV;

	/* mask/unmask at the irq_chip level: affects the whole edge */
	if (mask) {
		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_mask(irq_data);
	} else {
		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_unmask(irq_data);
	}

	return 0;
}
EXPORT_SYMBOL(smd_mask_receive_interrupt);
2298
/* Blocking waits are not implemented for SMD channels; both parameters are
 * ignored and the call always fails with -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2303
/* Blocking waits are not implemented for SMD channels; both parameters are
 * ignored and the call always fails with -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2308
2309int smd_cur_packet_size(smd_channel_t *ch)
2310{
Jack Pham1b236d12012-03-19 15:27:18 -07002311 if (!ch) {
2312 pr_err("%s: Invalid channel specified\n", __func__);
2313 return -ENODEV;
2314 }
2315
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002316 return ch->current_packet;
2317}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002318EXPORT_SYMBOL(smd_cur_packet_size);
2319
2320int smd_tiocmget(smd_channel_t *ch)
2321{
Jack Pham1b236d12012-03-19 15:27:18 -07002322 if (!ch) {
2323 pr_err("%s: Invalid channel specified\n", __func__);
2324 return -ENODEV;
2325 }
2326
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002327 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2328 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2329 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2330 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2331 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2332 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002333}
2334EXPORT_SYMBOL(smd_tiocmget);
2335
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002336/* this api will be called while holding smd_lock */
2337int
2338smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002339{
Jack Pham1b236d12012-03-19 15:27:18 -07002340 if (!ch) {
2341 pr_err("%s: Invalid channel specified\n", __func__);
2342 return -ENODEV;
2343 }
2344
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002345 if (set & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002346 ch->half_ch->set_fDSR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002347
2348 if (set & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002349 ch->half_ch->set_fCTS(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002350
2351 if (clear & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002352 ch->half_ch->set_fDSR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002353
2354 if (clear & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002355 ch->half_ch->set_fCTS(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002356
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002357 ch->half_ch->set_fSTATE(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002358 barrier();
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05302359 ch->notify_other_cpu(ch);
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002360
2361 return 0;
2362}
2363EXPORT_SYMBOL(smd_tiocmset_from_cb);
2364
2365int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2366{
2367 unsigned long flags;
2368
Jack Pham1b236d12012-03-19 15:27:18 -07002369 if (!ch) {
2370 pr_err("%s: Invalid channel specified\n", __func__);
2371 return -ENODEV;
2372 }
2373
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002374 spin_lock_irqsave(&smd_lock, flags);
2375 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002376 spin_unlock_irqrestore(&smd_lock, flags);
2377
2378 return 0;
2379}
2380EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002381
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002382int smd_is_pkt_avail(smd_channel_t *ch)
2383{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002384 unsigned long flags;
2385
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002386 if (!ch || !ch->is_pkt_ch)
2387 return -EINVAL;
2388
2389 if (ch->current_packet)
2390 return 1;
2391
Jeff Hugoa8549f12012-08-13 20:36:18 -06002392 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002393 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002394 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002395
2396 return ch->current_packet ? 1 : 0;
2397}
2398EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002399
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002400static int smsm_cb_init(void)
2401{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002402 struct smsm_state_info *state_info;
2403 int n;
2404 int ret = 0;
2405
2406 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2407 GFP_KERNEL);
2408
2409 if (!smsm_states) {
2410 pr_err("%s: SMSM init failed\n", __func__);
2411 return -ENOMEM;
2412 }
2413
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002414 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2415 if (!smsm_cb_wq) {
2416 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2417 kfree(smsm_states);
2418 return -EFAULT;
2419 }
2420
Eric Holmbergc8002902011-09-16 13:55:57 -06002421 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002422 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2423 state_info = &smsm_states[n];
2424 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002425 state_info->intr_mask_set = 0x0;
2426 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002427 INIT_LIST_HEAD(&state_info->callbacks);
2428 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002429 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002430
2431 return ret;
2432}
2433
/*
 * One-time SMSM initialization: verifies the shared-memory remote spinlock
 * can be acquired, sizes the SMSM tables from shared memory, allocates the
 * snapshot FIFO and wakelock, maps the shared state/interrupt-mask/
 * interrupt-mux regions, initializes the callback machinery, and registers
 * the PM notifier. Subsequent calls are no-ops.
 *
 * Returns 0 on success or if already initialized; a negative errno on
 * allocation failure. Panics if the remote spinlock is never released
 * within RSPIN_INIT_WAIT_MS.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;
	unsigned long flags;
	unsigned long j_start;
	static int first = 1;	/* guards against repeated initialization */
	remote_spinlock_t *remote_spinlock;

	if (!first)
		return 0;
	first = 0;

	/* Verify that remote spinlock is not deadlocked */
	remote_spinlock = smem_get_remote_spinlock();
	j_start = jiffies;
	while (!remote_spin_trylock_irqsave(remote_spinlock, flags)) {
		if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
			panic("%s: Remote processor %d will not release spinlock\n",
				__func__, remote_spin_owner(remote_spinlock));
		}
	}
	remote_spin_unlock_irqrestore(remote_spinlock, flags);

	/* Let shared memory override the default table dimensions, when the
	 * size-info item exists. */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	/* FIFO holds up to SMSM_SNAPSHOT_CNT full state snapshots. */
	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	/* Map (or create) the shared state array and zero the apps entry. */
	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* DEM entry only exists on modem image >= 0xB */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	/* Map the per-host interrupt-mask table; start with everything
	 * masked except the legacy modem bits. */
	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* ensure all shared-memory writes above are visible before use */
	wmb();

	/* Prime the PM notifier state, then register for suspend events;
	 * registration failure is logged but not fatal. */
	smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL);
	i = register_pm_notifier(&smsm_pm_nb);
	if (i)
		pr_err("%s: power state notif error %d\n", __func__, i);

	return 0;
}
2525
2526void smsm_reset_modem(unsigned mode)
2527{
2528 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2529 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2530 } else if (mode == SMSM_MODEM_WAIT) {
2531 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2532 } else { /* reset_mode is SMSM_RESET or default */
2533 mode = SMSM_RESET;
2534 }
2535
2536 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2537}
2538EXPORT_SYMBOL(smsm_reset_modem);
2539
2540void smsm_reset_modem_cont(void)
2541{
2542 unsigned long flags;
2543 uint32_t state;
2544
2545 if (!smsm_info.state)
2546 return;
2547
2548 spin_lock_irqsave(&smem_lock, flags);
2549 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2550 & ~SMSM_MODEM_WAIT;
2551 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2552 wmb();
2553 spin_unlock_irqrestore(&smem_lock, flags);
2554}
2555EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002556
/*
 * Copies the current value of every SMSM state entry into the snapshot
 * FIFO (followed by the @use_wakelock flag) and queues the callback worker
 * to deliver notifications. Called under smem_lock from the SMSM
 * interrupt path.
 *
 * @use_wakelock: nonzero to hold smsm_snapshot_wakelock until the worker
 *                has consumed this snapshot.
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	/* A snapshot needs SMSM_SNAPSHOT_SIZE bytes: one word per entry
	 * plus the trailing wakelock flag. Drop it if the FIFO is full. */
	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 * 1) increment snapshot count
	 * 2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 * a) verifies >= 1 snapshots are in FIFO
	 * b) processes snapshot
	 * c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* Undo the wakelock reference taken above: the worker will never
	 * consume this partial snapshot, so it must not keep the count. */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002631
/*
 * Common SMSM interrupt handler. For the ADSP interrupt it only records a
 * state snapshot; for other edges it additionally runs the modem
 * reset/init handshake state machine on the apps SMSM entry and notifies
 * the remote side of any local state changes.
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* Track the Q6->apps intr-mux counter on 8x50 targets;
		 * only the cached previous value is updated here. */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			/* Modem initiated a reset: ack it (unless the
			 * handshake is disabled) and flush caches before
			 * the restart sequence begins. */
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			/* Mirror the modem's init progress into the apps
			 * entry; once INIT+SMDINIT+RPCINIT are all set,
			 * declare RUN. */
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		/* Publish any local changes and interrupt the other side. */
		if (old_apps != apps) {
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2714
Eric Holmberg98c6c642012-02-24 11:29:35 -07002715static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002716{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002717 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002718 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002719 return smsm_irq_handler(irq, data);
2720}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002721
Eric Holmberg98c6c642012-02-24 11:29:35 -07002722static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2723{
2724 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002725 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002726 return smsm_irq_handler(irq, data);
2727}
2728
2729static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2730{
2731 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002732 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002733 return smsm_irq_handler(irq, data);
2734}
2735
2736static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2737{
2738 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002739 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002740 return smsm_irq_handler(irq, data);
2741}
2742
Eric Holmberge8a39322012-04-03 15:14:02 -06002743/*
2744 * Changes the global interrupt mask. The set and clear masks are re-applied
2745 * every time the global interrupt mask is updated for callback registration
2746 * and de-registration.
2747 *
2748 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2749 * mask and the set mask, the result will be that the interrupt is set.
2750 *
2751 * @smsm_entry SMSM entry to change
2752 * @clear_mask 1 = clear bit, 0 = no-op
2753 * @set_mask 1 = set bit, 0 = no-op
2754 *
2755 * @returns 0 for success, < 0 for error
2756 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002757int smsm_change_intr_mask(uint32_t smsm_entry,
2758 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002759{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002760 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002761 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002762
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002763 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2764 pr_err("smsm_change_state: Invalid entry %d\n",
2765 smsm_entry);
2766 return -EINVAL;
2767 }
2768
2769 if (!smsm_info.intr_mask) {
2770 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002771 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002772 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002773
2774 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002775 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2776 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002777
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002778 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2779 new_mask = (old_mask & ~clear_mask) | set_mask;
2780 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002781
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002782 wmb();
2783 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002784
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002785 return 0;
2786}
2787EXPORT_SYMBOL(smsm_change_intr_mask);
2788
2789int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2790{
2791 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2792 pr_err("smsm_change_state: Invalid entry %d\n",
2793 smsm_entry);
2794 return -EINVAL;
2795 }
2796
2797 if (!smsm_info.intr_mask) {
2798 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2799 return -EIO;
2800 }
2801
2802 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2803 return 0;
2804}
2805EXPORT_SYMBOL(smsm_get_intr_mask);
2806
2807int smsm_change_state(uint32_t smsm_entry,
2808 uint32_t clear_mask, uint32_t set_mask)
2809{
2810 unsigned long flags;
2811 uint32_t old_state, new_state;
2812
2813 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2814 pr_err("smsm_change_state: Invalid entry %d",
2815 smsm_entry);
2816 return -EINVAL;
2817 }
2818
2819 if (!smsm_info.state) {
2820 pr_err("smsm_change_state <SM NO STATE>\n");
2821 return -EIO;
2822 }
2823 spin_lock_irqsave(&smem_lock, flags);
2824
2825 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2826 new_state = (old_state & ~clear_mask) | set_mask;
2827 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2828 SMSM_DBG("smsm_change_state %x\n", new_state);
2829 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002830
2831 spin_unlock_irqrestore(&smem_lock, flags);
2832
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002833 return 0;
2834}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002835EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002836
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002837uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002838{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002839 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002840
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002841 /* needs interface change to return error code */
2842 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2843 pr_err("smsm_change_state: Invalid entry %d",
2844 smsm_entry);
2845 return 0;
2846 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002847
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002848 if (!smsm_info.state) {
2849 pr_err("smsm_get_state <SM NO STATE>\n");
2850 } else {
2851 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2852 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002853
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002854 return rv;
2855}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002856EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002857
/**
 * Performs SMSM callback client notifiction.
 *
 * Drains complete snapshots from smsm_snapshot_fifo: for each entry whose
 * value changed since the last delivered snapshot, invokes every
 * registered callback whose mask overlaps the changed bits, then releases
 * the wakelock reference taken when the snapshot was queued (if any).
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	/* Only consume whole snapshots; a partial write never reaches the
	 * FIFO in this quantity (see smsm_cb_snapshot). */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			/* Deliver callbacks only for bits that changed
			 * since the previously delivered value. */
			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		/* Drop the snapshot's wakelock reference; release the lock
		 * itself when the last outstanding snapshot is consumed. */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						       " wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
						__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2932
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002933
/**
 * Registers callback for SMSM state notifications when the specified
 * bits change.
 *
 * @smsm_entry Processor entry to register for
 * @mask Bits whose changes should trigger the callback
 * @notify Notification function to register
 * @data Opaque data passed in to callback
 *
 * @returns Status code
 * <0 error code
 * 0 inserted new entry
 * 1 updated mask of existing entry
 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;	/* union of all callback masks for entry */
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	/* If this (notify, data) pair is already registered, just widen its
	 * mask; accumulate every callback's mask along the way. */
	state = &smsm_states[smsm_entry];
	list_for_each_entry(cb_info,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		/* GFP_ATOMIC: allocation happens with smsm_lock held */
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		/* Re-apply the entry's global clear/set overrides before
		 * writing the recomputed mask to shared memory. */
		spin_lock_irqsave(&smem_lock, flags);
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
3018
3019
/**
 * Deregisters for SMSM state notifications for the specified bits.
 *
 * @smsm_entry Processor entry to deregister
 * @mask Bits to deregister (if result is 0, callback is removed)
 * @notify Notification function to deregister
 * @data Opaque data passed in to callback
 *
 * @returns Status code
 * <0 error code
 * 0 not found
 * 1 updated mask
 * 2 removed callback
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;	/* union of remaining callback masks */
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	/* _safe iteration: the matching node may be deleted mid-walk. */
	state = &smsm_states[smsm_entry];
	list_for_each_entry_safe(cb_info, cb_tmp,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				continue;
			}
		}
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		/* Re-apply the entry's global clear/set overrides before
		 * writing the recomputed mask to shared memory. */
		spin_lock_irqsave(&smem_lock, flags);
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3092
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003093int smd_core_init(void)
3094{
3095 int r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003096 unsigned long flags = IRQF_TRIGGER_RISING;
3097 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003098
Brian Swetland37521a32009-07-01 18:30:47 -07003099 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003100 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003101 if (r < 0)
3102 return r;
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303103 interrupt_stats[SMD_MODEM].smd_interrupt_id = INT_A9_M2A_0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003104 r = enable_irq_wake(INT_A9_M2A_0);
3105 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003106 pr_err("smd_core_init: "
3107 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003108
Eric Holmberg98c6c642012-02-24 11:29:35 -07003109 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003110 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003111 if (r < 0) {
3112 free_irq(INT_A9_M2A_0, 0);
3113 return r;
3114 }
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303115 interrupt_stats[SMD_MODEM].smsm_interrupt_id = INT_A9_M2A_5;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003116 r = enable_irq_wake(INT_A9_M2A_5);
3117 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003118 pr_err("smd_core_init: "
3119 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003120
Brian Swetland37521a32009-07-01 18:30:47 -07003121#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003122#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3123 flags |= IRQF_SHARED;
3124#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003125 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003126 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003127 if (r < 0) {
3128 free_irq(INT_A9_M2A_0, 0);
3129 free_irq(INT_A9_M2A_5, 0);
3130 return r;
3131 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003132
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303133 interrupt_stats[SMD_Q6].smd_interrupt_id = INT_ADSP_A11;
Eric Holmberg98c6c642012-02-24 11:29:35 -07003134 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3135 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003136 if (r < 0) {
3137 free_irq(INT_A9_M2A_0, 0);
3138 free_irq(INT_A9_M2A_5, 0);
3139 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3140 return r;
3141 }
3142
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303143 interrupt_stats[SMD_Q6].smsm_interrupt_id = INT_ADSP_A11_SMSM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003144 r = enable_irq_wake(INT_ADSP_A11);
3145 if (r < 0)
3146 pr_err("smd_core_init: "
3147 "enable_irq_wake failed for INT_ADSP_A11\n");
3148
3149#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3150 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3151 if (r < 0)
3152 pr_err("smd_core_init: enable_irq_wake "
3153 "failed for INT_ADSP_A11_SMSM\n");
3154#endif
3155 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003156#endif
3157
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003158#if defined(CONFIG_DSPS)
3159 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3160 flags, "smd_dev", smd_dsps_irq_handler);
3161 if (r < 0) {
3162 free_irq(INT_A9_M2A_0, 0);
3163 free_irq(INT_A9_M2A_5, 0);
3164 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003165 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003166 return r;
3167 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003168
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303169 interrupt_stats[SMD_DSPS].smd_interrupt_id = INT_DSPS_A11;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003170 r = enable_irq_wake(INT_DSPS_A11);
3171 if (r < 0)
3172 pr_err("smd_core_init: "
3173 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003174#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003176#if defined(CONFIG_WCNSS)
3177 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3178 flags, "smd_dev", smd_wcnss_irq_handler);
3179 if (r < 0) {
3180 free_irq(INT_A9_M2A_0, 0);
3181 free_irq(INT_A9_M2A_5, 0);
3182 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003183 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003184 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3185 return r;
3186 }
3187
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303188 interrupt_stats[SMD_WCNSS].smd_interrupt_id = INT_WCNSS_A11;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003189 r = enable_irq_wake(INT_WCNSS_A11);
3190 if (r < 0)
3191 pr_err("smd_core_init: "
3192 "enable_irq_wake failed for INT_WCNSS_A11\n");
3193
Eric Holmberg98c6c642012-02-24 11:29:35 -07003194 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3195 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003196 if (r < 0) {
3197 free_irq(INT_A9_M2A_0, 0);
3198 free_irq(INT_A9_M2A_5, 0);
3199 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003200 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003201 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3202 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3203 return r;
3204 }
3205
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303206 interrupt_stats[SMD_WCNSS].smsm_interrupt_id = INT_WCNSS_A11_SMSM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003207 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3208 if (r < 0)
3209 pr_err("smd_core_init: "
3210 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3211#endif
3212
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003213#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003214 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3215 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003216 if (r < 0) {
3217 free_irq(INT_A9_M2A_0, 0);
3218 free_irq(INT_A9_M2A_5, 0);
3219 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003220 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003221 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3222 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003223 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003224 return r;
3225 }
3226
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303227 interrupt_stats[SMD_DSPS].smsm_interrupt_id = INT_DSPS_A11_SMSM;
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003228 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3229 if (r < 0)
3230 pr_err("smd_core_init: "
3231 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3232#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003233 SMD_INFO("smd_core_init() done\n");
3234
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003235 return 0;
3236}
3237
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303238static int intr_init(struct interrupt_config_item *private_irq,
3239 struct smd_irq_config *platform_irq,
3240 struct platform_device *pdev
3241 )
3242{
3243 int irq_id;
3244 int ret;
3245 int ret_wake;
3246
3247 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3248 private_irq->out_offset = platform_irq->out_offset;
3249 private_irq->out_base = platform_irq->out_base;
3250
3251 irq_id = platform_get_irq_byname(
3252 pdev,
3253 platform_irq->irq_name
3254 );
3255 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3256 platform_irq->irq_name, irq_id);
3257 ret = request_irq(irq_id,
3258 private_irq->irq_handler,
3259 platform_irq->flags,
3260 platform_irq->device_name,
3261 (void *)platform_irq->dev_id
3262 );
3263 if (ret < 0) {
3264 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003265 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303266 } else {
3267 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003268 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303269 ret_wake = enable_irq_wake(irq_id);
3270 if (ret_wake < 0) {
3271 pr_err("smd: enable_irq_wake failed on %s",
3272 platform_irq->irq_name);
3273 }
3274 }
3275
3276 return ret;
3277}
3278
/*
 * smd_core_platform_init() - interrupt bring-up from board platform data.
 *
 * Iterates the platform-supplied subsystem configs, registering the SMD
 * interrupt (and, where present, the SMSM interrupt) of each edge via
 * intr_init(), recording line numbers in interrupt_stats and the PIL
 * subsystem name in edge_to_pids.  On any failure every entry whose
 * recorded irq_id is non-negative is freed and the first error returned.
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		interrupt_stats[cfg->irq_config_id].smd_interrupt_id
						 = cfg->smd_int.irq_id;
		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		/*
		 * When the edge has no smsm interrupt, ret still holds the
		 * (non-negative) result of the smd intr_init() above, so
		 * this check is a no-op in that case.
		 */
		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		if (cfg->smsm_int.irq_id)
			interrupt_stats[cfg->irq_config_id].smsm_interrupt_id
						 = cfg->smsm_int.irq_id;
		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}

	SMD_INFO("smd_core_platform_init() done\n");

	return 0;

intr_failed:
	/*
	 * Unwind every config entry; intr_init() records a negative
	 * irq_id on failure, which the >= 0 guards below use to skip
	 * lines that were never successfully requested.
	 */
	pr_err("smd: deregistering IRQs\n");
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
	return err_ret;
}
3358
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003359static int msm_smsm_probe(struct platform_device *pdev)
3360{
3361 uint32_t edge;
3362 char *key;
3363 int ret;
3364 uint32_t irq_offset;
3365 uint32_t irq_bitmask;
3366 uint32_t irq_line;
3367 struct interrupt_config_item *private_irq;
3368 struct device_node *node;
3369 void *irq_out_base;
3370 resource_size_t irq_out_size;
3371 struct platform_device *parent_pdev;
3372 struct resource *r;
3373
3374 disable_smsm_reset_handshake = 1;
3375
3376 node = pdev->dev.of_node;
3377
3378 if (!pdev->dev.parent) {
3379 pr_err("%s: missing link to parent device\n", __func__);
3380 return -ENODEV;
3381 }
3382
3383 parent_pdev = to_platform_device(pdev->dev.parent);
3384
3385 key = "irq-reg-base";
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003386 r = platform_get_resource_byname(parent_pdev, IORESOURCE_MEM, key);
Brent Hronik159b1f72013-06-28 17:01:08 -06003387 if (!r)
3388 goto missing_key;
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003389 irq_out_size = resource_size(r);
3390 irq_out_base = ioremap_nocache(r->start, irq_out_size);
3391 if (!irq_out_base) {
3392 pr_err("%s: ioremap_nocache() of irq_out_base addr:%pr size:%pr\n",
3393 __func__, &r->start, &irq_out_size);
3394 return -ENOMEM;
3395 }
3396 SMSM_DBG("%s: %s = %p", __func__, key, irq_out_base);
3397
3398 key = "qcom,smsm-edge";
3399 ret = of_property_read_u32(node, key, &edge);
3400 if (ret)
3401 goto missing_key;
3402 SMSM_DBG("%s: %s = %d", __func__, key, edge);
3403
3404 key = "qcom,smsm-irq-offset";
3405 ret = of_property_read_u32(node, key, &irq_offset);
3406 if (ret)
3407 goto missing_key;
3408 SMSM_DBG("%s: %s = %x", __func__, key, irq_offset);
3409
3410 key = "qcom,smsm-irq-bitmask";
3411 ret = of_property_read_u32(node, key, &irq_bitmask);
3412 if (ret)
3413 goto missing_key;
3414 SMSM_DBG("%s: %s = %x", __func__, key, irq_bitmask);
3415
3416 key = "interrupts";
3417 irq_line = irq_of_parse_and_map(node, 0);
3418 if (!irq_line)
3419 goto missing_key;
3420 SMSM_DBG("%s: %s = %d", __func__, key, irq_line);
3421
3422 private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smsm;
3423 private_irq->out_bit_pos = irq_bitmask;
3424 private_irq->out_offset = irq_offset;
3425 private_irq->out_base = irq_out_base;
3426 private_irq->irq_id = irq_line;
3427
3428 ret = request_irq(irq_line,
3429 private_irq->irq_handler,
3430 IRQF_TRIGGER_RISING,
3431 "smsm_dev",
3432 NULL);
3433 if (ret < 0) {
3434 pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
3435 return ret;
3436 } else {
3437 ret = enable_irq_wake(irq_line);
3438 if (ret < 0)
3439 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
3440 irq_line);
3441 }
3442
3443 if (smsm_init())
3444 pr_err("smsm_init() failed\n");
3445
3446 smsm_irq_handler(0, 0);
3447
3448 return 0;
3449
3450missing_key:
3451 pr_err("%s: missing key: %s", __func__, key);
3452 return -ENODEV;
3453}
3454
3455static int msm_smd_probe(struct platform_device *pdev)
Jeff Hugo412356e2012-09-27 17:14:23 -06003456{
3457 uint32_t edge;
3458 char *key;
3459 int ret;
3460 uint32_t irq_offset;
3461 uint32_t irq_bitmask;
3462 uint32_t irq_line;
3463 unsigned long irq_flags = IRQF_TRIGGER_RISING;
3464 const char *pilstr;
3465 struct interrupt_config_item *private_irq;
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003466 struct device_node *node;
3467 void *irq_out_base;
3468 resource_size_t irq_out_size;
3469 struct platform_device *parent_pdev;
3470 struct resource *r;
3471
3472 node = pdev->dev.of_node;
3473
3474 if (!pdev->dev.parent) {
3475 pr_err("%s: missing link to parent device\n", __func__);
3476 return -ENODEV;
3477 }
3478
3479 parent_pdev = to_platform_device(pdev->dev.parent);
3480
3481 key = "irq-reg-base";
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003482 r = platform_get_resource_byname(parent_pdev, IORESOURCE_MEM, key);
Brent Hronik159b1f72013-06-28 17:01:08 -06003483 if (!r)
3484 goto missing_key;
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003485 irq_out_size = resource_size(r);
3486 irq_out_base = ioremap_nocache(r->start, irq_out_size);
3487 if (!irq_out_base) {
3488 pr_err("%s: ioremap_nocache() of irq_out_base addr:%pr size:%pr\n",
3489 __func__, &r->start, &irq_out_size);
3490 return -ENOMEM;
3491 }
3492 SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
Jeff Hugo412356e2012-09-27 17:14:23 -06003493
3494 key = "qcom,smd-edge";
3495 ret = of_property_read_u32(node, key, &edge);
3496 if (ret)
3497 goto missing_key;
3498 SMD_DBG("%s: %s = %d", __func__, key, edge);
3499
3500 key = "qcom,smd-irq-offset";
3501 ret = of_property_read_u32(node, key, &irq_offset);
3502 if (ret)
3503 goto missing_key;
3504 SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
3505
3506 key = "qcom,smd-irq-bitmask";
3507 ret = of_property_read_u32(node, key, &irq_bitmask);
3508 if (ret)
3509 goto missing_key;
3510 SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
3511
3512 key = "interrupts";
3513 irq_line = irq_of_parse_and_map(node, 0);
3514 if (!irq_line)
3515 goto missing_key;
3516 SMD_DBG("%s: %s = %d", __func__, key, irq_line);
3517
3518 key = "qcom,pil-string";
3519 pilstr = of_get_property(node, key, NULL);
3520 if (pilstr)
3521 SMD_DBG("%s: %s = %s", __func__, key, pilstr);
3522
3523 key = "qcom,irq-no-suspend";
3524 ret = of_property_read_bool(node, key);
3525 if (ret)
3526 irq_flags |= IRQF_NO_SUSPEND;
3527
3528 private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smd;
3529 private_irq->out_bit_pos = irq_bitmask;
3530 private_irq->out_offset = irq_offset;
3531 private_irq->out_base = irq_out_base;
3532 private_irq->irq_id = irq_line;
3533
3534 ret = request_irq(irq_line,
3535 private_irq->irq_handler,
3536 irq_flags,
3537 "smd_dev",
3538 NULL);
3539 if (ret < 0) {
3540 pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
3541 return ret;
3542 } else {
3543 ret = enable_irq_wake(irq_line);
3544 if (ret < 0)
3545 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
3546 irq_line);
3547 }
3548
3549 if (pilstr)
3550 strlcpy(edge_to_pids[edge].subsys_name, pilstr,
3551 SMD_MAX_CH_NAME_LEN);
3552
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003553 edge_to_pids[edge].initialized = true;
Jeff Hugo412356e2012-09-27 17:14:23 -06003554
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003555 schedule_work(&probe_work);
Jeff Hugo412356e2012-09-27 17:14:23 -06003556
3557 return 0;
3558
3559missing_key:
3560 pr_err("%s: missing key: %s", __func__, key);
3561 return -ENODEV;
3562}
3563
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003564static int msm_smd_probe_legacy(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003565{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303566 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003567
Jeff Hugo429dc2c2013-05-28 15:06:07 -06003568 if (!smem_initialized_check())
3569 return -ENODEV;
3570
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303571 SMD_INFO("smd probe\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003572
3573 if (smsm_init()) {
3574 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003575 return -1;
3576 }
3577
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303578 if (pdev) {
3579 if (pdev->dev.of_node) {
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003580 pr_err("%s: invalid device tree init\n", __func__);
3581 return -ENODEV;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303582 } else if (pdev->dev.platform_data) {
3583 ret = smd_core_platform_init(pdev);
3584 if (ret) {
3585 pr_err(
3586 "SMD: smd_core_platform_init() failed\n");
3587 return -ENODEV;
3588 }
3589 } else {
3590 ret = smd_core_init();
3591 if (ret) {
3592 pr_err("smd_core_init() failed\n");
3593 return -ENODEV;
3594 }
3595 }
3596 } else {
3597 pr_err("SMD: PDEV not found\n");
3598 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003599 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003600
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003601 smd_initialized = 1;
3602
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003603 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003604 smsm_irq_handler(0, 0);
3605 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003606
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003607 return 0;
3608}
3609
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

/*
 * Subsystem-restart registrations: one entry per (SMD processor id,
 * subsystem name) pair whose SMD/SMSM state must be cleaned up after
 * that remote processor shuts down.  Note several names share a
 * processor id ("gss" -> SMD_MODEM, "adsp" -> SMD_Q6).
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
};
3622
/*
 * restart_notifier_cb() - subsystem-restart notification handler.
 *
 * On SUBSYS_AFTER_SHUTDOWN, releases remote spinlocks that the dead
 * processor may still hold and resets every SMD channel on its edge.
 * All other restart stages are ignored; always returns NOTIFY_DONE.
 */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	remote_spinlock_t *remote_spinlock;

	/*
	 * Some SMD or SMSM clients assume SMD/SMSM SSR handling will be
	 * done in the AFTER_SHUTDOWN level. If this ever changes, extra
	 * care should be taken to verify no clients are broken.
	 */
	if (code == SUBSYS_AFTER_SHUTDOWN) {
		struct restart_notifier_block *notifier;

		notifier = container_of(this,
					struct restart_notifier_block, nb);
		SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
				__func__, notifier->processor,
				notifier->name);

		/* free shared spinlocks still owned by the dead processor */
		remote_spinlock = smem_get_remote_spinlock();
		remote_spin_release(remote_spinlock, notifier->processor);
		remote_spin_release_all(notifier->processor);

		smd_channel_reset(notifier->processor);
	}

	return NOTIFY_DONE;
}
3652
3653static __init int modem_restart_late_init(void)
3654{
3655 int i;
3656 void *handle;
3657 struct restart_notifier_block *nb;
3658
3659 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3660 nb = &restart_notifiers[i];
3661 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3662 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3663 __func__, nb->name, handle);
3664 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003665
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003666 return 0;
3667}
3668late_initcall(modem_restart_late_init);
3669
/* Device-tree match table for the DT-based SMD edge driver. */
static struct of_device_id msm_smd_match_table[] = {
	{ .compatible = "qcom,smd" },
	{},
};

/* DT-based SMD driver: msm_smd_probe() runs once per qcom,smd node. */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = "msm_smd_dt",
		.owner = THIS_MODULE,
		.of_match_table = msm_smd_match_table,
	},
};

/* Device-tree match table for the SMSM driver. */
static struct of_device_id msm_smsm_match_table[] = {
	{ .compatible = "qcom,smsm" },
	{},
};

/* DT-based SMSM driver: msm_smsm_probe() runs once per qcom,smsm node. */
static struct platform_driver msm_smsm_driver = {
	.probe = msm_smsm_probe,
	.driver = {
		.name = "msm_smsm",
		.owner = THIS_MODULE,
	 	.of_match_table = msm_smsm_match_table,
	},
};

/*
 * Legacy (non-DT) driver bound by name; presumably a board file registers
 * a platform device named MODULE_NAME — MODULE_NAME is defined elsewhere
 * in this file.
 */
static struct platform_driver msm_smd_driver_legacy = {
	.probe = msm_smd_probe_legacy,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3705
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003706int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003707{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003708 static bool registered;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003709 int rc;
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003710
3711 if (registered)
3712 return 0;
3713
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05303714 smd_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smd");
3715 if (!smd_log_ctx) {
3716 pr_err("%s: unable to create logging context\n", __func__);
3717 msm_smd_debug_mask = 0;
3718 }
3719
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003720 registered = true;
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003721
3722 INIT_WORK(&probe_work, smd_channel_probe_worker);
3723
3724 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3725 if (IS_ERR(channel_close_wq)) {
3726 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3727 return -ENOMEM;
3728 }
3729
3730 rc = platform_driver_register(&msm_smd_driver_legacy);
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003731 if (rc) {
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003732 pr_err("%s: msm_smd_driver_legacy register failed %d\n",
3733 __func__, rc);
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003734 return rc;
3735 }
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003736
3737 rc = platform_driver_register(&msm_smd_driver);
3738 if (rc) {
3739 pr_err("%s: msm_smd_driver register failed %d\n",
3740 __func__, rc);
3741 return rc;
3742 }
3743
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003744 rc = platform_driver_register(&msm_smsm_driver);
3745 if (rc) {
3746 pr_err("%s: msm_smsm_driver register failed %d\n",
3747 __func__, rc);
3748 return rc;
3749 }
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003750
3751 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003752}
3753
/* Module boilerplate: init entry point and metadata. */
module_init(msm_smd_init);

MODULE_DESCRIPTION("MSM Shared Memory Core");
MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
MODULE_LICENSE("GPL");