blob: a177593e27b1447bd3d07aaefbd3a38e27073cf8 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmberg6275b602012-11-19 13:05:04 -07004 * Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f9412012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Eric Holmberg144c2de2012-10-04 13:37:28 -060037#include <linux/suspend.h>
Jeff Hugo412356e2012-09-27 17:14:23 -060038#include <linux/of.h>
39#include <linux/of_irq.h>
Seemanta Dutta4e2d49c2013-04-05 16:28:11 -070040
Brian Swetland2eb44eb2008-09-29 16:00:48 -070041#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070042#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070043#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070044#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053045#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070046#include <mach/proc_comm.h>
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +053047#include <mach/msm_ipc_logging.h>
Seemanta Dutta4e2d49c2013-04-05 16:28:11 -070048#include <mach/ramdump.h>
Eric Holmberg51edef72013-04-11 14:28:33 -060049#include <mach/board.h>
Jeff Hugo5ba15fe2013-05-06 14:24:24 -060050#include <mach/msm_smem.h>
Seemanta Dutta4e2d49c2013-04-05 16:28:11 -070051
Ram Somani8b9589f2012-04-03 12:07:18 +053052#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070053
54#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055#include "modem_notifier.h"
Jeff Hugo5ba15fe2013-05-06 14:24:24 -060056#include "smem_private.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070057
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070058#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060059 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070061#define CONFIG_QDSP6 1
62#endif
63
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060064#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
65 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070066#define CONFIG_DSPS 1
67#endif
68
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060069#if defined(CONFIG_ARCH_MSM8960) \
70 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060072#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
75#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076#define SMEM_VERSION 0x000B
77#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070078#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060079#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Eric Holmberge5266d32013-02-25 18:29:27 -070080#define RSPIN_INIT_WAIT_MS 1000
Eric Holmberg424d9552013-04-05 15:23:25 -060081#define SMD_FIFO_FULL_RESERVE 4
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070082
83uint32_t SMSM_NUM_ENTRIES = 8;
84uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070085
Eric Holmberge8a39322012-04-03 15:14:02 -060086/* Legacy SMSM interrupt notifications */
87#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
88 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070089
90enum {
91 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070092 MSM_SMSM_DEBUG = 1U << 1,
93 MSM_SMD_INFO = 1U << 2,
94 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070095 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070096};
97
98struct smsm_shared_info {
99 uint32_t *state;
100 uint32_t *intr_mask;
101 uint32_t *intr_mux;
102};
103
104static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f9412012-03-19 10:04:22 -0600105static struct kfifo smsm_snapshot_fifo;
106static struct wake_lock smsm_snapshot_wakelock;
107static int smsm_snapshot_count;
108static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700109
110struct smsm_size_info_type {
111 uint32_t num_hosts;
112 uint32_t num_entries;
113 uint32_t reserved0;
114 uint32_t reserved1;
115};
116
117struct smsm_state_cb_info {
118 struct list_head cb_list;
119 uint32_t mask;
120 void *data;
121 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
122};
123
124struct smsm_state_info {
125 struct list_head callbacks;
126 uint32_t last_value;
Eric Holmberge8a39322012-04-03 15:14:02 -0600127 uint32_t intr_mask_set;
128 uint32_t intr_mask_clear;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700129};
130
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530131struct interrupt_config_item {
132 /* must be initialized */
133 irqreturn_t (*irq_handler)(int req, void *data);
134 /* outgoing interrupt config (set from platform data) */
135 uint32_t out_bit_pos;
136 void __iomem *out_base;
137 uint32_t out_offset;
Eric Holmbergdeace152012-07-25 12:17:11 -0600138 int irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530139};
140
141struct interrupt_config {
142 struct interrupt_config_item smd;
143 struct interrupt_config_item smsm;
144};
145
146static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700147static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530148static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700149static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530150static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700151static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530152static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700153static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600154static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530155static irqreturn_t smsm_irq_handler(int irq, void *data);
156
157static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
158 [SMD_MODEM] = {
159 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700160 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530161 },
162 [SMD_Q6] = {
163 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700164 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530165 },
166 [SMD_DSPS] = {
167 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700168 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530169 },
170 [SMD_WCNSS] = {
171 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700172 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530173 },
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600174 [SMD_RPM] = {
175 .smd.irq_handler = smd_rpm_irq_handler,
176 .smsm.irq_handler = NULL, /* does not support smsm */
177 },
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530178};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600179
Eric Holmbergcfbc1d52013-03-13 18:30:19 -0600180static void *smd_dev;
Jeff Hugobdc734d2012-03-26 16:05:39 -0600181
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700182struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530183
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700184#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
185#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
186 entry * SMSM_NUM_HOSTS + host)
187#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
188
189/* Internal definitions which are not exported in some targets */
190enum {
191 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700192};
193
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530194static int msm_smd_debug_mask = MSM_SMx_POWER_INFO;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700195module_param_named(debug_mask, msm_smd_debug_mask,
196 int, S_IRUGO | S_IWUSR | S_IWGRP);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530197static void *smd_log_ctx;
198#define NUM_LOG_PAGES 4
199
200#define IPC_LOG(level, x...) do { \
201 if (smd_log_ctx) \
202 ipc_log_string(smd_log_ctx, x); \
203 else \
204 printk(level x); \
205 } while (0)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700206
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207#if defined(CONFIG_MSM_SMD_DEBUG)
208#define SMD_DBG(x...) do { \
209 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530210 IPC_LOG(KERN_DEBUG, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700211 } while (0)
212
213#define SMSM_DBG(x...) do { \
214 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530215 IPC_LOG(KERN_DEBUG, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700216 } while (0)
217
218#define SMD_INFO(x...) do { \
219 if (msm_smd_debug_mask & MSM_SMD_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530220 IPC_LOG(KERN_INFO, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700221 } while (0)
222
223#define SMSM_INFO(x...) do { \
224 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530225 IPC_LOG(KERN_INFO, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700226 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700227#define SMx_POWER_INFO(x...) do { \
228 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530229 IPC_LOG(KERN_INFO, x); \
Eric Holmberg98c6c642012-02-24 11:29:35 -0700230 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700231#else
232#define SMD_DBG(x...) do { } while (0)
233#define SMSM_DBG(x...) do { } while (0)
234#define SMD_INFO(x...) do { } while (0)
235#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700236#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237#endif
238
Eric Holmberg51edef72013-04-11 14:28:33 -0600239/**
240 * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
241 *
242 * @type: type to check for overflow
243 * @a: left value to use
244 * @b: right value to use
245 * @returns: true if a + b will result in overflow; false otherwise
246 */
247#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
248 (((type)~0 - (a)) < (b) ? true : false)
249
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700250static unsigned last_heap_free = 0xffffffff;
251
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700252static inline void smd_write_intr(unsigned int val,
253 const void __iomem *addr);
254
255#if defined(CONFIG_ARCH_MSM7X30)
256#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530257 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700258#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530259 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530261 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530263 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700264#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600265#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700266#define MSM_TRIG_A2WCNSS_SMD_INT
267#define MSM_TRIG_A2WCNSS_SMSM_INT
268#elif defined(CONFIG_ARCH_MSM8X60)
269#define MSM_TRIG_A2M_SMD_INT \
270 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
271#define MSM_TRIG_A2Q6_SMD_INT \
272 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
273#define MSM_TRIG_A2M_SMSM_INT \
274 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
275#define MSM_TRIG_A2Q6_SMSM_INT \
276 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
277#define MSM_TRIG_A2DSPS_SMD_INT \
278 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600279#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700280#define MSM_TRIG_A2WCNSS_SMD_INT
281#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600282#elif defined(CONFIG_ARCH_MSM9615)
283#define MSM_TRIG_A2M_SMD_INT \
284 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
285#define MSM_TRIG_A2Q6_SMD_INT \
286 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
287#define MSM_TRIG_A2M_SMSM_INT \
288 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
289#define MSM_TRIG_A2Q6_SMSM_INT \
290 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
291#define MSM_TRIG_A2DSPS_SMD_INT
292#define MSM_TRIG_A2DSPS_SMSM_INT
293#define MSM_TRIG_A2WCNSS_SMD_INT
294#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700295#elif defined(CONFIG_ARCH_FSM9XXX)
296#define MSM_TRIG_A2Q6_SMD_INT \
297 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
298#define MSM_TRIG_A2Q6_SMSM_INT \
299 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
300#define MSM_TRIG_A2M_SMD_INT \
301 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
302#define MSM_TRIG_A2M_SMSM_INT \
303 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
304#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600305#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700306#define MSM_TRIG_A2WCNSS_SMD_INT
307#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700308#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700309#define MSM_TRIG_A2M_SMD_INT \
310 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700311#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700312#define MSM_TRIG_A2M_SMSM_INT \
313 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700314#define MSM_TRIG_A2Q6_SMSM_INT
315#define MSM_TRIG_A2DSPS_SMD_INT
316#define MSM_TRIG_A2DSPS_SMSM_INT
317#define MSM_TRIG_A2WCNSS_SMD_INT
318#define MSM_TRIG_A2WCNSS_SMSM_INT
319#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
320#define MSM_TRIG_A2M_SMD_INT \
321 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
322#define MSM_TRIG_A2Q6_SMD_INT
323#define MSM_TRIG_A2M_SMSM_INT \
324 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
325#define MSM_TRIG_A2Q6_SMSM_INT
326#define MSM_TRIG_A2DSPS_SMD_INT
327#define MSM_TRIG_A2DSPS_SMSM_INT
328#define MSM_TRIG_A2WCNSS_SMD_INT
329#define MSM_TRIG_A2WCNSS_SMSM_INT
330#else /* use platform device / device tree configuration */
331#define MSM_TRIG_A2M_SMD_INT
332#define MSM_TRIG_A2Q6_SMD_INT
333#define MSM_TRIG_A2M_SMSM_INT
334#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700335#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600336#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700337#define MSM_TRIG_A2WCNSS_SMD_INT
338#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700339#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700340
Jeff Hugoee40b152012-02-09 17:39:47 -0700341/*
342 * stub out legacy macros if they are not being used so that the legacy
343 * code compiles even though it is not used
344 *
345 * these definitions should not be used in active code and will cause
346 * an early failure
347 */
348#ifndef INT_A9_M2A_0
349#define INT_A9_M2A_0 -1
350#endif
351#ifndef INT_A9_M2A_5
352#define INT_A9_M2A_5 -1
353#endif
354#ifndef INT_ADSP_A11
355#define INT_ADSP_A11 -1
356#endif
357#ifndef INT_ADSP_A11_SMSM
358#define INT_ADSP_A11_SMSM -1
359#endif
360#ifndef INT_DSPS_A11
361#define INT_DSPS_A11 -1
362#endif
363#ifndef INT_DSPS_A11_SMSM
364#define INT_DSPS_A11_SMSM -1
365#endif
366#ifndef INT_WCNSS_A11
367#define INT_WCNSS_A11 -1
368#endif
369#ifndef INT_WCNSS_A11_SMSM
370#define INT_WCNSS_A11_SMSM -1
371#endif
372
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373#define SMD_LOOPBACK_CID 100
374
375static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700376static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600377static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700378
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600379static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700380static void notify_smsm_cb_clients_worker(struct work_struct *work);
381static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600382static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700383static struct smsm_state_info *smsm_states;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -0600384
385/**
386 * Variables to indicate smd module initialization.
387 * Dependents to register for smd module init notifier.
388 */
389static int smd_module_inited;
390static RAW_NOTIFIER_HEAD(smd_module_init_notifier_list);
391static DEFINE_MUTEX(smd_module_init_notifier_lock);
392static void smd_module_init_notify(uint32_t state, void *data);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530393static int smd_stream_write_avail(struct smd_channel *ch);
394static int smd_stream_read_avail(struct smd_channel *ch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700395
/*
 * smd_write_intr() - raise a remote-processor interrupt via an MMIO write
 * @val:  bit value to write into the interrupt-trigger register
 * @addr: memory-mapped trigger register address
 *
 * The wmb() guarantees that all prior memory writes (e.g. shared-memory
 * FIFO/state updates) are visible before the register write that tells
 * the remote processor to go look at them.
 */
static inline void smd_write_intr(unsigned int val,
				const void __iomem *addr)
{
	wmb();
	__raw_writel(val, addr);
}
402
/*
 * log_notify() - log an outgoing Apps->remote SMD notification
 * @subsystem: SMD edge identifier (e.g. SMD_APPS_MODEM)
 * @ch:        channel being signaled, or NULL for an edge-wide notify
 *
 * Emits power-debug information through SMx_POWER_INFO (compiled out
 * unless CONFIG_MSM_SMD_DEBUG and the MSM_SMx_POWER_INFO mask bit are
 * set).  The "(void) subsys" keeps the variable referenced when the
 * logging macros compile to nothing.
 */
static inline void log_notify(uint32_t subsystem, smd_channel_t *ch)
{
	const char *subsys = smd_edge_to_subsystem(subsystem);

	(void) subsys;

	if (!ch)
		SMx_POWER_INFO("Apps->%s\n", subsys);
	else
		SMx_POWER_INFO(
			"Apps->%s ch%d '%s': tx%d/rx%d %dr/%dw : %dr/%dw\n",
			subsys, ch->n, ch->name,
			/* bytes pending in tx FIFO: capacity minus free
			 * space; the +1 presumably accounts for the one
			 * byte the ring keeps empty -- TODO confirm against
			 * smd_stream_write_avail() */
			ch->fifo_size -
				(smd_stream_write_avail(ch) + 1),
			smd_stream_read_avail(ch),
			ch->half_ch->get_tail(ch->send),
			ch->half_ch->get_head(ch->send),
			ch->half_ch->get_tail(ch->recv),
			ch->half_ch->get_head(ch->recv)
			);
}
424
425static inline void notify_modem_smd(smd_channel_t *ch)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700426{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530427 static const struct interrupt_config_item *intr
428 = &private_intr_config[SMD_MODEM].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530429
430 log_notify(SMD_APPS_MODEM, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700431 if (intr->out_base) {
432 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530433 smd_write_intr(intr->out_bit_pos,
434 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700435 } else {
436 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530437 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700438 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700439}
440
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530441static inline void notify_dsp_smd(smd_channel_t *ch)
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700442{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530443 static const struct interrupt_config_item *intr
444 = &private_intr_config[SMD_Q6].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530445
446 log_notify(SMD_APPS_QDSP, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700447 if (intr->out_base) {
448 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530449 smd_write_intr(intr->out_bit_pos,
450 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700451 } else {
452 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530453 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700454 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700455}
456
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530457static inline void notify_dsps_smd(smd_channel_t *ch)
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530458{
459 static const struct interrupt_config_item *intr
460 = &private_intr_config[SMD_DSPS].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530461
462 log_notify(SMD_APPS_DSPS, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700463 if (intr->out_base) {
464 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530465 smd_write_intr(intr->out_bit_pos,
466 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700467 } else {
468 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530469 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700470 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530471}
472
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530473static inline void notify_wcnss_smd(struct smd_channel *ch)
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530474{
475 static const struct interrupt_config_item *intr
476 = &private_intr_config[SMD_WCNSS].smd;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530477
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530478 log_notify(SMD_APPS_WCNSS, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700479 if (intr->out_base) {
480 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530481 smd_write_intr(intr->out_bit_pos,
482 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700483 } else {
484 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530485 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700486 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530487}
488
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530489static inline void notify_rpm_smd(smd_channel_t *ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600490{
491 static const struct interrupt_config_item *intr
492 = &private_intr_config[SMD_RPM].smd;
493
494 if (intr->out_base) {
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530495 log_notify(SMD_APPS_RPM, ch);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600496 ++interrupt_stats[SMD_RPM].smd_out_config_count;
497 smd_write_intr(intr->out_bit_pos,
498 intr->out_base + intr->out_offset);
499 }
500}
501
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530502static inline void notify_modem_smsm(void)
503{
504 static const struct interrupt_config_item *intr
505 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700506 if (intr->out_base) {
507 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530508 smd_write_intr(intr->out_bit_pos,
509 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700510 } else {
511 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530512 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700513 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530514}
515
516static inline void notify_dsp_smsm(void)
517{
518 static const struct interrupt_config_item *intr
519 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700520 if (intr->out_base) {
521 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530522 smd_write_intr(intr->out_bit_pos,
523 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700524 } else {
525 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530526 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700527 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530528}
529
530static inline void notify_dsps_smsm(void)
531{
532 static const struct interrupt_config_item *intr
533 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700534 if (intr->out_base) {
535 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530536 smd_write_intr(intr->out_bit_pos,
537 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700538 } else {
539 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530540 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700541 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530542}
543
544static inline void notify_wcnss_smsm(void)
545{
546 static const struct interrupt_config_item *intr
547 = &private_intr_config[SMD_WCNSS].smsm;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530548
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700549 if (intr->out_base) {
550 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530551 smd_write_intr(intr->out_bit_pos,
552 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700553 } else {
554 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530555 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700556 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530557}
558
/*
 * notify_other_smsm() - fan out an SMSM state change to interested hosts
 * @smsm_entry:  SMSM state entry that changed
 * @notify_mask: bits of the entry that changed
 *
 * For each remote host, the shared interrupt mask is consulted and the
 * host is interrupted only if it subscribed to one of the changed bits.
 * The modem is special-cased: pre-mask protocols are still notified
 * unconditionally.  Local Apps subscribers are serviced last via a
 * snapshot, deliberately without a wakelock (see comment below).
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
				& notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
				& notify_mask)) {
		uint32_t mux_val;

		/* QSD8x50 signals the Q6 through a shared-memory mux
		 * counter in addition to the interrupt itself */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
				& notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
				& notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
				& notify_mask)) {
		smsm_cb_snapshot(0);
	}
}
607
Eric Holmberg144c2de2012-10-04 13:37:28 -0600608static int smsm_pm_notifier(struct notifier_block *nb,
609 unsigned long event, void *unused)
610{
611 switch (event) {
612 case PM_SUSPEND_PREPARE:
613 smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
614 break;
615
616 case PM_POST_SUSPEND:
617 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
618 break;
619 }
620 return NOTIFY_DONE;
621}
622
623static struct notifier_block smsm_pm_nb = {
624 .notifier_call = smsm_pm_notifier,
625 .priority = 0,
626};
627
/*
 * smd_diag() - dump remote diagnostic/crash strings from shared memory
 *
 * Looks up the DIAG error message and the crash log in SMEM and prints
 * whatever is present.  Each buffer is force-NUL-terminated in place
 * before printing since the remote side may not terminate it.
 */
void smd_diag(void)
{
	char *x;
	int size;

	x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
	if (x != 0) {
		x[SZ_DIAG_ERR_MSG - 1] = 0;
		SMD_INFO("smem: DIAG '%s'\n", x);
	}

	x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
	if (x != 0) {
		x[size - 1] = 0;
		pr_err("smem: CRASH LOG\n'%s'\n", x);
	}
}
645
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700646
/*
 * handle_modem_crash() - terminal handler for a detected modem crash
 *
 * Dumps the shared-memory diagnostics and then spins forever: the
 * modem or the watchdog is expected to reset the system.  This
 * function never returns.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
661
/*
 * smsm_check_for_modem_crash() - poll the modem SMSM state for SMSM_RESET
 *
 * Returns 0 if the modem looks healthy (or SMSM is not yet initialized,
 * in which case we optimistically assume it is fine).  If the modem has
 * raised SMSM_RESET this calls handle_modem_crash(), which never
 * returns; the -1 return is therefore unreachable in practice.
 */
int smsm_check_for_modem_crash(void)
{
	/* if the modem's not ready yet, we have to hope for the best */
	if (!smsm_info.state)
		return 0;

	if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
		handle_modem_crash();
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700675
/* the spinlock is used to synchronize between the
 * irq handler and code that mutates the channel
 * list or fiddles with channel state
 */
static DEFINE_SPINLOCK(smd_lock);
/* protects accesses to shared memory outside the channel state */
DEFINE_SPINLOCK(smem_lock);

/* the mutex is used during open() and close()
 * operations to avoid races while creating or
 * destroying smd_channel structures
 */
static DEFINE_MUTEX(smd_creation_mutex);

/* non-zero once SMD core init has completed */
static int smd_initialized;
/* v1 shared-memory channel layout: both half-channels with their
 * fixed-size data FIFOs inline.
 */
struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

/* v2 shared-memory channel layout: control structures only; the data
 * FIFOs live in separate SMEM allocations.
 */
struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

/* v2 layout variant for edges that require word-aligned access. */
struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};
707
/* Descriptor for one SMD edge: the two processor IDs on the edge and
 * the subsystem name used for restart handling (may be empty).
 */
struct edge_to_pid {
	uint32_t local_pid;
	uint32_t remote_pid;
	char subsys_name[SMD_MAX_CH_NAME_LEN];
};
713
/**
 * Maps edge type to local and remote processor ID's.
 *
 * Entries without a subsys_name have no subsystem associated for
 * restart purposes (see smd_edge_to_subsystem()).
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "adsp"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
738
/* Per-processor subsystem-restart notifier bookkeeping. */
struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};

/* when non-zero, skip the SMSM reset handshake during restart */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel lifecycle lists (protected by smd_lock where mutated from IRQ
 * context) and per-edge lists of open channels
 */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one flag per entry of the SMEM channel allocation table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
769
770static void smd_channel_probe_worker(struct work_struct *work)
771{
772 struct smd_alloc_elm *shared;
773 unsigned n;
774 uint32_t type;
775
776 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
777
778 if (!shared) {
779 pr_err("%s: allocation table not initialized\n", __func__);
780 return;
781 }
782
783 mutex_lock(&smd_probe_lock);
784 for (n = 0; n < 64; n++) {
785 if (smd_ch_allocated[n])
786 continue;
787
788 /* channel should be allocated only if APPS
789 processor is involved */
790 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600791 if (type >= ARRAY_SIZE(edge_to_pids) ||
792 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700793 continue;
794 if (!shared[n].ref_count)
795 continue;
796 if (!shared[n].name[0])
797 continue;
798
799 if (!smd_alloc_channel(&shared[n]))
800 smd_ch_allocated[n] = 1;
801 else
802 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
803 }
804 mutex_unlock(&smd_probe_lock);
805}
806
807/**
808 * Lookup processor ID and determine if it belongs to the proved edge
809 * type.
810 *
811 * @shared2: Pointer to v2 shared channel structure
812 * @type: Edge type
813 * @pid: Processor ID of processor on edge
814 * @local_ch: Channel that belongs to processor @pid
815 * @remote_ch: Other side of edge contained @pid
Jeff Hugo00be6282012-09-07 11:24:32 -0600816 * @is_word_access_ch: Bool, is this a word aligned access channel
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700817 *
818 * Returns 0 for not on edge, 1 for found on edge
819 */
Jeff Hugo00be6282012-09-07 11:24:32 -0600820static int pid_is_on_edge(void *shared2,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700821 uint32_t type, uint32_t pid,
Jeff Hugo00be6282012-09-07 11:24:32 -0600822 void **local_ch,
823 void **remote_ch,
824 int is_word_access_ch
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700825 )
826{
827 int ret = 0;
828 struct edge_to_pid *edge;
Jeff Hugo00be6282012-09-07 11:24:32 -0600829 void *ch0;
830 void *ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700831
832 *local_ch = 0;
833 *remote_ch = 0;
834
835 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
836 return 0;
837
Jeff Hugo00be6282012-09-07 11:24:32 -0600838 if (is_word_access_ch) {
839 ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
840 ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
841 } else {
842 ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
843 ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
844 }
845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846 edge = &edge_to_pids[type];
847 if (edge->local_pid != edge->remote_pid) {
848 if (pid == edge->local_pid) {
Jeff Hugo00be6282012-09-07 11:24:32 -0600849 *local_ch = ch0;
850 *remote_ch = ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851 ret = 1;
852 } else if (pid == edge->remote_pid) {
Jeff Hugo00be6282012-09-07 11:24:32 -0600853 *local_ch = ch1;
854 *remote_ch = ch0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700855 ret = 1;
856 }
857 }
858
859 return ret;
860}
861
Eric Holmberg17992c12012-02-29 12:54:44 -0700862/*
863 * Returns a pointer to the subsystem name or NULL if no
864 * subsystem name is available.
865 *
866 * @type - Edge definition
867 */
868const char *smd_edge_to_subsystem(uint32_t type)
869{
870 const char *subsys = NULL;
871
872 if (type < ARRAY_SIZE(edge_to_pids)) {
873 subsys = edge_to_pids[type].subsys_name;
874 if (subsys[0] == 0x0)
875 subsys = NULL;
876 }
877 return subsys;
878}
879EXPORT_SYMBOL(smd_edge_to_subsystem);
880
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700881/*
882 * Returns a pointer to the subsystem name given the
883 * remote processor ID.
Arun Kumar Neelakantama35c7a72012-10-18 15:45:15 +0530884 * subsystem is not necessarily PIL-loadable
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700885 *
886 * @pid Remote processor ID
887 * @returns Pointer to subsystem name or NULL if not found
888 */
889const char *smd_pid_to_subsystem(uint32_t pid)
890{
891 const char *subsys = NULL;
892 int i;
893
894 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
Arun Kumar Neelakantama35c7a72012-10-18 15:45:15 +0530895 if (pid == edge_to_pids[i].remote_pid) {
896 if (edge_to_pids[i].subsys_name[0] != 0x0) {
897 subsys = edge_to_pids[i].subsys_name;
898 break;
899 } else if (pid == SMD_RPM) {
900 subsys = "rpm";
901 break;
902 }
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700903 }
904 }
905
906 return subsys;
907}
908EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700909
Jeff Hugo00be6282012-09-07 11:24:32 -0600910static void smd_reset_edge(void *void_ch, unsigned new_state,
911 int is_word_access_ch)
Eric Holmberg2a563c32011-10-05 14:51:43 -0600912{
Jeff Hugo00be6282012-09-07 11:24:32 -0600913 if (is_word_access_ch) {
914 struct smd_half_channel_word_access *ch =
915 (struct smd_half_channel_word_access *)(void_ch);
916 if (ch->state != SMD_SS_CLOSED) {
917 ch->state = new_state;
918 ch->fDSR = 0;
919 ch->fCTS = 0;
920 ch->fCD = 0;
921 ch->fSTATE = 1;
922 }
923 } else {
924 struct smd_half_channel *ch =
925 (struct smd_half_channel *)(void_ch);
926 if (ch->state != SMD_SS_CLOSED) {
927 ch->state = new_state;
928 ch->fDSR = 0;
929 ch->fCTS = 0;
930 ch->fCD = 0;
931 ch->fSTATE = 1;
932 }
Eric Holmberg2a563c32011-10-05 14:51:43 -0600933 }
934}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700935
/*
 * Walk the channel allocation table and push the half-channels owned by
 * @pid into @new_state on every edge @pid participates in.  Used during
 * subsystem restart; callers in this file hold smd_lock and
 * smd_probe_lock around this call.
 *
 * @shared: channel allocation table from SMEM
 * @new_state: state to force (SMD_SS_CLOSING or SMD_SS_CLOSED)
 * @pid: processor ID being restarted
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *local_ch;
	void *remote_ch;
	int is_word_access;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip unused / unnamed table slots */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		/* size of the shared structure depends on access type */
		if (is_word_access)
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2_word_access));
		else
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
							is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch, is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);
	}
}
977
978
/*
 * Reset all SMD/SMSM state associated with a restarting processor.
 *
 * Sequence: clear the processor's SMSM entry and re-arm the init
 * handshake, then drive every edge it owns through CLOSING and finally
 * CLOSED, notifying local channel clients and remote processors after
 * each phase.  The mb() before each notify pass makes the state writes
 * visible before interrupts are raised.
 *
 * @restart_pid: processor ID being restarted
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMx_POWER_INFO("%s: starting reset\n", __func__);

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd(NULL);
	notify_dsp_smd(NULL);
	notify_dsps_smd(NULL);
	notify_wcnss_smd(NULL);
	notify_rpm_smd(NULL);

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd(NULL);
	notify_dsp_smd(NULL);
	notify_dsps_smd(NULL);
	notify_wcnss_smd(NULL);
	notify_rpm_smd(NULL);

	SMx_POWER_INFO("%s: finished reset\n", __func__);
}
1045
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001046/* how many bytes are available for reading */
1047static int smd_stream_read_avail(struct smd_channel *ch)
1048{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001049 return (ch->half_ch->get_head(ch->recv) -
1050 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001051}
1052
1053/* how many bytes we are free to write */
1054static int smd_stream_write_avail(struct smd_channel *ch)
1055{
Eric Holmberg424d9552013-04-05 15:23:25 -06001056 int bytes_avail;
1057
1058 bytes_avail = ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1059 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask) + 1;
1060
1061 if (bytes_avail < SMD_FIFO_FULL_RESERVE)
1062 bytes_avail = 0;
1063 else
1064 bytes_avail -= SMD_FIFO_FULL_RESERVE;
1065 return bytes_avail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001066}
1067
1068static int smd_packet_read_avail(struct smd_channel *ch)
1069{
1070 if (ch->current_packet) {
1071 int n = smd_stream_read_avail(ch);
1072 if (n > ch->current_packet)
1073 n = ch->current_packet;
1074 return n;
1075 } else {
1076 return 0;
1077 }
1078}
1079
1080static int smd_packet_write_avail(struct smd_channel *ch)
1081{
1082 int n = smd_stream_write_avail(ch);
1083 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1084}
1085
1086static int ch_is_open(struct smd_channel *ch)
1087{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001088 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1089 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1090 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001091}
1092
1093/* provide a pointer and length to readable data in the fifo */
1094static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1095{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001096 unsigned head = ch->half_ch->get_head(ch->recv);
1097 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001098 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001099
1100 if (tail <= head)
1101 return head - tail;
1102 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001103 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001104}
1105
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001106static int read_intr_blocked(struct smd_channel *ch)
1107{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001108 return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001109}
1110
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	/* make the new tail visible before raising fTAIL for the remote */
	wmb();
	ch->half_ch->set_fTAIL(ch->send, 1);
}
1120
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * @_data: destination buffer, or NULL to discard
 * @len: maximum bytes to consume
 * @user_buf: non-zero when @_data is a userspace pointer (copy_to_user)
 *
 * Returns the number of bytes consumed from the FIFO.  Note: a failed
 * copy_to_user is logged but the bytes are still consumed.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* grab the next contiguous readable segment */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1161
/* per-channel update hook for stream-mode channels: intentionally empty */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1166
/* per-channel update hook for packet-mode channels: when between
 * packets, pull the next packet header out of the stream and latch the
 * packet length (header word 0) into ch->current_packet
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		ch->current_packet = hdr[0];
	}
}
1186
/**
 * ch_write_buffer() - Provide a pointer and length for the next segment of
 * free space in the FIFO.
 * @ch: channel
 * @ptr: Address to pointer for the next segment write
 * @returns: Maximum size that can be written until the FIFO is either full
 * or the end of the FIFO has been reached.
 *
 * The returned pointer and length are passed to memcpy, so the next segment is
 * defined as either the space available between the read index (tail) and the
 * write index (head) or the space available to the end of the FIFO.
 *
 * SMD_FIFO_FULL_RESERVE bytes are always kept back so that head == tail
 * unambiguously means "empty".
 */
static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->send);
	unsigned tail = ch->half_ch->get_tail(ch->send);
	*ptr = (void *) (ch->send_data + head);

	if (head < tail) {
		/* free space is contiguous up to (tail - reserve) */
		return tail - head - SMD_FIFO_FULL_RESERVE;
	} else {
		/* free space wraps; reserve may fall in this segment */
		if (tail < SMD_FIFO_FULL_RESERVE)
			return ch->fifo_size + tail - head
					- SMD_FIFO_FULL_RESERVE;
		else
			return ch->fifo_size - head;
	}
}
1215
/* advace the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* make the new head visible before raising fHEAD for the remote */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1227
/* Drive our send half-channel to state @n and signal the remote CPU.
 * The DSR/CTS/CD handshake flags are asserted only for OPENED; the state
 * word and fSTATE are written last so the remote sees consistent flags.
 */
static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	} else {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
	}
	ch->half_ch->set_state(ch->send, n);
	ch->half_ch->set_fSTATE(ch->send, 1);
	ch->notify_other_cpu(ch);
}
1243
/* Schedule the channel probe worker whenever the SMEM heap free offset
 * has moved, i.e. new shared-memory allocations (possibly new channels)
 * exist since the last scan.
 */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1252
/* React to a remote half-channel state transition @last -> @next,
 * advancing our local (send) side of the handshake and notifying the
 * channel client.  Callers in this file invoke this with smd_lock held.
 */
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			/* fresh open: reset FIFO indices and unblock reads */
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			/* remote closed on us: drop packet state, tell client */
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			/* both sides closed: hand off for final teardown */
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1298
/* Process remote state changes for channels on the closing list; the
 * state-change half of handle_smd_irq() applied to channels being torn
 * down (safe iteration: smd_state_change may move entries off the list).
 */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1316
/* Core SMD interrupt work for one edge: scan every channel on @list,
 * acknowledge the remote's fHEAD/fTAIL/fSTATE event flags, propagate
 * remote state transitions and deliver DATA/STATUS events to clients.
 *
 * @list: per-edge channel list
 * @notify: remote-notify callback for the edge (not referenced in this
 *          body; kept for the callers' signatures)
 */
static void handle_smd_irq(struct list_head *list,
		void (*notify)(smd_channel_t *ch))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* clear each event flag and remember it locally */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		/* fHEAD or fTAIL moved: data event */
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO(
				"SMD ch%d '%s' Data event 0x%x tx%d/rx%d %dr/%dw : %dr/%dw\n",
				ch->n, ch->name,
				ch_flags,
				ch->fifo_size -
					(smd_stream_write_avail(ch) + 1),
				smd_stream_read_avail(ch),
				ch->half_ch->get_tail(ch->send),
				ch->half_ch->get_head(ch->send),
				ch->half_ch->get_tail(ch->recv),
				ch->half_ch->get_head(ch->recv)
				);
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* fSTATE without an actual state change: status-only event */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1376
/* Log an incoming SMD interrupt from the given edge.  The (void) cast
 * keeps @subsys "used" when SMx_POWER_INFO compiles away.
 */
static inline void log_irq(uint32_t subsystem)
{
	const char *subsys = smd_edge_to_subsystem(subsystem);

	(void) subsys;

	SMx_POWER_INFO("SMD Int %s->Apps\n", subsys);
}
1385
/* IRQ handler: SMD interrupt from the modem processor. */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_MODEM);
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1394
/* IRQ handler: SMD interrupt from the QDSP/ADSP processor. */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_QDSP);
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001403
/* IRQ handler: SMD interrupt from the DSPS (sensors) processor. */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_DSPS);
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001412
/* IRQ handler: SMD interrupt from the WCNSS processor. */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_WCNSS);
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1421
/* IRQ handler: SMD interrupt from the RPM processor. */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_RPM);
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001430
/* Tasklet body: poll every edge's channel list as if an interrupt had
 * fired; also invoked directly from smd_channel_reset().
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}
1440
1441static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1442
Brian Swetland37521a32009-07-01 18:30:47 -07001443static inline int smd_need_int(struct smd_channel *ch)
1444{
1445 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001446 if (ch->half_ch->get_fHEAD(ch->recv) ||
1447 ch->half_ch->get_fTAIL(ch->recv) ||
1448 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001449 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001450 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001451 return 1;
1452 }
1453 return 0;
1454}
1455
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001456void smd_sleep_exit(void)
1457{
1458 unsigned long flags;
1459 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001460 int need_int = 0;
1461
1462 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001463 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1464 if (smd_need_int(ch)) {
1465 need_int = 1;
1466 break;
1467 }
1468 }
1469 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1470 if (smd_need_int(ch)) {
1471 need_int = 1;
1472 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001473 }
1474 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001475 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1476 if (smd_need_int(ch)) {
1477 need_int = 1;
1478 break;
1479 }
1480 }
1481 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1482 if (smd_need_int(ch)) {
1483 need_int = 1;
1484 break;
1485 }
1486 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001487 spin_unlock_irqrestore(&smd_lock, flags);
1488 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001489
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001490 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001491 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001492 tasklet_schedule(&smd_fake_irq_tasklet);
1493 }
1494}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001495EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001496
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001497static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001498{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001499 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1500 return 0;
1501 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001502 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001503
1504 /* for cases where xfer type is 0 */
1505 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001506 return 0;
1507
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001508 /* for cases where xfer type is 0 */
1509 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1510 return 0;
1511
1512 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001513 return 1;
1514 else
1515 return 0;
1516}
1517
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001518static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
1519 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001520{
1521 void *ptr;
1522 const unsigned char *buf = _data;
1523 unsigned xfer;
1524 int orig_len = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001525 int r = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001526
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001527 SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001528 if (len < 0)
1529 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001530 else if (len == 0)
1531 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001532
1533 while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
Eric Holmberg7a717872012-02-03 11:58:04 -07001534 if (!ch_is_open(ch)) {
1535 len = orig_len;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001536 break;
Eric Holmberg7a717872012-02-03 11:58:04 -07001537 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001538 if (xfer > len)
1539 xfer = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001540 if (user_buf) {
1541 r = copy_from_user(ptr, buf, xfer);
1542 if (r > 0) {
1543 pr_err("%s: "
1544 "copy_from_user could not copy %i "
1545 "bytes.\n",
1546 __func__,
1547 r);
1548 }
1549 } else
1550 memcpy(ptr, buf, xfer);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001551 ch_write_done(ch, xfer);
1552 len -= xfer;
1553 buf += xfer;
1554 if (len == 0)
1555 break;
1556 }
1557
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001558 if (orig_len - len)
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301559 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001560
1561 return orig_len - len;
1562}
1563
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001564static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1565 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001566{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001567 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001568 unsigned hdr[5];
1569
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001570 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001571 if (len < 0)
1572 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001573 else if (len == 0)
1574 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001575
1576 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1577 return -ENOMEM;
1578
1579 hdr[0] = len;
1580 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1581
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001582
1583 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1584 if (ret < 0 || ret != sizeof(hdr)) {
1585 SMD_DBG("%s failed to write pkt header: "
1586 "%d returned\n", __func__, ret);
1587 return -1;
1588 }
1589
1590
1591 ret = smd_stream_write(ch, _data, len, user_buf);
1592 if (ret < 0 || ret != len) {
1593 SMD_DBG("%s failed to write pkt data: "
1594 "%d returned\n", __func__, ret);
1595 return ret;
1596 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001597
1598 return len;
1599}
1600
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001601static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001602{
1603 int r;
1604
1605 if (len < 0)
1606 return -EINVAL;
1607
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001608 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001609 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001610 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301611 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001612
1613 return r;
1614}
1615
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001616static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001617{
1618 unsigned long flags;
1619 int r;
1620
1621 if (len < 0)
1622 return -EINVAL;
1623
1624 if (len > ch->current_packet)
1625 len = ch->current_packet;
1626
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001627 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001628 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001629 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301630 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001631
1632 spin_lock_irqsave(&smd_lock, flags);
1633 ch->current_packet -= r;
1634 update_packet_state(ch);
1635 spin_unlock_irqrestore(&smd_lock, flags);
1636
1637 return r;
1638}
1639
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001640static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1641 int user_buf)
1642{
1643 int r;
1644
1645 if (len < 0)
1646 return -EINVAL;
1647
1648 if (len > ch->current_packet)
1649 len = ch->current_packet;
1650
1651 r = ch_read(ch, data, len, user_buf);
1652 if (r > 0)
1653 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301654 ch->notify_other_cpu(ch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001655
1656 ch->current_packet -= r;
1657 update_packet_state(ch);
1658
1659 return r;
1660}
1661
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301662#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001663static int smd_alloc_v2(struct smd_channel *ch)
1664{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001665 void *buffer;
1666 unsigned buffer_sz;
1667
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001668 if (is_word_access_ch(ch->type)) {
1669 struct smd_shared_v2_word_access *shared2;
1670 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1671 sizeof(*shared2));
1672 if (!shared2) {
1673 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1674 return -EINVAL;
1675 }
1676 ch->send = &shared2->ch0;
1677 ch->recv = &shared2->ch1;
1678 } else {
1679 struct smd_shared_v2 *shared2;
1680 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1681 sizeof(*shared2));
1682 if (!shared2) {
1683 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1684 return -EINVAL;
1685 }
1686 ch->send = &shared2->ch0;
1687 ch->recv = &shared2->ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001688 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001689 ch->half_ch = get_half_ch_funcs(ch->type);
1690
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001691 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1692 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301693 SMD_INFO("smem_get_entry failed\n");
1694 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001695 }
1696
1697 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301698 if (buffer_sz & (buffer_sz - 1)) {
1699 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1700 return -EINVAL;
1701 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001702 buffer_sz /= 2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001703 ch->send_data = buffer;
1704 ch->recv_data = buffer + buffer_sz;
1705 ch->fifo_size = buffer_sz;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001706
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001707 return 0;
1708}
1709
1710static int smd_alloc_v1(struct smd_channel *ch)
1711{
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301712 return -EINVAL;
1713}
1714
1715#else /* define v1 for older targets */
1716static int smd_alloc_v2(struct smd_channel *ch)
1717{
1718 return -EINVAL;
1719}
1720
1721static int smd_alloc_v1(struct smd_channel *ch)
1722{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001723 struct smd_shared_v1 *shared1;
1724 shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
1725 if (!shared1) {
1726 pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301727 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001728 }
1729 ch->send = &shared1->ch0;
1730 ch->recv = &shared1->ch1;
1731 ch->send_data = shared1->data0;
1732 ch->recv_data = shared1->data1;
1733 ch->fifo_size = SMD_BUF_SIZE;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001734 ch->half_ch = get_half_ch_funcs(ch->type);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001735 return 0;
1736}
1737
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301738#endif
1739
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001740static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001741{
1742 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001743
1744 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1745 if (ch == 0) {
1746 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001747 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001748 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001749 ch->n = alloc_elm->cid;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001750 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001751
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001752 if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001753 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001754 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001755 }
1756
1757 ch->fifo_mask = ch->fifo_size - 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001758
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001759 /* probe_worker guarentees ch->type will be a valid type */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001760 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001761 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001762 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001763 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001764 else if (ch->type == SMD_APPS_DSPS)
1765 ch->notify_other_cpu = notify_dsps_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001766 else if (ch->type == SMD_APPS_WCNSS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001767 ch->notify_other_cpu = notify_wcnss_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001768 else if (ch->type == SMD_APPS_RPM)
1769 ch->notify_other_cpu = notify_rpm_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001770
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001771 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001772 ch->read = smd_packet_read;
1773 ch->write = smd_packet_write;
1774 ch->read_avail = smd_packet_read_avail;
1775 ch->write_avail = smd_packet_write_avail;
1776 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001777 ch->read_from_cb = smd_packet_read_from_cb;
1778 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001779 } else {
1780 ch->read = smd_stream_read;
1781 ch->write = smd_stream_write;
1782 ch->read_avail = smd_stream_read_avail;
1783 ch->write_avail = smd_stream_write_avail;
1784 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001785 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001786 }
1787
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001788 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1789 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001790
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001791 ch->pdev.name = ch->name;
1792 ch->pdev.id = ch->type;
1793
1794 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1795 ch->name, ch->n);
1796
1797 mutex_lock(&smd_creation_mutex);
1798 list_add(&ch->ch_list, &smd_ch_closed_list);
1799 mutex_unlock(&smd_creation_mutex);
1800
1801 platform_device_register(&ch->pdev);
1802 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
1803 /* create a platform driver to be used by smd_tty driver
1804 * so that it can access the loopback port
1805 */
1806 loopback_tty_pdev.id = ch->type;
1807 platform_device_register(&loopback_tty_pdev);
1808 }
1809 return 0;
1810}
1811
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301812static inline void notify_loopback_smd(smd_channel_t *ch_notif)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001813{
1814 unsigned long flags;
1815 struct smd_channel *ch;
1816
1817 spin_lock_irqsave(&smd_lock, flags);
1818 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1819 ch->notify(ch->priv, SMD_EVENT_DATA);
1820 }
1821 spin_unlock_irqrestore(&smd_lock, flags);
1822}
1823
1824static int smd_alloc_loopback_channel(void)
1825{
1826 static struct smd_half_channel smd_loopback_ctl;
1827 static char smd_loopback_data[SMD_BUF_SIZE];
1828 struct smd_channel *ch;
1829
1830 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1831 if (ch == 0) {
1832 pr_err("%s: out of memory\n", __func__);
1833 return -1;
1834 }
1835 ch->n = SMD_LOOPBACK_CID;
1836
1837 ch->send = &smd_loopback_ctl;
1838 ch->recv = &smd_loopback_ctl;
1839 ch->send_data = smd_loopback_data;
1840 ch->recv_data = smd_loopback_data;
1841 ch->fifo_size = SMD_BUF_SIZE;
1842
1843 ch->fifo_mask = ch->fifo_size - 1;
1844 ch->type = SMD_LOOPBACK_TYPE;
1845 ch->notify_other_cpu = notify_loopback_smd;
1846
1847 ch->read = smd_stream_read;
1848 ch->write = smd_stream_write;
1849 ch->read_avail = smd_stream_read_avail;
1850 ch->write_avail = smd_stream_write_avail;
1851 ch->update_state = update_stream_state;
1852 ch->read_from_cb = smd_stream_read;
1853
1854 memset(ch->name, 0, 20);
1855 memcpy(ch->name, "local_loopback", 14);
1856
1857 ch->pdev.name = ch->name;
1858 ch->pdev.id = ch->type;
1859
1860 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001861
1862 mutex_lock(&smd_creation_mutex);
1863 list_add(&ch->ch_list, &smd_ch_closed_list);
1864 mutex_unlock(&smd_creation_mutex);
1865
1866 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001867 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001868}
1869
/* Default no-op notify callback used when a client supplies none. */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1873
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001874static void finalize_channel_close_fn(struct work_struct *work)
1875{
1876 unsigned long flags;
1877 struct smd_channel *ch;
1878 struct smd_channel *index;
1879
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001880 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001881 spin_lock_irqsave(&smd_lock, flags);
1882 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1883 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001884 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001885 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1886 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001887 }
1888 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001889 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001890}
1891
1892struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001893{
1894 struct smd_channel *ch;
1895
1896 mutex_lock(&smd_creation_mutex);
1897 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001898 if (!strcmp(name, ch->name) &&
1899 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001900 list_del(&ch->ch_list);
1901 mutex_unlock(&smd_creation_mutex);
1902 return ch;
1903 }
1904 }
1905 mutex_unlock(&smd_creation_mutex);
1906
1907 return NULL;
1908}
1909
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001910int smd_named_open_on_edge(const char *name, uint32_t edge,
1911 smd_channel_t **_ch,
1912 void *priv, void (*notify)(void *, unsigned))
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001913{
1914 struct smd_channel *ch;
1915 unsigned long flags;
1916
1917 if (smd_initialized == 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001918 SMD_INFO("smd_open() before smd_init()\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001919 return -ENODEV;
1920 }
1921
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001922 SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);
1923
1924 ch = smd_get_channel(name, edge);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001925 if (!ch) {
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001926 /* check closing list for port */
1927 spin_lock_irqsave(&smd_lock, flags);
1928 list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
1929 if (!strncmp(name, ch->name, 20) &&
1930 (edge == ch->type)) {
1931 /* channel exists, but is being closed */
1932 spin_unlock_irqrestore(&smd_lock, flags);
1933 return -EAGAIN;
1934 }
1935 }
1936
1937 /* check closing workqueue list for port */
1938 list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
1939 if (!strncmp(name, ch->name, 20) &&
1940 (edge == ch->type)) {
1941 /* channel exists, but is being closed */
1942 spin_unlock_irqrestore(&smd_lock, flags);
1943 return -EAGAIN;
1944 }
1945 }
1946 spin_unlock_irqrestore(&smd_lock, flags);
1947
1948 /* one final check to handle closing->closed race condition */
1949 ch = smd_get_channel(name, edge);
1950 if (!ch)
1951 return -ENODEV;
1952 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001953
1954 if (notify == 0)
1955 notify = do_nothing_notify;
1956
1957 ch->notify = notify;
1958 ch->current_packet = 0;
1959 ch->last_state = SMD_SS_CLOSED;
1960 ch->priv = priv;
1961
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001962 if (edge == SMD_LOOPBACK_TYPE) {
1963 ch->last_state = SMD_SS_OPENED;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001964 ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
1965 ch->half_ch->set_fDSR(ch->send, 1);
1966 ch->half_ch->set_fCTS(ch->send, 1);
1967 ch->half_ch->set_fCD(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001968 }
1969
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001970 *_ch = ch;
1971
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001972 SMD_DBG("smd_open: opening '%s'\n", ch->name);
1973
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001974 spin_lock_irqsave(&smd_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001975 if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
Brian Swetland37521a32009-07-01 18:30:47 -07001976 list_add(&ch->ch_list, &smd_ch_list_modem);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001977 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
Brian Swetland37521a32009-07-01 18:30:47 -07001978 list_add(&ch->ch_list, &smd_ch_list_dsp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001979 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
1980 list_add(&ch->ch_list, &smd_ch_list_dsps);
1981 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
1982 list_add(&ch->ch_list, &smd_ch_list_wcnss);
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001983 else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
1984 list_add(&ch->ch_list, &smd_ch_list_rpm);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001985 else
1986 list_add(&ch->ch_list, &smd_ch_list_loopback);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001987
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001988 SMD_DBG("%s: opening ch %d\n", __func__, ch->n);
1989
1990 if (edge != SMD_LOOPBACK_TYPE)
1991 smd_state_change(ch, ch->last_state, SMD_SS_OPENING);
1992
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001993 spin_unlock_irqrestore(&smd_lock, flags);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001994
1995 return 0;
1996}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001997EXPORT_SYMBOL(smd_named_open_on_edge);
1998
1999
2000int smd_open(const char *name, smd_channel_t **_ch,
2001 void *priv, void (*notify)(void *, unsigned))
2002{
2003 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
2004 notify);
2005}
2006EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002007
2008int smd_close(smd_channel_t *ch)
2009{
2010 unsigned long flags;
2011
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002012 if (ch == 0)
2013 return -1;
2014
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002015 SMD_INFO("smd_close(%s)\n", ch->name);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002016
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002017 spin_lock_irqsave(&smd_lock, flags);
2018 list_del(&ch->ch_list);
2019 if (ch->n == SMD_LOOPBACK_CID) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002020 ch->half_ch->set_fDSR(ch->send, 0);
2021 ch->half_ch->set_fCTS(ch->send, 0);
2022 ch->half_ch->set_fCD(ch->send, 0);
2023 ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002024 } else
2025 ch_set_state(ch, SMD_SS_CLOSED);
2026
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002027 if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002028 list_add(&ch->ch_list, &smd_ch_closing_list);
2029 spin_unlock_irqrestore(&smd_lock, flags);
2030 } else {
2031 spin_unlock_irqrestore(&smd_lock, flags);
2032 ch->notify = do_nothing_notify;
2033 mutex_lock(&smd_creation_mutex);
2034 list_add(&ch->ch_list, &smd_ch_closed_list);
2035 mutex_unlock(&smd_creation_mutex);
2036 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002037
2038 return 0;
2039}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002040EXPORT_SYMBOL(smd_close);
2041
2042int smd_write_start(smd_channel_t *ch, int len)
2043{
2044 int ret;
2045 unsigned hdr[5];
2046
2047 if (!ch) {
2048 pr_err("%s: Invalid channel specified\n", __func__);
2049 return -ENODEV;
2050 }
2051 if (!ch->is_pkt_ch) {
2052 pr_err("%s: non-packet channel specified\n", __func__);
2053 return -EACCES;
2054 }
2055 if (len < 1) {
2056 pr_err("%s: invalid length: %d\n", __func__, len);
2057 return -EINVAL;
2058 }
2059
2060 if (ch->pending_pkt_sz) {
2061 pr_err("%s: packet of size: %d in progress\n", __func__,
2062 ch->pending_pkt_sz);
2063 return -EBUSY;
2064 }
2065 ch->pending_pkt_sz = len;
2066
2067 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
2068 ch->pending_pkt_sz = 0;
2069 SMD_DBG("%s: no space to write packet header\n", __func__);
2070 return -EAGAIN;
2071 }
2072
2073 hdr[0] = len;
2074 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
2075
2076
2077 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
2078 if (ret < 0 || ret != sizeof(hdr)) {
2079 ch->pending_pkt_sz = 0;
2080 pr_err("%s: packet header failed to write\n", __func__);
2081 return -EPERM;
2082 }
2083 return 0;
2084}
2085EXPORT_SYMBOL(smd_write_start);
2086
2087int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2088{
2089 int bytes_written;
2090
2091 if (!ch) {
2092 pr_err("%s: Invalid channel specified\n", __func__);
2093 return -ENODEV;
2094 }
2095 if (len < 1) {
2096 pr_err("%s: invalid length: %d\n", __func__, len);
2097 return -EINVAL;
2098 }
2099
2100 if (!ch->pending_pkt_sz) {
2101 pr_err("%s: no transaction in progress\n", __func__);
2102 return -ENOEXEC;
2103 }
2104 if (ch->pending_pkt_sz - len < 0) {
2105 pr_err("%s: segment of size: %d will make packet go over "
2106 "length\n", __func__, len);
2107 return -EINVAL;
2108 }
2109
2110 bytes_written = smd_stream_write(ch, data, len, user_buf);
2111
2112 ch->pending_pkt_sz -= bytes_written;
2113
2114 return bytes_written;
2115}
2116EXPORT_SYMBOL(smd_write_segment);
2117
2118int smd_write_end(smd_channel_t *ch)
2119{
2120
2121 if (!ch) {
2122 pr_err("%s: Invalid channel specified\n", __func__);
2123 return -ENODEV;
2124 }
2125 if (ch->pending_pkt_sz) {
2126 pr_err("%s: current packet not completely written\n", __func__);
2127 return -E2BIG;
2128 }
2129
2130 return 0;
2131}
2132EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002133
Jeff Hugo44fd9832013-04-04 15:56:21 -06002134int smd_write_segment_avail(smd_channel_t *ch)
2135{
2136 int n;
2137
2138 if (!ch) {
2139 pr_err("%s: Invalid channel specified\n", __func__);
2140 return -ENODEV;
2141 }
2142 if (!ch->is_pkt_ch) {
2143 pr_err("%s: non-packet channel specified\n", __func__);
2144 return -ENODEV;
2145 }
2146
2147 n = smd_stream_write_avail(ch);
2148
2149 /* pkt hdr already written, no need to reserve space for it */
2150 if (ch->pending_pkt_sz)
2151 return n;
2152
2153 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
2154}
2155EXPORT_SYMBOL(smd_write_segment_avail);
2156
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002157int smd_read(smd_channel_t *ch, void *data, int len)
2158{
Jack Pham1b236d12012-03-19 15:27:18 -07002159 if (!ch) {
2160 pr_err("%s: Invalid channel specified\n", __func__);
2161 return -ENODEV;
2162 }
2163
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002164 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002165}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002166EXPORT_SYMBOL(smd_read);
2167
2168int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2169{
Jack Pham1b236d12012-03-19 15:27:18 -07002170 if (!ch) {
2171 pr_err("%s: Invalid channel specified\n", __func__);
2172 return -ENODEV;
2173 }
2174
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002175 return ch->read(ch, data, len, 1);
2176}
2177EXPORT_SYMBOL(smd_read_user_buffer);
2178
2179int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2180{
Jack Pham1b236d12012-03-19 15:27:18 -07002181 if (!ch) {
2182 pr_err("%s: Invalid channel specified\n", __func__);
2183 return -ENODEV;
2184 }
2185
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002186 return ch->read_from_cb(ch, data, len, 0);
2187}
2188EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002189
2190int smd_write(smd_channel_t *ch, const void *data, int len)
2191{
Jack Pham1b236d12012-03-19 15:27:18 -07002192 if (!ch) {
2193 pr_err("%s: Invalid channel specified\n", __func__);
2194 return -ENODEV;
2195 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002196
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002197 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002198}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002199EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002200
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002201int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002202{
Jack Pham1b236d12012-03-19 15:27:18 -07002203 if (!ch) {
2204 pr_err("%s: Invalid channel specified\n", __func__);
2205 return -ENODEV;
2206 }
2207
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002208 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002209}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002210EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002211
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002212int smd_read_avail(smd_channel_t *ch)
2213{
Jack Pham1b236d12012-03-19 15:27:18 -07002214 if (!ch) {
2215 pr_err("%s: Invalid channel specified\n", __func__);
2216 return -ENODEV;
2217 }
2218
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002219 return ch->read_avail(ch);
2220}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002221EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002222
2223int smd_write_avail(smd_channel_t *ch)
2224{
Jack Pham1b236d12012-03-19 15:27:18 -07002225 if (!ch) {
2226 pr_err("%s: Invalid channel specified\n", __func__);
2227 return -ENODEV;
2228 }
2229
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002230 return ch->write_avail(ch);
2231}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002232EXPORT_SYMBOL(smd_write_avail);
2233
2234void smd_enable_read_intr(smd_channel_t *ch)
2235{
2236 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002237 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002238}
2239EXPORT_SYMBOL(smd_enable_read_intr);
2240
2241void smd_disable_read_intr(smd_channel_t *ch)
2242{
2243 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002244 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002245}
2246EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002247
Eric Holmbergdeace152012-07-25 12:17:11 -06002248/**
2249 * Enable/disable receive interrupts for the remote processor used by a
2250 * particular channel.
2251 * @ch: open channel handle to use for the edge
2252 * @mask: 1 = mask interrupts; 0 = unmask interrupts
2253 * @returns: 0 for success; < 0 for failure
2254 *
2255 * Note that this enables/disables all interrupts from the remote subsystem for
2256 * all channels. As such, it should be used with care and only for specific
2257 * use cases such as power-collapse sequencing.
2258 */
2259int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
2260{
2261 struct irq_chip *irq_chip;
2262 struct irq_data *irq_data;
2263 struct interrupt_config_item *int_cfg;
2264
2265 if (!ch)
2266 return -EINVAL;
2267
2268 if (ch->type >= ARRAY_SIZE(edge_to_pids))
2269 return -ENODEV;
2270
2271 int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
2272
2273 if (int_cfg->irq_id < 0)
2274 return -ENODEV;
2275
2276 irq_chip = irq_get_chip(int_cfg->irq_id);
2277 if (!irq_chip)
2278 return -ENODEV;
2279
2280 irq_data = irq_get_irq_data(int_cfg->irq_id);
2281 if (!irq_data)
2282 return -ENODEV;
2283
2284 if (mask) {
2285 SMx_POWER_INFO("SMD Masking interrupts from %s\n",
2286 edge_to_pids[ch->type].subsys_name);
2287 irq_chip->irq_mask(irq_data);
2288 } else {
2289 SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
2290 edge_to_pids[ch->type].subsys_name);
2291 irq_chip->irq_unmask(irq_data);
2292 }
2293
2294 return 0;
2295}
2296EXPORT_SYMBOL(smd_mask_receive_interrupt);
2297
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002298int smd_wait_until_readable(smd_channel_t *ch, int bytes)
2299{
2300 return -1;
2301}
2302
2303int smd_wait_until_writable(smd_channel_t *ch, int bytes)
2304{
2305 return -1;
2306}
2307
2308int smd_cur_packet_size(smd_channel_t *ch)
2309{
Jack Pham1b236d12012-03-19 15:27:18 -07002310 if (!ch) {
2311 pr_err("%s: Invalid channel specified\n", __func__);
2312 return -ENODEV;
2313 }
2314
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002315 return ch->current_packet;
2316}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002317EXPORT_SYMBOL(smd_cur_packet_size);
2318
2319int smd_tiocmget(smd_channel_t *ch)
2320{
Jack Pham1b236d12012-03-19 15:27:18 -07002321 if (!ch) {
2322 pr_err("%s: Invalid channel specified\n", __func__);
2323 return -ENODEV;
2324 }
2325
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002326 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2327 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2328 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2329 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2330 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2331 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002332}
2333EXPORT_SYMBOL(smd_tiocmget);
2334
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002335/* this api will be called while holding smd_lock */
2336int
2337smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002338{
Jack Pham1b236d12012-03-19 15:27:18 -07002339 if (!ch) {
2340 pr_err("%s: Invalid channel specified\n", __func__);
2341 return -ENODEV;
2342 }
2343
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002344 if (set & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002345 ch->half_ch->set_fDSR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002346
2347 if (set & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002348 ch->half_ch->set_fCTS(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002349
2350 if (clear & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002351 ch->half_ch->set_fDSR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002352
2353 if (clear & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002354 ch->half_ch->set_fCTS(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002355
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002356 ch->half_ch->set_fSTATE(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002357 barrier();
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05302358 ch->notify_other_cpu(ch);
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002359
2360 return 0;
2361}
2362EXPORT_SYMBOL(smd_tiocmset_from_cb);
2363
2364int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2365{
2366 unsigned long flags;
2367
Jack Pham1b236d12012-03-19 15:27:18 -07002368 if (!ch) {
2369 pr_err("%s: Invalid channel specified\n", __func__);
2370 return -ENODEV;
2371 }
2372
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002373 spin_lock_irqsave(&smd_lock, flags);
2374 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002375 spin_unlock_irqrestore(&smd_lock, flags);
2376
2377 return 0;
2378}
2379EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002380
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002381int smd_is_pkt_avail(smd_channel_t *ch)
2382{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002383 unsigned long flags;
2384
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002385 if (!ch || !ch->is_pkt_ch)
2386 return -EINVAL;
2387
2388 if (ch->current_packet)
2389 return 1;
2390
Jeff Hugoa8549f12012-08-13 20:36:18 -06002391 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002392 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002393 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002394
2395 return ch->current_packet ? 1 : 0;
2396}
2397EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002398
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002399static int smsm_cb_init(void)
2400{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002401 struct smsm_state_info *state_info;
2402 int n;
2403 int ret = 0;
2404
2405 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2406 GFP_KERNEL);
2407
2408 if (!smsm_states) {
2409 pr_err("%s: SMSM init failed\n", __func__);
2410 return -ENOMEM;
2411 }
2412
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002413 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2414 if (!smsm_cb_wq) {
2415 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2416 kfree(smsm_states);
2417 return -EFAULT;
2418 }
2419
Eric Holmbergc8002902011-09-16 13:55:57 -06002420 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002421 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2422 state_info = &smsm_states[n];
2423 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002424 state_info->intr_mask_set = 0x0;
2425 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002426 INIT_LIST_HEAD(&state_info->callbacks);
2427 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002428 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002429
2430 return ret;
2431}
2432
2433static int smsm_init(void)
2434{
2435 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2436 int i;
2437 struct smsm_size_info_type *smsm_size_info;
Eric Holmberge5266d32013-02-25 18:29:27 -07002438 unsigned long flags;
2439 unsigned long j_start;
2440
2441 /* Verify that remote spinlock is not deadlocked */
2442 j_start = jiffies;
2443 while (!remote_spin_trylock_irqsave(&remote_spinlock, flags)) {
2444 if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
2445 panic("%s: Remote processor %d will not release spinlock\n",
2446 __func__, remote_spin_owner(&remote_spinlock));
2447 }
2448 }
2449 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002450
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002451 smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
2452 sizeof(struct smsm_size_info_type));
2453 if (smsm_size_info) {
2454 SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
2455 SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
2456 }
2457
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002458 i = kfifo_alloc(&smsm_snapshot_fifo,
2459 sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
2460 GFP_KERNEL);
2461 if (i) {
2462 pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
2463 return i;
2464 }
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002465 wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
2466 "smsm_snapshot");
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002467
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002468 if (!smsm_info.state) {
2469 smsm_info.state = smem_alloc2(ID_SHARED_STATE,
2470 SMSM_NUM_ENTRIES *
2471 sizeof(uint32_t));
2472
2473 if (smsm_info.state) {
2474 __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2475 if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
2476 __raw_writel(0, \
2477 SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
2478 }
2479 }
2480
2481 if (!smsm_info.intr_mask) {
2482 smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
2483 SMSM_NUM_ENTRIES *
2484 SMSM_NUM_HOSTS *
2485 sizeof(uint32_t));
2486
Eric Holmberge8a39322012-04-03 15:14:02 -06002487 if (smsm_info.intr_mask) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002488 for (i = 0; i < SMSM_NUM_ENTRIES; i++)
Eric Holmberge8a39322012-04-03 15:14:02 -06002489 __raw_writel(0x0,
2490 SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
2491
2492 /* Configure legacy modem bits */
2493 __raw_writel(LEGACY_MODEM_SMSM_MASK,
2494 SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
2495 SMSM_APPS));
2496 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002497 }
2498
2499 if (!smsm_info.intr_mux)
2500 smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
2501 SMSM_NUM_INTR_MUX *
2502 sizeof(uint32_t));
2503
2504 i = smsm_cb_init();
2505 if (i)
2506 return i;
2507
2508 wmb();
Eric Holmberg144c2de2012-10-04 13:37:28 -06002509
2510 smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL);
2511 i = register_pm_notifier(&smsm_pm_nb);
2512 if (i)
2513 pr_err("%s: power state notif error %d\n", __func__, i);
2514
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002515 return 0;
2516}
2517
2518void smsm_reset_modem(unsigned mode)
2519{
2520 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2521 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2522 } else if (mode == SMSM_MODEM_WAIT) {
2523 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2524 } else { /* reset_mode is SMSM_RESET or default */
2525 mode = SMSM_RESET;
2526 }
2527
2528 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2529}
2530EXPORT_SYMBOL(smsm_reset_modem);
2531
2532void smsm_reset_modem_cont(void)
2533{
2534 unsigned long flags;
2535 uint32_t state;
2536
2537 if (!smsm_info.state)
2538 return;
2539
2540 spin_lock_irqsave(&smem_lock, flags);
2541 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
2542 & ~SMSM_MODEM_WAIT;
2543 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2544 wmb();
2545 spin_unlock_irqrestore(&smem_lock, flags);
2546}
2547EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002548
Eric Holmbergda31d042012-03-28 14:01:02 -06002549static void smsm_cb_snapshot(uint32_t use_wakelock)
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002550{
2551 int n;
2552 uint32_t new_state;
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002553 unsigned long flags;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002554 int ret;
2555
2556 ret = kfifo_avail(&smsm_snapshot_fifo);
Eric Holmbergda31d042012-03-28 14:01:02 -06002557 if (ret < SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002558 pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
2559 return;
2560 }
2561
Eric Holmberg96b55f62012-04-03 19:10:46 -06002562 /*
2563 * To avoid a race condition with notify_smsm_cb_clients_worker, the
2564 * following sequence must be followed:
2565 * 1) increment snapshot count
2566 * 2) insert data into FIFO
2567 *
2568 * Potentially in parallel, the worker:
2569 * a) verifies >= 1 snapshots are in FIFO
2570 * b) processes snapshot
2571 * c) decrements reference count
2572 *
2573 * This order ensures that 1 will always occur before abc.
2574 */
Eric Holmbergda31d042012-03-28 14:01:02 -06002575 if (use_wakelock) {
2576 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2577 if (smsm_snapshot_count == 0) {
2578 SMx_POWER_INFO("SMSM snapshot wake lock\n");
2579 wake_lock(&smsm_snapshot_wakelock);
2580 }
2581 ++smsm_snapshot_count;
2582 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2583 }
Eric Holmberg96b55f62012-04-03 19:10:46 -06002584
2585 /* queue state entries */
2586 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2587 new_state = __raw_readl(SMSM_STATE_ADDR(n));
2588
2589 ret = kfifo_in(&smsm_snapshot_fifo,
2590 &new_state, sizeof(new_state));
2591 if (ret != sizeof(new_state)) {
2592 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2593 goto restore_snapshot_count;
2594 }
2595 }
2596
2597 /* queue wakelock usage flag */
2598 ret = kfifo_in(&smsm_snapshot_fifo,
2599 &use_wakelock, sizeof(use_wakelock));
2600 if (ret != sizeof(use_wakelock)) {
2601 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2602 goto restore_snapshot_count;
2603 }
2604
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002605 queue_work(smsm_cb_wq, &smsm_cb_work);
Eric Holmberg96b55f62012-04-03 19:10:46 -06002606 return;
2607
2608restore_snapshot_count:
2609 if (use_wakelock) {
2610 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2611 if (smsm_snapshot_count) {
2612 --smsm_snapshot_count;
2613 if (smsm_snapshot_count == 0) {
2614 SMx_POWER_INFO("SMSM snapshot wake unlock\n");
2615 wake_unlock(&smsm_snapshot_wakelock);
2616 }
2617 } else {
2618 pr_err("%s: invalid snapshot count\n", __func__);
2619 }
2620 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2621 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002622}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002623
2624static irqreturn_t smsm_irq_handler(int irq, void *data)
2625{
2626 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002627
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002628 if (irq == INT_ADSP_A11_SMSM) {
Eric Holmberg6282c5d2011-10-27 17:30:57 -06002629 uint32_t mux_val;
2630 static uint32_t prev_smem_q6_apps_smsm;
2631
2632 if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
2633 mux_val = __raw_readl(
2634 SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
2635 if (mux_val != prev_smem_q6_apps_smsm)
2636 prev_smem_q6_apps_smsm = mux_val;
2637 }
2638
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002639 spin_lock_irqsave(&smem_lock, flags);
Eric Holmbergda31d042012-03-28 14:01:02 -06002640 smsm_cb_snapshot(1);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002641 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002642 return IRQ_HANDLED;
2643 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002644
2645 spin_lock_irqsave(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002646 if (!smsm_info.state) {
2647 SMSM_INFO("<SM NO STATE>\n");
2648 } else {
2649 unsigned old_apps, apps;
2650 unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002651
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002652 old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002653
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002654 SMSM_DBG("<SM %08x %08x>\n", apps, modm);
2655 if (apps & SMSM_RESET) {
2656 /* If we get an interrupt and the apps SMSM_RESET
2657 bit is already set, the modem is acking the
2658 app's reset ack. */
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06002659 if (!disable_smsm_reset_handshake)
Angshuman Sarkaread67bd2011-09-21 20:13:12 +05302660 apps &= ~SMSM_RESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002661 /* Issue a fake irq to handle any
2662 * smd state changes during reset
2663 */
2664 smd_fake_irq_handler(0);
Daniel Walker79848a22010-03-16 15:20:07 -07002665
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002666 /* queue modem restart notify chain */
2667 modem_queue_start_reset_notify();
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002668
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002669 } else if (modm & SMSM_RESET) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002670 pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
Ram Somani8b9589f2012-04-03 12:07:18 +05302671 if (!disable_smsm_reset_handshake) {
2672 apps |= SMSM_RESET;
2673 flush_cache_all();
2674 outer_flush_all();
2675 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002676 modem_queue_start_reset_notify();
2677
2678 } else if (modm & SMSM_INIT) {
2679 if (!(apps & SMSM_INIT)) {
2680 apps |= SMSM_INIT;
2681 modem_queue_smsm_init_notify();
2682 }
2683
2684 if (modm & SMSM_SMDINIT)
2685 apps |= SMSM_SMDINIT;
2686 if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
2687 (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
2688 apps |= SMSM_RUN;
2689 } else if (modm & SMSM_SYSTEM_DOWNLOAD) {
2690 pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
2691 modem_queue_start_reset_notify();
2692 }
2693
2694 if (old_apps != apps) {
2695 SMSM_DBG("<SM %08x NOTIFY>\n", apps);
2696 __raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2697 do_smd_probe();
2698 notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
2699 }
2700
Eric Holmbergda31d042012-03-28 14:01:02 -06002701 smsm_cb_snapshot(1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002702 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002703 spin_unlock_irqrestore(&smem_lock, flags);
2704 return IRQ_HANDLED;
2705}
2706
Eric Holmberg98c6c642012-02-24 11:29:35 -07002707static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002708{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002709 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002710 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002711 return smsm_irq_handler(irq, data);
2712}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002713
Eric Holmberg98c6c642012-02-24 11:29:35 -07002714static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2715{
2716 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002717 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002718 return smsm_irq_handler(irq, data);
2719}
2720
2721static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2722{
2723 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002724 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002725 return smsm_irq_handler(irq, data);
2726}
2727
2728static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2729{
2730 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002731 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002732 return smsm_irq_handler(irq, data);
2733}
2734
Eric Holmberge8a39322012-04-03 15:14:02 -06002735/*
2736 * Changes the global interrupt mask. The set and clear masks are re-applied
2737 * every time the global interrupt mask is updated for callback registration
2738 * and de-registration.
2739 *
2740 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2741 * mask and the set mask, the result will be that the interrupt is set.
2742 *
2743 * @smsm_entry SMSM entry to change
2744 * @clear_mask 1 = clear bit, 0 = no-op
2745 * @set_mask 1 = set bit, 0 = no-op
2746 *
2747 * @returns 0 for success, < 0 for error
2748 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002749int smsm_change_intr_mask(uint32_t smsm_entry,
2750 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002751{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002752 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002753 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002754
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002755 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2756 pr_err("smsm_change_state: Invalid entry %d\n",
2757 smsm_entry);
2758 return -EINVAL;
2759 }
2760
2761 if (!smsm_info.intr_mask) {
2762 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002763 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002764 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002765
2766 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002767 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2768 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002769
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002770 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2771 new_mask = (old_mask & ~clear_mask) | set_mask;
2772 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002773
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002774 wmb();
2775 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002776
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002777 return 0;
2778}
2779EXPORT_SYMBOL(smsm_change_intr_mask);
2780
2781int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2782{
2783 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2784 pr_err("smsm_change_state: Invalid entry %d\n",
2785 smsm_entry);
2786 return -EINVAL;
2787 }
2788
2789 if (!smsm_info.intr_mask) {
2790 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2791 return -EIO;
2792 }
2793
2794 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2795 return 0;
2796}
2797EXPORT_SYMBOL(smsm_get_intr_mask);
2798
2799int smsm_change_state(uint32_t smsm_entry,
2800 uint32_t clear_mask, uint32_t set_mask)
2801{
2802 unsigned long flags;
2803 uint32_t old_state, new_state;
2804
2805 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2806 pr_err("smsm_change_state: Invalid entry %d",
2807 smsm_entry);
2808 return -EINVAL;
2809 }
2810
2811 if (!smsm_info.state) {
2812 pr_err("smsm_change_state <SM NO STATE>\n");
2813 return -EIO;
2814 }
2815 spin_lock_irqsave(&smem_lock, flags);
2816
2817 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2818 new_state = (old_state & ~clear_mask) | set_mask;
2819 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2820 SMSM_DBG("smsm_change_state %x\n", new_state);
2821 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002822
2823 spin_unlock_irqrestore(&smem_lock, flags);
2824
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002825 return 0;
2826}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002827EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002828
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002829uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002830{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002831 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002832
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002833 /* needs interface change to return error code */
2834 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2835 pr_err("smsm_change_state: Invalid entry %d",
2836 smsm_entry);
2837 return 0;
2838 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002839
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002840 if (!smsm_info.state) {
2841 pr_err("smsm_get_state <SM NO STATE>\n");
2842 } else {
2843 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2844 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002845
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002846 return rv;
2847}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002848EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002849
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002850/**
2851 * Performs SMSM callback client notifiction.
2852 */
2853void notify_smsm_cb_clients_worker(struct work_struct *work)
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002854{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002855 struct smsm_state_cb_info *cb_info;
2856 struct smsm_state_info *state_info;
2857 int n;
2858 uint32_t new_state;
2859 uint32_t state_changes;
Eric Holmbergda31d042012-03-28 14:01:02 -06002860 uint32_t use_wakelock;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002861 int ret;
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002862 unsigned long flags;
Brian Swetland03e00cd2009-07-01 17:58:37 -07002863
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002864 if (!smd_initialized)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002865 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002866
Eric Holmbergda31d042012-03-28 14:01:02 -06002867 while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002868 mutex_lock(&smsm_lock);
2869 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2870 state_info = &smsm_states[n];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002871
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002872 ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
2873 sizeof(new_state));
2874 if (ret != sizeof(new_state)) {
2875 pr_err("%s: snapshot underflow %d\n",
2876 __func__, ret);
2877 mutex_unlock(&smsm_lock);
2878 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002879 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002880
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002881 state_changes = state_info->last_value ^ new_state;
2882 if (state_changes) {
Eric Holmberg98c6c642012-02-24 11:29:35 -07002883 SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
2884 n, state_info->last_value,
2885 new_state);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002886 list_for_each_entry(cb_info,
2887 &state_info->callbacks, cb_list) {
2888
2889 if (cb_info->mask & state_changes)
2890 cb_info->notify(cb_info->data,
2891 state_info->last_value,
2892 new_state);
2893 }
2894 state_info->last_value = new_state;
2895 }
2896 }
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002897
Eric Holmbergda31d042012-03-28 14:01:02 -06002898 /* read wakelock flag */
2899 ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
2900 sizeof(use_wakelock));
2901 if (ret != sizeof(use_wakelock)) {
2902 pr_err("%s: snapshot underflow %d\n",
2903 __func__, ret);
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002904 mutex_unlock(&smsm_lock);
Eric Holmbergda31d042012-03-28 14:01:02 -06002905 return;
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002906 }
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002907 mutex_unlock(&smsm_lock);
Eric Holmbergda31d042012-03-28 14:01:02 -06002908
2909 if (use_wakelock) {
2910 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2911 if (smsm_snapshot_count) {
2912 --smsm_snapshot_count;
2913 if (smsm_snapshot_count == 0) {
2914 SMx_POWER_INFO("SMSM snapshot"
2915 " wake unlock\n");
2916 wake_unlock(&smsm_snapshot_wakelock);
2917 }
2918 } else {
2919 pr_err("%s: invalid snapshot count\n",
2920 __func__);
2921 }
2922 spin_unlock_irqrestore(&smsm_snapshot_count_lock,
2923 flags);
2924 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002925 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002926}
2927
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002928
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002929/**
2930 * Registers callback for SMSM state notifications when the specified
2931 * bits change.
2932 *
2933 * @smsm_entry Processor entry to deregister
2934 * @mask Bits to deregister (if result is 0, callback is removed)
2935 * @notify Notification function to deregister
2936 * @data Opaque data passed in to callback
2937 *
2938 * @returns Status code
2939 * <0 error code
2940 * 0 inserted new entry
2941 * 1 updated mask of existing entry
2942 */
2943int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
2944 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002945{
Eric Holmberge8a39322012-04-03 15:14:02 -06002946 struct smsm_state_info *state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002947 struct smsm_state_cb_info *cb_info;
2948 struct smsm_state_cb_info *cb_found = 0;
Eric Holmberge8a39322012-04-03 15:14:02 -06002949 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002950 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002951
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002952 if (smsm_entry >= SMSM_NUM_ENTRIES)
2953 return -EINVAL;
2954
Eric Holmbergc8002902011-09-16 13:55:57 -06002955 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002956
2957 if (!smsm_states) {
2958 /* smsm not yet initialized */
2959 ret = -ENODEV;
2960 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002961 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002962
Eric Holmberge8a39322012-04-03 15:14:02 -06002963 state = &smsm_states[smsm_entry];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002964 list_for_each_entry(cb_info,
Eric Holmberge8a39322012-04-03 15:14:02 -06002965 &state->callbacks, cb_list) {
2966 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002967 (cb_info->data == data)) {
2968 cb_info->mask |= mask;
2969 cb_found = cb_info;
2970 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002971 }
Eric Holmberge8a39322012-04-03 15:14:02 -06002972 new_mask |= cb_info->mask;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002973 }
2974
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002975 if (!cb_found) {
2976 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
2977 GFP_ATOMIC);
2978 if (!cb_info) {
2979 ret = -ENOMEM;
2980 goto cleanup;
2981 }
2982
2983 cb_info->mask = mask;
2984 cb_info->notify = notify;
2985 cb_info->data = data;
2986 INIT_LIST_HEAD(&cb_info->cb_list);
2987 list_add_tail(&cb_info->cb_list,
Eric Holmberge8a39322012-04-03 15:14:02 -06002988 &state->callbacks);
2989 new_mask |= mask;
2990 }
2991
2992 /* update interrupt notification mask */
2993 if (smsm_entry == SMSM_MODEM_STATE)
2994 new_mask |= LEGACY_MODEM_SMSM_MASK;
2995
2996 if (smsm_info.intr_mask) {
2997 unsigned long flags;
2998
2999 spin_lock_irqsave(&smem_lock, flags);
3000 new_mask = (new_mask & ~state->intr_mask_clear)
3001 | state->intr_mask_set;
3002 __raw_writel(new_mask,
3003 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
3004 wmb();
3005 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003006 }
3007
3008cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06003009 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003010 return ret;
3011}
3012EXPORT_SYMBOL(smsm_state_cb_register);
3013
3014
3015/**
3016 * Deregisters for SMSM state notifications for the specified bits.
3017 *
3018 * @smsm_entry Processor entry to deregister
3019 * @mask Bits to deregister (if result is 0, callback is removed)
3020 * @notify Notification function to deregister
3021 * @data Opaque data passed in to callback
3022 *
3023 * @returns Status code
3024 * <0 error code
3025 * 0 not found
3026 * 1 updated mask
3027 * 2 removed callback
3028 */
/**
 * smsm_state_cb_deregister() - remove bits from (or remove) a state callback
 * @smsm_entry: SMSM entry the callback was registered against
 * @mask:       state-change bits to stop watching
 * @notify:     callback function pointer used at registration (match key)
 * @data:       callback cookie used at registration (part of the match key)
 *
 * Clears @mask from the first matching callback's watch mask; if no
 * watched bits remain the callback node is removed and freed.  The
 * interrupt notification mask for @smsm_entry is then recomputed from
 * the surviving callbacks and written back to shared memory.
 *
 * Return: -EINVAL on bad @smsm_entry, -ENODEV if SMSM is not yet
 * initialized, 0 if no matching callback was found, 1 if the mask was
 * updated, 2 if the callback was removed entirely.
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	/* smsm_lock serializes callback-list mutation with registration */
	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	state = &smsm_states[smsm_entry];
	/* _safe variant: the matching node may be deleted inside the loop.
	 * The !ret guard means only the FIRST (notify, data) match is
	 * modified; the rest of the walk just accumulates new_mask. */
	list_for_each_entry_safe(cb_info, cb_tmp,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				/* freed node must not feed new_mask below */
				continue;
			}
		}
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		/* smem_lock protects the shared-memory interrupt mask word */
		spin_lock_irqsave(&smem_lock, flags);
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		/* ensure the mask is visible to the remote side */
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3087
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003088int smd_module_init_notifier_register(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003089{
3090 int ret;
3091 if (!nb)
3092 return -EINVAL;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003093 mutex_lock(&smd_module_init_notifier_lock);
3094 ret = raw_notifier_chain_register(&smd_module_init_notifier_list, nb);
3095 if (smd_module_inited)
3096 nb->notifier_call(nb, 0, NULL);
3097 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003098 return ret;
3099}
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003100EXPORT_SYMBOL(smd_module_init_notifier_register);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003101
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003102int smd_module_init_notifier_unregister(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003103{
3104 int ret;
3105 if (!nb)
3106 return -EINVAL;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003107 mutex_lock(&smd_module_init_notifier_lock);
3108 ret = raw_notifier_chain_unregister(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003109 nb);
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003110 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003111 return ret;
3112}
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003113EXPORT_SYMBOL(smd_module_init_notifier_unregister);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003114
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003115static void smd_module_init_notify(uint32_t state, void *data)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003116{
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003117 mutex_lock(&smd_module_init_notifier_lock);
3118 smd_module_inited = 1;
3119 raw_notifier_call_chain(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003120 state, data);
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003121 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003122}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003123
/*
 * smd_core_init() - legacy (non-DT, non-platform-data) interrupt setup
 *
 * Requests the hard-wired modem SMD/SMSM interrupts and, depending on
 * kernel config, the Q6 (ADSP), DSPS and WCNSS interrupts.  Each IRQ is
 * also marked as a wakeup source; enable_irq_wake() failures are only
 * logged.  On any request_irq() failure every IRQ acquired so far is
 * freed and the error is returned, so the teardown lists grow with each
 * section below.
 */
int smd_core_init(void)
{
	int r;
	unsigned long flags = IRQF_TRIGGER_RISING;
	SMD_INFO("smd_core_init()\n");

	/* modem SMD interrupt -- always present */
	r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
			flags, "smd_dev", 0);
	if (r < 0)
		return r;
	interrupt_stats[SMD_MODEM].smd_interrupt_id = INT_A9_M2A_0;
	r = enable_irq_wake(INT_A9_M2A_0);
	if (r < 0)
		pr_err("smd_core_init: "
			"enable_irq_wake failed for INT_A9_M2A_0\n");

	/* modem SMSM interrupt -- always present */
	r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
			flags, "smsm_dev", 0);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		return r;
	}
	interrupt_stats[SMD_MODEM].smsm_interrupt_id = INT_A9_M2A_5;
	r = enable_irq_wake(INT_A9_M2A_5);
	if (r < 0)
		pr_err("smd_core_init: "
			"enable_irq_wake failed for INT_A9_M2A_5\n");

#if defined(CONFIG_QDSP6)
	/* when SMD and SMSM share one Q6 interrupt line the handlers must
	 * be willing to share it */
#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
	flags |= IRQF_SHARED;
#endif
	r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
			flags, "smd_dev", smd_dsp_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		return r;
	}

	interrupt_stats[SMD_Q6].smd_interrupt_id = INT_ADSP_A11;
	r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
			flags, "smsm_dev", smsm_dsp_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		return r;
	}

	interrupt_stats[SMD_Q6].smsm_interrupt_id = INT_ADSP_A11_SMSM;
	r = enable_irq_wake(INT_ADSP_A11);
	if (r < 0)
		pr_err("smd_core_init: "
			"enable_irq_wake failed for INT_ADSP_A11\n");

#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
	r = enable_irq_wake(INT_ADSP_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: enable_irq_wake "
			"failed for INT_ADSP_A11_SMSM\n");
#endif
	/* sharing only applies to the Q6 pair above */
	flags &= ~IRQF_SHARED;
#endif

#if defined(CONFIG_DSPS)
	/* NOTE(review): this section frees INT_ADSP_A11 on failure, which
	 * only exists under CONFIG_QDSP6 -- assumes CONFIG_QDSP6 is always
	 * enabled when CONFIG_DSPS is; confirm against Kconfig. */
	r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
			flags, "smd_dev", smd_dsps_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		return r;
	}

	interrupt_stats[SMD_DSPS].smd_interrupt_id = INT_DSPS_A11;
	r = enable_irq_wake(INT_DSPS_A11);
	if (r < 0)
		/* NOTE(review): message names INT_ADSP_A11 but the IRQ
		 * here is INT_DSPS_A11 -- looks like copy/paste log text */
		pr_err("smd_core_init: "
			"enable_irq_wake failed for INT_ADSP_A11\n");
#endif

#if defined(CONFIG_WCNSS)
	r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
			flags, "smd_dev", smd_wcnss_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		return r;
	}

	interrupt_stats[SMD_WCNSS].smd_interrupt_id = INT_WCNSS_A11;
	r = enable_irq_wake(INT_WCNSS_A11);
	if (r < 0)
		pr_err("smd_core_init: "
			"enable_irq_wake failed for INT_WCNSS_A11\n");

	r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
			flags, "smsm_dev", smsm_wcnss_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
		return r;
	}

	interrupt_stats[SMD_WCNSS].smsm_interrupt_id = INT_WCNSS_A11_SMSM;
	r = enable_irq_wake(INT_WCNSS_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: "
			"enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
#endif

#if defined(CONFIG_DSPS_SMSM)
	r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
			flags, "smsm_dev", smsm_dsps_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
		free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
		return r;
	}

	interrupt_stats[SMD_DSPS].smsm_interrupt_id = INT_DSPS_A11_SMSM;
	r = enable_irq_wake(INT_DSPS_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: "
			"enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
#endif
	SMD_INFO("smd_core_init() done\n");

	return 0;
}
3268
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303269static int intr_init(struct interrupt_config_item *private_irq,
3270 struct smd_irq_config *platform_irq,
3271 struct platform_device *pdev
3272 )
3273{
3274 int irq_id;
3275 int ret;
3276 int ret_wake;
3277
3278 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3279 private_irq->out_offset = platform_irq->out_offset;
3280 private_irq->out_base = platform_irq->out_base;
3281
3282 irq_id = platform_get_irq_byname(
3283 pdev,
3284 platform_irq->irq_name
3285 );
3286 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3287 platform_irq->irq_name, irq_id);
3288 ret = request_irq(irq_id,
3289 private_irq->irq_handler,
3290 platform_irq->flags,
3291 platform_irq->device_name,
3292 (void *)platform_irq->dev_id
3293 );
3294 if (ret < 0) {
3295 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003296 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303297 } else {
3298 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003299 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303300 ret_wake = enable_irq_wake(irq_id);
3301 if (ret_wake < 0) {
3302 pr_err("smd: enable_irq_wake failed on %s",
3303 platform_irq->irq_name);
3304 }
3305 }
3306
3307 return ret;
3308}
3309
/**
 * smd_core_platform_init() - probe-time setup from board platform data
 * @pdev: platform device carrying a struct smd_platform in platform_data
 *
 * Builds the SMEM region table (index 0 = the statically-mapped main
 * region, indices 1..N = ioremapped auxiliary regions) and registers
 * the SMD and, where supported, SMSM interrupts for every configured
 * subsystem edge.  The global smem_areas pointer is published only
 * after everything has succeeded; on failure all IRQs and mappings
 * acquired so far are released.
 *
 * Return: 0 on success, negative errno on failure.
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;
	struct smd_smem_regions *smd_smem_areas;
	struct smem_area *smem_areas_tmp = NULL;
	int smem_idx;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	/* boards may opt out of the SMSM reset handshake done during SSR */
	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			smd_ssr_config->disable_smsm_reset_handshake;

	smd_smem_areas = smd_platform_data->smd_smem_areas;
	/* +1 reserves index 0 for the main SMEM region */
	num_smem_areas = smd_platform_data->num_smem_areas + 1;

	/* Initialize main SMEM region */
	smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
				GFP_KERNEL);
	if (!smem_areas_tmp) {
		pr_err("%s: smem_areas kmalloc failed\n", __func__);
		err_ret = -ENOMEM;
		goto smem_areas_alloc_fail;
	}

	/* main region is already mapped at boot -- no ioremap needed */
	smem_areas_tmp[0].phys_addr = msm_shared_ram_phys;
	smem_areas_tmp[0].size = MSM_SHARED_RAM_SIZE;
	smem_areas_tmp[0].virt_addr = MSM_SHARED_RAM_BASE;

	/* Configure auxiliary SMEM regions */
	for (smem_idx = 1; smem_idx < num_smem_areas; ++smem_idx) {
		/* NOTE(review): smd_smem_areas is indexed from 1 here, so
		 * element 0 of the platform array is never read and the
		 * last iteration reads one past num_smem_areas entries --
		 * confirm the board data layout (placeholder entry 0?). */
		smem_areas_tmp[smem_idx].phys_addr =
				smd_smem_areas[smem_idx].phys_addr;
		smem_areas_tmp[smem_idx].size =
				smd_smem_areas[smem_idx].size;
		smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
			(unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
			smem_areas_tmp[smem_idx].size);
		if (!smem_areas_tmp[smem_idx].virt_addr) {
			pr_err("%s: ioremap_nocache() of addr: %pa size: %pa\n",
				__func__,
				&smem_areas_tmp[smem_idx].phys_addr,
				&smem_areas_tmp[smem_idx].size);
			err_ret = -ENOMEM;
			goto smem_failed;
		}

		/* reject mappings that would wrap the address space */
		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
				(uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
				smem_areas_tmp[smem_idx].size)) {
			pr_err("%s: invalid virtual address block %i: %p:%pa\n",
					__func__, smem_idx,
					smem_areas_tmp[smem_idx].virt_addr,
					&smem_areas_tmp[smem_idx].size);
			/* entry is mapped; bump index so cleanup unmaps it */
			++smem_idx;
			err_ret = -EINVAL;
			goto smem_failed;
		}

		SMD_DBG("%s: %d = %pa %pa", __func__, smem_idx,
				&smd_smem_areas[smem_idx].phys_addr,
				&smd_smem_areas[smem_idx].size);
	}

	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		interrupt_stats[cfg->irq_config_id].smd_interrupt_id
						 = cfg->smd_int.irq_id;
		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		if (cfg->smsm_int.irq_id)
			interrupt_stats[cfg->irq_config_id].smsm_interrupt_id
						 = cfg->smsm_int.irq_id;
		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}

	SMD_INFO("smd_core_platform_init() done\n");

	/* publish only after full success */
	smem_areas = smem_areas_tmp;
	return 0;

intr_failed:
	pr_err("smd: deregistering IRQs\n");
	/* intr_init() recorded a negative irq_id for failed requests;
	 * NOTE(review): entries never visited keep their board-data value,
	 * presumably 0 -- confirm free_irq() on those is harmless here */
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
	/* fall through: SMEM mappings must be torn down as well */
smem_failed:
	for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
		iounmap(smem_areas_tmp[smem_idx].virt_addr);

	num_smem_areas = 0;
	kfree(smem_areas_tmp);

smem_areas_alloc_fail:
	return err_ret;
}
3452
Jeff Hugo412356e2012-09-27 17:14:23 -06003453static int __devinit parse_smd_devicetree(struct device_node *node,
3454 void *irq_out_base)
3455{
3456 uint32_t edge;
3457 char *key;
3458 int ret;
3459 uint32_t irq_offset;
3460 uint32_t irq_bitmask;
3461 uint32_t irq_line;
3462 unsigned long irq_flags = IRQF_TRIGGER_RISING;
3463 const char *pilstr;
3464 struct interrupt_config_item *private_irq;
3465
3466 key = "qcom,smd-edge";
3467 ret = of_property_read_u32(node, key, &edge);
3468 if (ret)
3469 goto missing_key;
3470 SMD_DBG("%s: %s = %d", __func__, key, edge);
3471
3472 key = "qcom,smd-irq-offset";
3473 ret = of_property_read_u32(node, key, &irq_offset);
3474 if (ret)
3475 goto missing_key;
3476 SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
3477
3478 key = "qcom,smd-irq-bitmask";
3479 ret = of_property_read_u32(node, key, &irq_bitmask);
3480 if (ret)
3481 goto missing_key;
3482 SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
3483
3484 key = "interrupts";
3485 irq_line = irq_of_parse_and_map(node, 0);
3486 if (!irq_line)
3487 goto missing_key;
3488 SMD_DBG("%s: %s = %d", __func__, key, irq_line);
3489
3490 key = "qcom,pil-string";
3491 pilstr = of_get_property(node, key, NULL);
3492 if (pilstr)
3493 SMD_DBG("%s: %s = %s", __func__, key, pilstr);
3494
3495 key = "qcom,irq-no-suspend";
3496 ret = of_property_read_bool(node, key);
3497 if (ret)
3498 irq_flags |= IRQF_NO_SUSPEND;
3499
3500 private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smd;
3501 private_irq->out_bit_pos = irq_bitmask;
3502 private_irq->out_offset = irq_offset;
3503 private_irq->out_base = irq_out_base;
3504 private_irq->irq_id = irq_line;
3505
3506 ret = request_irq(irq_line,
3507 private_irq->irq_handler,
3508 irq_flags,
3509 "smd_dev",
3510 NULL);
3511 if (ret < 0) {
3512 pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
3513 return ret;
3514 } else {
3515 ret = enable_irq_wake(irq_line);
3516 if (ret < 0)
3517 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
3518 irq_line);
3519 }
3520
3521 if (pilstr)
3522 strlcpy(edge_to_pids[edge].subsys_name, pilstr,
3523 SMD_MAX_CH_NAME_LEN);
3524
3525 return 0;
3526
3527missing_key:
3528 pr_err("%s: missing key: %s", __func__, key);
3529 return -ENODEV;
3530}
3531
3532static int __devinit parse_smsm_devicetree(struct device_node *node,
3533 void *irq_out_base)
3534{
3535 uint32_t edge;
3536 char *key;
3537 int ret;
3538 uint32_t irq_offset;
3539 uint32_t irq_bitmask;
3540 uint32_t irq_line;
3541 struct interrupt_config_item *private_irq;
3542
3543 key = "qcom,smsm-edge";
3544 ret = of_property_read_u32(node, key, &edge);
3545 if (ret)
3546 goto missing_key;
3547 SMD_DBG("%s: %s = %d", __func__, key, edge);
3548
3549 key = "qcom,smsm-irq-offset";
3550 ret = of_property_read_u32(node, key, &irq_offset);
3551 if (ret)
3552 goto missing_key;
3553 SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
3554
3555 key = "qcom,smsm-irq-bitmask";
3556 ret = of_property_read_u32(node, key, &irq_bitmask);
3557 if (ret)
3558 goto missing_key;
3559 SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
3560
3561 key = "interrupts";
3562 irq_line = irq_of_parse_and_map(node, 0);
3563 if (!irq_line)
3564 goto missing_key;
3565 SMD_DBG("%s: %s = %d", __func__, key, irq_line);
3566
3567 private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smsm;
3568 private_irq->out_bit_pos = irq_bitmask;
3569 private_irq->out_offset = irq_offset;
3570 private_irq->out_base = irq_out_base;
3571 private_irq->irq_id = irq_line;
3572
3573 ret = request_irq(irq_line,
3574 private_irq->irq_handler,
3575 IRQF_TRIGGER_RISING,
3576 "smsm_dev",
3577 NULL);
3578 if (ret < 0) {
3579 pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
3580 return ret;
3581 } else {
3582 ret = enable_irq_wake(irq_line);
3583 if (ret < 0)
3584 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
3585 irq_line);
3586 }
3587
3588 return 0;
3589
3590missing_key:
3591 pr_err("%s: missing key: %s", __func__, key);
3592 return -ENODEV;
3593}
3594
3595static void __devinit unparse_smd_devicetree(struct device_node *node)
3596{
3597 uint32_t irq_line;
3598
3599 irq_line = irq_of_parse_and_map(node, 0);
3600
3601 free_irq(irq_line, NULL);
3602}
3603
3604static void __devinit unparse_smsm_devicetree(struct device_node *node)
3605{
3606 uint32_t irq_line;
3607
3608 irq_line = irq_of_parse_and_map(node, 0);
3609
3610 free_irq(irq_line, NULL);
3611}
3612
3613static int __devinit smd_core_devicetree_init(struct platform_device *pdev)
3614{
3615 char *key;
3616 struct resource *r;
3617 void *irq_out_base;
Stepan Moskovchenkod6ee8262013-02-06 11:26:05 -08003618 phys_addr_t aux_mem_base;
3619 resource_size_t aux_mem_size;
Jeff Hugo412356e2012-09-27 17:14:23 -06003620 int temp_string_size = 11; /* max 3 digit count */
3621 char temp_string[temp_string_size];
Jeff Hugo412356e2012-09-27 17:14:23 -06003622 struct device_node *node;
3623 int ret;
3624 const char *compatible;
Eric Holmberg51edef72013-04-11 14:28:33 -06003625 struct ramdump_segment *ramdump_segments_tmp = NULL;
3626 struct smem_area *smem_areas_tmp = NULL;
3627 int smem_idx = 0;
Jeff Hugo412356e2012-09-27 17:14:23 -06003628 int subnode_num = 0;
Eric Holmberg51edef72013-04-11 14:28:33 -06003629 int i;
Stepan Moskovchenkod6ee8262013-02-06 11:26:05 -08003630 resource_size_t irq_out_size;
Jeff Hugo412356e2012-09-27 17:14:23 -06003631
3632 disable_smsm_reset_handshake = 1;
3633
3634 key = "irq-reg-base";
3635 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
3636 if (!r) {
3637 pr_err("%s: missing '%s'\n", __func__, key);
3638 return -ENODEV;
3639 }
Stepan Moskovchenkod6ee8262013-02-06 11:26:05 -08003640 irq_out_size = resource_size(r);
3641 irq_out_base = ioremap_nocache(r->start, irq_out_size);
3642 if (!irq_out_base) {
3643 pr_err("%s: ioremap_nocache() of irq_out_base addr:%pr size:%pr\n",
3644 __func__, &r->start, &irq_out_size);
3645 return -ENOMEM;
3646 }
Jeff Hugo412356e2012-09-27 17:14:23 -06003647 SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
3648
Eric Holmberg51edef72013-04-11 14:28:33 -06003649 num_smem_areas = 1;
Jeff Hugo412356e2012-09-27 17:14:23 -06003650 while (1) {
Eric Holmberg51edef72013-04-11 14:28:33 -06003651 scnprintf(temp_string, temp_string_size, "aux-mem%d",
3652 num_smem_areas);
Jeff Hugo412356e2012-09-27 17:14:23 -06003653 r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3654 temp_string);
3655 if (!r)
3656 break;
3657
3658 ++num_smem_areas;
Eric Holmberg51edef72013-04-11 14:28:33 -06003659 if (num_smem_areas > 999) {
Jeff Hugo412356e2012-09-27 17:14:23 -06003660 pr_err("%s: max num aux mem regions reached\n",
3661 __func__);
3662 break;
3663 }
3664 }
3665
Eric Holmberg51edef72013-04-11 14:28:33 -06003666 /* Initialize main SMEM region and SSR ramdump region */
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003667 key = "smem";
3668 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
3669 if (!r) {
3670 pr_err("%s: missing '%s'\n", __func__, key);
3671 return -ENODEV;
3672 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003673
Eric Holmberg51edef72013-04-11 14:28:33 -06003674 smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
3675 GFP_KERNEL);
3676 if (!smem_areas_tmp) {
3677 pr_err("%s: smem areas kmalloc failed\n", __func__);
3678 ret = -ENOMEM;
3679 goto free_smem_areas;
3680 }
3681
3682 ramdump_segments_tmp = kmalloc_array(num_smem_areas,
3683 sizeof(struct ramdump_segment), GFP_KERNEL);
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003684 if (!ramdump_segments_tmp) {
3685 pr_err("%s: ramdump segment kmalloc failed\n", __func__);
3686 ret = -ENOMEM;
3687 goto free_smem_areas;
3688 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003689
Eric Holmberg51edef72013-04-11 14:28:33 -06003690 smem_areas_tmp[smem_idx].phys_addr = r->start;
3691 smem_areas_tmp[smem_idx].size = resource_size(r);
3692 smem_areas_tmp[smem_idx].virt_addr = MSM_SHARED_RAM_BASE;
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003693
Eric Holmberg51edef72013-04-11 14:28:33 -06003694 ramdump_segments_tmp[smem_idx].address = r->start;
3695 ramdump_segments_tmp[smem_idx].size = resource_size(r);
3696 ++smem_idx;
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003697
Eric Holmberg51edef72013-04-11 14:28:33 -06003698 /* Configure auxiliary SMEM regions */
3699 while (1) {
3700 scnprintf(temp_string, temp_string_size, "aux-mem%d",
3701 smem_idx);
3702 r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3703 temp_string);
3704 if (!r)
3705 break;
3706 aux_mem_base = r->start;
3707 aux_mem_size = resource_size(r);
3708
3709 ramdump_segments_tmp[smem_idx].address = aux_mem_base;
3710 ramdump_segments_tmp[smem_idx].size = aux_mem_size;
3711
3712 smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
3713 smem_areas_tmp[smem_idx].size = aux_mem_size;
3714 smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
3715 (unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
3716 smem_areas_tmp[smem_idx].size);
3717 SMD_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
3718 &aux_mem_base, &aux_mem_size,
3719 smem_areas_tmp[smem_idx].virt_addr);
3720
3721 if (!smem_areas_tmp[smem_idx].virt_addr) {
3722 pr_err("%s: ioremap_nocache() of addr:%pa size: %pa\n",
3723 __func__,
3724 &smem_areas_tmp[smem_idx].phys_addr,
3725 &smem_areas_tmp[smem_idx].size);
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003726 ret = -ENOMEM;
3727 goto free_smem_areas;
Jeff Hugo412356e2012-09-27 17:14:23 -06003728 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003729
Eric Holmberg51edef72013-04-11 14:28:33 -06003730 if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
3731 (uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
3732 smem_areas_tmp[smem_idx].size)) {
3733 pr_err("%s: invalid virtual address block %i: %p:%pa\n",
3734 __func__, smem_idx,
3735 smem_areas_tmp[smem_idx].virt_addr,
3736 &smem_areas_tmp[smem_idx].size);
3737 ++smem_idx;
3738 ret = -EINVAL;
3739 goto free_smem_areas;
Jeff Hugo412356e2012-09-27 17:14:23 -06003740 }
Eric Holmberg51edef72013-04-11 14:28:33 -06003741
3742 ++smem_idx;
3743 if (smem_idx > 999) {
3744 pr_err("%s: max num aux mem regions reached\n",
3745 __func__);
3746 break;
3747 }
Jeff Hugo412356e2012-09-27 17:14:23 -06003748 }
3749
3750 for_each_child_of_node(pdev->dev.of_node, node) {
3751 compatible = of_get_property(node, "compatible", NULL);
Brent Hronikf4442e12013-04-17 15:13:11 -06003752 if (!compatible) {
3753 pr_err("%s: invalid child node: compatible null\n",
3754 __func__);
3755 ret = -ENODEV;
3756 goto rollback_subnodes;
3757 }
Jeff Hugo412356e2012-09-27 17:14:23 -06003758 if (!strcmp(compatible, "qcom,smd")) {
3759 ret = parse_smd_devicetree(node, irq_out_base);
3760 if (ret)
3761 goto rollback_subnodes;
3762 } else if (!strcmp(compatible, "qcom,smsm")) {
3763 ret = parse_smsm_devicetree(node, irq_out_base);
3764 if (ret)
3765 goto rollback_subnodes;
3766 } else {
3767 pr_err("%s: invalid child node named: %s\n", __func__,
3768 compatible);
3769 ret = -ENODEV;
3770 goto rollback_subnodes;
3771 }
3772 ++subnode_num;
3773 }
3774
Eric Holmberg51edef72013-04-11 14:28:33 -06003775 smem_areas = smem_areas_tmp;
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003776 smem_ramdump_segments = ramdump_segments_tmp;
Jeff Hugo412356e2012-09-27 17:14:23 -06003777 return 0;
3778
3779rollback_subnodes:
Eric Holmberg51edef72013-04-11 14:28:33 -06003780 i = 0;
Jeff Hugo412356e2012-09-27 17:14:23 -06003781 for_each_child_of_node(pdev->dev.of_node, node) {
Eric Holmberg51edef72013-04-11 14:28:33 -06003782 if (i >= subnode_num)
Jeff Hugo412356e2012-09-27 17:14:23 -06003783 break;
Eric Holmberg51edef72013-04-11 14:28:33 -06003784 ++i;
Jeff Hugo412356e2012-09-27 17:14:23 -06003785 compatible = of_get_property(node, "compatible", NULL);
3786 if (!strcmp(compatible, "qcom,smd"))
3787 unparse_smd_devicetree(node);
3788 else
3789 unparse_smsm_devicetree(node);
3790 }
3791free_smem_areas:
Eric Holmberg51edef72013-04-11 14:28:33 -06003792 for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
3793 iounmap(smem_areas_tmp[smem_idx].virt_addr);
3794
Jeff Hugo412356e2012-09-27 17:14:23 -06003795 num_smem_areas = 0;
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003796 kfree(ramdump_segments_tmp);
Eric Holmberg51edef72013-04-11 14:28:33 -06003797 kfree(smem_areas_tmp);
Jeff Hugo412356e2012-09-27 17:14:23 -06003798 return ret;
3799}
3800
Gregory Bean4416e9e2010-07-28 10:22:12 -07003801static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003802{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303803 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003804
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303805 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003806 INIT_WORK(&probe_work, smd_channel_probe_worker);
3807
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003808 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3809 if (IS_ERR(channel_close_wq)) {
3810 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3811 return -ENOMEM;
3812 }
3813
3814 if (smsm_init()) {
3815 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003816 return -1;
3817 }
3818
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303819 if (pdev) {
3820 if (pdev->dev.of_node) {
Jeff Hugo412356e2012-09-27 17:14:23 -06003821 ret = smd_core_devicetree_init(pdev);
3822 if (ret) {
3823 pr_err("%s: device tree init failed\n",
3824 __func__);
3825 return ret;
3826 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003827 smd_dev = &pdev->dev;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303828 } else if (pdev->dev.platform_data) {
3829 ret = smd_core_platform_init(pdev);
3830 if (ret) {
3831 pr_err(
3832 "SMD: smd_core_platform_init() failed\n");
3833 return -ENODEV;
3834 }
3835 } else {
3836 ret = smd_core_init();
3837 if (ret) {
3838 pr_err("smd_core_init() failed\n");
3839 return -ENODEV;
3840 }
3841 }
3842 } else {
3843 pr_err("SMD: PDEV not found\n");
3844 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003845 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003846
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003847 smd_initialized = 1;
3848
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003849 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003850 smsm_irq_handler(0, 0);
3851 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003852
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003853 return 0;
3854}
3855
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003856static int restart_notifier_cb(struct notifier_block *this,
3857 unsigned long code,
3858 void *data);
3859
3860static struct restart_notifier_block restart_notifiers[] = {
Eric Holmbergca7ead22011-12-01 17:21:15 -07003861 {SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
3862 {SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
Sameer Thalappil52381142012-10-04 17:22:24 -07003863 {SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
Eric Holmbergca7ead22011-12-01 17:21:15 -07003864 {SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
Eric Holmberg8b0e74f2012-02-08 09:56:17 -07003865 {SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
Jeff Hugod3cf6ec2012-09-26 15:30:10 -06003866 {SMD_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003867};
3868
3869static int restart_notifier_cb(struct notifier_block *this,
3870 unsigned long code,
3871 void *data)
3872{
Jeff Hugo73f356f2012-12-14 17:56:19 -07003873 /*
3874 * Some SMD or SMSM clients assume SMD/SMSM SSR handling will be
3875 * done in the AFTER_SHUTDOWN level. If this ever changes, extra
3876 * care should be taken to verify no clients are broken.
3877 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003878 if (code == SUBSYS_AFTER_SHUTDOWN) {
3879 struct restart_notifier_block *notifier;
3880
3881 notifier = container_of(this,
3882 struct restart_notifier_block, nb);
3883 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3884 __func__, notifier->processor,
3885 notifier->name);
3886
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003887 remote_spin_release(&remote_spinlock, notifier->processor);
3888 remote_spin_release_all(notifier->processor);
3889
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003890 smd_channel_reset(notifier->processor);
3891 }
3892
3893 return NOTIFY_DONE;
3894}
3895
3896static __init int modem_restart_late_init(void)
3897{
3898 int i;
3899 void *handle;
3900 struct restart_notifier_block *nb;
3901
3902 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3903 nb = &restart_notifiers[i];
3904 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3905 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3906 __func__, nb->name, handle);
3907 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003908
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003909 return 0;
3910}
3911late_initcall(modem_restart_late_init);
3912
/* Device tree match table: binds this driver to "qcom,smem" nodes. */
static struct of_device_id msm_smem_match_table[] = {
	{ .compatible = "qcom,smem" },
	{},
};
3917
/*
 * Platform driver registration; probed either via the DT match table
 * above or via a legacy board-file platform device named MODULE_NAME.
 */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
		.of_match_table = msm_smem_match_table,
	},
};
3926
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003927int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003928{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003929 static bool registered;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003930 int rc;
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003931
3932 if (registered)
3933 return 0;
3934
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05303935 smd_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smd");
3936 if (!smd_log_ctx) {
3937 pr_err("%s: unable to create logging context\n", __func__);
3938 msm_smd_debug_mask = 0;
3939 }
3940
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003941 registered = true;
Jeff Hugob9fb9402013-05-15 09:58:54 -06003942 rc = init_smem_remote_spinlock();
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003943 if (rc) {
3944 pr_err("%s: remote spinlock init failed %d\n", __func__, rc);
3945 return rc;
3946 }
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003947
3948 rc = platform_driver_register(&msm_smd_driver);
3949 if (rc) {
3950 pr_err("%s: msm_smd_driver register failed %d\n",
3951 __func__, rc);
3952 return rc;
3953 }
3954
3955 smd_module_init_notify(0, NULL);
3956
3957 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003958}
3959
3960module_init(msm_smd_init);
3961
3962MODULE_DESCRIPTION("MSM Shared Memory Core");
3963MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3964MODULE_LICENSE("GPL");