blob: 10e40b4b01db8e31fbcff77fc2a12c1b6cb222e9 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmberg6275b602012-11-19 13:05:04 -07004 * Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f9412012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Eric Holmberg144c2de2012-10-04 13:37:28 -060038#include <linux/suspend.h>
Jeff Hugo412356e2012-09-27 17:14:23 -060039#include <linux/of.h>
40#include <linux/of_irq.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070041#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070042#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070043#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070044#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053045#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070046#include <mach/proc_comm.h>
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +053047#include <mach/msm_ipc_logging.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053048#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070049
50#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070051#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070052
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060054 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070056#define CONFIG_QDSP6 1
57#endif
58
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060059#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
60 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070061#define CONFIG_DSPS 1
62#endif
63
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060064#if defined(CONFIG_ARCH_MSM8960) \
65 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070066#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060067#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070068#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070069
70#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071#define SMEM_VERSION 0x000B
72#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070073#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060074#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Eric Holmberge5266d32013-02-25 18:29:27 -070075#define RSPIN_INIT_WAIT_MS 1000
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076
77uint32_t SMSM_NUM_ENTRIES = 8;
78uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070079
Eric Holmberge8a39322012-04-03 15:14:02 -060080/* Legacy SMSM interrupt notifications */
81#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
82 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070083
84enum {
85 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086 MSM_SMSM_DEBUG = 1U << 1,
87 MSM_SMD_INFO = 1U << 2,
88 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070089 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070090};
91
92struct smsm_shared_info {
93 uint32_t *state;
94 uint32_t *intr_mask;
95 uint32_t *intr_mux;
96};
97
98static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f9412012-03-19 10:04:22 -060099static struct kfifo smsm_snapshot_fifo;
100static struct wake_lock smsm_snapshot_wakelock;
101static int smsm_snapshot_count;
102static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700103
104struct smsm_size_info_type {
105 uint32_t num_hosts;
106 uint32_t num_entries;
107 uint32_t reserved0;
108 uint32_t reserved1;
109};
110
111struct smsm_state_cb_info {
112 struct list_head cb_list;
113 uint32_t mask;
114 void *data;
115 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
116};
117
118struct smsm_state_info {
119 struct list_head callbacks;
120 uint32_t last_value;
Eric Holmberge8a39322012-04-03 15:14:02 -0600121 uint32_t intr_mask_set;
122 uint32_t intr_mask_clear;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700123};
124
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530125struct interrupt_config_item {
126 /* must be initialized */
127 irqreturn_t (*irq_handler)(int req, void *data);
128 /* outgoing interrupt config (set from platform data) */
129 uint32_t out_bit_pos;
130 void __iomem *out_base;
131 uint32_t out_offset;
Eric Holmbergdeace152012-07-25 12:17:11 -0600132 int irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530133};
134
135struct interrupt_config {
136 struct interrupt_config_item smd;
137 struct interrupt_config_item smsm;
138};
139
140static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700141static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530142static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700143static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530144static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700145static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530146static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700147static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600148static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530149static irqreturn_t smsm_irq_handler(int irq, void *data);
150
151static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
152 [SMD_MODEM] = {
153 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700154 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530155 },
156 [SMD_Q6] = {
157 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700158 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530159 },
160 [SMD_DSPS] = {
161 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700162 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530163 },
164 [SMD_WCNSS] = {
165 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700166 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530167 },
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600168 [SMD_RPM] = {
169 .smd.irq_handler = smd_rpm_irq_handler,
170 .smsm.irq_handler = NULL, /* does not support smsm */
171 },
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530172};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600173
174struct smem_area {
175 void *phys_addr;
176 unsigned size;
177 void __iomem *virt_addr;
178};
179static uint32_t num_smem_areas;
180static struct smem_area *smem_areas;
181static void *smem_range_check(void *base, unsigned offset);
182
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700183struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530184
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700185#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
186#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
187 entry * SMSM_NUM_HOSTS + host)
188#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
189
190/* Internal definitions which are not exported in some targets */
191enum {
192 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700193};
194
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530195static int msm_smd_debug_mask = MSM_SMx_POWER_INFO;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700196module_param_named(debug_mask, msm_smd_debug_mask,
197 int, S_IRUGO | S_IWUSR | S_IWGRP);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530198static void *smd_log_ctx;
199#define NUM_LOG_PAGES 4
200
201#define IPC_LOG(level, x...) do { \
202 if (smd_log_ctx) \
203 ipc_log_string(smd_log_ctx, x); \
204 else \
205 printk(level x); \
206 } while (0)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700207
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700208#if defined(CONFIG_MSM_SMD_DEBUG)
209#define SMD_DBG(x...) do { \
210 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530211 IPC_LOG(KERN_DEBUG, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700212 } while (0)
213
214#define SMSM_DBG(x...) do { \
215 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530216 IPC_LOG(KERN_DEBUG, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217 } while (0)
218
219#define SMD_INFO(x...) do { \
220 if (msm_smd_debug_mask & MSM_SMD_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530221 IPC_LOG(KERN_INFO, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700222 } while (0)
223
224#define SMSM_INFO(x...) do { \
225 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530226 IPC_LOG(KERN_INFO, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700228#define SMx_POWER_INFO(x...) do { \
229 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530230 IPC_LOG(KERN_INFO, x); \
Eric Holmberg98c6c642012-02-24 11:29:35 -0700231 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232#else
233#define SMD_DBG(x...) do { } while (0)
234#define SMSM_DBG(x...) do { } while (0)
235#define SMD_INFO(x...) do { } while (0)
236#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700237#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700238#endif
239
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700240static unsigned last_heap_free = 0xffffffff;
241
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242static inline void smd_write_intr(unsigned int val,
243 const void __iomem *addr);
244
245#if defined(CONFIG_ARCH_MSM7X30)
246#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530247 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700248#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530249 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700250#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530251 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700252#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530253 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700254#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600255#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256#define MSM_TRIG_A2WCNSS_SMD_INT
257#define MSM_TRIG_A2WCNSS_SMSM_INT
258#elif defined(CONFIG_ARCH_MSM8X60)
259#define MSM_TRIG_A2M_SMD_INT \
260 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
261#define MSM_TRIG_A2Q6_SMD_INT \
262 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
263#define MSM_TRIG_A2M_SMSM_INT \
264 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
265#define MSM_TRIG_A2Q6_SMSM_INT \
266 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
267#define MSM_TRIG_A2DSPS_SMD_INT \
268 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600269#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700270#define MSM_TRIG_A2WCNSS_SMD_INT
271#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600272#elif defined(CONFIG_ARCH_MSM9615)
273#define MSM_TRIG_A2M_SMD_INT \
274 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
275#define MSM_TRIG_A2Q6_SMD_INT \
276 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
277#define MSM_TRIG_A2M_SMSM_INT \
278 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
279#define MSM_TRIG_A2Q6_SMSM_INT \
280 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
281#define MSM_TRIG_A2DSPS_SMD_INT
282#define MSM_TRIG_A2DSPS_SMSM_INT
283#define MSM_TRIG_A2WCNSS_SMD_INT
284#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285#elif defined(CONFIG_ARCH_FSM9XXX)
286#define MSM_TRIG_A2Q6_SMD_INT \
287 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
288#define MSM_TRIG_A2Q6_SMSM_INT \
289 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
290#define MSM_TRIG_A2M_SMD_INT \
291 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
292#define MSM_TRIG_A2M_SMSM_INT \
293 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
294#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600295#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700296#define MSM_TRIG_A2WCNSS_SMD_INT
297#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700298#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700299#define MSM_TRIG_A2M_SMD_INT \
300 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700301#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700302#define MSM_TRIG_A2M_SMSM_INT \
303 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700304#define MSM_TRIG_A2Q6_SMSM_INT
305#define MSM_TRIG_A2DSPS_SMD_INT
306#define MSM_TRIG_A2DSPS_SMSM_INT
307#define MSM_TRIG_A2WCNSS_SMD_INT
308#define MSM_TRIG_A2WCNSS_SMSM_INT
309#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
310#define MSM_TRIG_A2M_SMD_INT \
311 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
312#define MSM_TRIG_A2Q6_SMD_INT
313#define MSM_TRIG_A2M_SMSM_INT \
314 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
315#define MSM_TRIG_A2Q6_SMSM_INT
316#define MSM_TRIG_A2DSPS_SMD_INT
317#define MSM_TRIG_A2DSPS_SMSM_INT
318#define MSM_TRIG_A2WCNSS_SMD_INT
319#define MSM_TRIG_A2WCNSS_SMSM_INT
320#else /* use platform device / device tree configuration */
321#define MSM_TRIG_A2M_SMD_INT
322#define MSM_TRIG_A2Q6_SMD_INT
323#define MSM_TRIG_A2M_SMSM_INT
324#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700325#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600326#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700327#define MSM_TRIG_A2WCNSS_SMD_INT
328#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700329#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700330
Jeff Hugoee40b152012-02-09 17:39:47 -0700331/*
332 * stub out legacy macros if they are not being used so that the legacy
333 * code compiles even though it is not used
334 *
335 * these definitions should not be used in active code and will cause
336 * an early failure
337 */
338#ifndef INT_A9_M2A_0
339#define INT_A9_M2A_0 -1
340#endif
341#ifndef INT_A9_M2A_5
342#define INT_A9_M2A_5 -1
343#endif
344#ifndef INT_ADSP_A11
345#define INT_ADSP_A11 -1
346#endif
347#ifndef INT_ADSP_A11_SMSM
348#define INT_ADSP_A11_SMSM -1
349#endif
350#ifndef INT_DSPS_A11
351#define INT_DSPS_A11 -1
352#endif
353#ifndef INT_DSPS_A11_SMSM
354#define INT_DSPS_A11_SMSM -1
355#endif
356#ifndef INT_WCNSS_A11
357#define INT_WCNSS_A11 -1
358#endif
359#ifndef INT_WCNSS_A11_SMSM
360#define INT_WCNSS_A11_SMSM -1
361#endif
362
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700363#define SMD_LOOPBACK_CID 100
364
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600365#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
366static remote_spinlock_t remote_spinlock;
367
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700368static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700369static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600370static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700371
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600372static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373static void notify_smsm_cb_clients_worker(struct work_struct *work);
374static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600375static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700376static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530377static int spinlocks_initialized;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -0600378
379/**
380 * Variables to indicate smd module initialization.
381 * Dependents to register for smd module init notifier.
382 */
383static int smd_module_inited;
384static RAW_NOTIFIER_HEAD(smd_module_init_notifier_list);
385static DEFINE_MUTEX(smd_module_init_notifier_lock);
386static void smd_module_init_notify(uint32_t state, void *data);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530387static int smd_stream_write_avail(struct smd_channel *ch);
388static int smd_stream_read_avail(struct smd_channel *ch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700389
390static inline void smd_write_intr(unsigned int val,
391 const void __iomem *addr)
392{
393 wmb();
394 __raw_writel(val, addr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700395}
396
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530397static inline void log_notify(uint32_t subsystem, smd_channel_t *ch)
398{
399 const char *subsys = smd_edge_to_subsystem(subsystem);
400
Jay Chokshi83b4f6132013-02-14 16:20:56 -0800401 (void) subsys;
402
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530403 if (!ch)
404 SMx_POWER_INFO("Apps->%s\n", subsys);
405 else
406 SMx_POWER_INFO(
407 "Apps->%s ch%d '%s': tx%d/rx%d %dr/%dw : %dr/%dw\n",
408 subsys, ch->n, ch->name,
409 ch->fifo_size -
410 (smd_stream_write_avail(ch) + 1),
411 smd_stream_read_avail(ch),
412 ch->half_ch->get_tail(ch->send),
413 ch->half_ch->get_head(ch->send),
414 ch->half_ch->get_tail(ch->recv),
415 ch->half_ch->get_head(ch->recv)
416 );
417}
418
419static inline void notify_modem_smd(smd_channel_t *ch)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700420{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530421 static const struct interrupt_config_item *intr
422 = &private_intr_config[SMD_MODEM].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530423
424 log_notify(SMD_APPS_MODEM, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700425 if (intr->out_base) {
426 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530427 smd_write_intr(intr->out_bit_pos,
428 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700429 } else {
430 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530431 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700432 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700433}
434
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530435static inline void notify_dsp_smd(smd_channel_t *ch)
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700436{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530437 static const struct interrupt_config_item *intr
438 = &private_intr_config[SMD_Q6].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530439
440 log_notify(SMD_APPS_QDSP, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700441 if (intr->out_base) {
442 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530443 smd_write_intr(intr->out_bit_pos,
444 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700445 } else {
446 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530447 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700448 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700449}
450
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530451static inline void notify_dsps_smd(smd_channel_t *ch)
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530452{
453 static const struct interrupt_config_item *intr
454 = &private_intr_config[SMD_DSPS].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530455
456 log_notify(SMD_APPS_DSPS, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700457 if (intr->out_base) {
458 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530459 smd_write_intr(intr->out_bit_pos,
460 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700461 } else {
462 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530463 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700464 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530465}
466
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530467static inline void notify_wcnss_smd(struct smd_channel *ch)
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530468{
469 static const struct interrupt_config_item *intr
470 = &private_intr_config[SMD_WCNSS].smd;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530471
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530472 log_notify(SMD_APPS_WCNSS, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700473 if (intr->out_base) {
474 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530475 smd_write_intr(intr->out_bit_pos,
476 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700477 } else {
478 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530479 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700480 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530481}
482
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530483static inline void notify_rpm_smd(smd_channel_t *ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600484{
485 static const struct interrupt_config_item *intr
486 = &private_intr_config[SMD_RPM].smd;
487
488 if (intr->out_base) {
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530489 log_notify(SMD_APPS_RPM, ch);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600490 ++interrupt_stats[SMD_RPM].smd_out_config_count;
491 smd_write_intr(intr->out_bit_pos,
492 intr->out_base + intr->out_offset);
493 }
494}
495
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530496static inline void notify_modem_smsm(void)
497{
498 static const struct interrupt_config_item *intr
499 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700500 if (intr->out_base) {
501 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530502 smd_write_intr(intr->out_bit_pos,
503 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700504 } else {
505 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530506 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700507 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530508}
509
510static inline void notify_dsp_smsm(void)
511{
512 static const struct interrupt_config_item *intr
513 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700514 if (intr->out_base) {
515 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530516 smd_write_intr(intr->out_bit_pos,
517 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700518 } else {
519 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530520 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700521 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530522}
523
524static inline void notify_dsps_smsm(void)
525{
526 static const struct interrupt_config_item *intr
527 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700528 if (intr->out_base) {
529 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530530 smd_write_intr(intr->out_bit_pos,
531 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700532 } else {
533 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530534 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700535 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530536}
537
538static inline void notify_wcnss_smsm(void)
539{
540 static const struct interrupt_config_item *intr
541 = &private_intr_config[SMD_WCNSS].smsm;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530542
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700543 if (intr->out_base) {
544 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530545 smd_write_intr(intr->out_bit_pos,
546 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700547 } else {
548 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530549 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700550 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530551}
552
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700553static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
554{
555 /* older protocol don't use smsm_intr_mask,
556 but still communicates with modem */
557 if (!smsm_info.intr_mask ||
558 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
559 & notify_mask))
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530560 notify_modem_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700561
562 if (smsm_info.intr_mask &&
563 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
564 & notify_mask)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700565 uint32_t mux_val;
566
Eric Holmberg6282c5d2011-10-27 17:30:57 -0600567 if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700568 mux_val = __raw_readl(
569 SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
570 mux_val++;
571 __raw_writel(mux_val,
572 SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
573 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530574 notify_dsp_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700575 }
576
577 if (smsm_info.intr_mask &&
578 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
579 & notify_mask)) {
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530580 notify_wcnss_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700581 }
582
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600583 if (smsm_info.intr_mask &&
584 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
585 & notify_mask)) {
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530586 notify_dsps_smsm();
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600587 }
588
Eric Holmbergda31d042012-03-28 14:01:02 -0600589 /*
590 * Notify local SMSM callback clients without wakelock since this
591 * code is used by power management during power-down/-up sequencing
592 * on DEM-based targets. Grabbing a wakelock in this case will
593 * abort the power-down sequencing.
594 */
Eric Holmberg51676a12012-07-10 18:45:23 -0600595 if (smsm_info.intr_mask &&
596 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
597 & notify_mask)) {
598 smsm_cb_snapshot(0);
599 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700600}
601
Eric Holmberg144c2de2012-10-04 13:37:28 -0600602static int smsm_pm_notifier(struct notifier_block *nb,
603 unsigned long event, void *unused)
604{
605 switch (event) {
606 case PM_SUSPEND_PREPARE:
607 smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
608 break;
609
610 case PM_POST_SUSPEND:
611 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
612 break;
613 }
614 return NOTIFY_DONE;
615}
616
617static struct notifier_block smsm_pm_nb = {
618 .notifier_call = smsm_pm_notifier,
619 .priority = 0,
620};
621
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700622void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700623{
624 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700625 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700626
627 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
628 if (x != 0) {
629 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700630 SMD_INFO("smem: DIAG '%s'\n", x);
631 }
632
633 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
634 if (x != 0) {
635 x[size - 1] = 0;
636 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700637 }
638}
639
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700640
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700641static void handle_modem_crash(void)
642{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700643 pr_err("MODEM/AMSS has CRASHED\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700644 smd_diag();
645
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700646 /* hard reboot if possible FIXME
647 if (msm_reset_hook)
648 msm_reset_hook();
649 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700650
651 /* in this case the modem or watchdog should reboot us */
652 for (;;)
653 ;
654}
655
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700656int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700657{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700658 /* if the modem's not ready yet, we have to hope for the best */
659 if (!smsm_info.state)
660 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700661
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700662 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700663 handle_modem_crash();
664 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700665 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700666 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700667}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700668EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700669
/* the spinlock is used to synchronize between the
 * irq handler and code that mutates the channel
 * list or fiddles with channel state
 */
static DEFINE_SPINLOCK(smd_lock);
/* non-static: also taken by other SMEM users in this directory */
DEFINE_SPINLOCK(smem_lock);

/* the mutex is used during open() and close()
 * operations to avoid races while creating or
 * destroying smd_channel structures
 */
static DEFINE_MUTEX(smd_creation_mutex);

/* nonzero once SMD core initialization has completed */
static int smd_initialized;
684
/* v1 shared layout: each half-channel control block is immediately
 * followed by its data fifo in the same SMEM item */
struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

/* v2 shared layout: control blocks only; fifos live in separate
 * SMEM items */
struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

/* v2 layout for edges that require word (32-bit) accesses to
 * shared memory */
struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};

struct edge_to_pid {
	uint32_t local_pid;	/* processor ID of the local side */
	uint32_t remote_pid;	/* processor ID of the remote side */
	/* subsystem name; empty string when no name is assigned */
	char subsys_name[SMD_MAX_CH_NAME_LEN];
};

/**
 * Maps edge type to local and remote processor ID's.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "adsp"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
732
/* bookkeeping for a subsystem-restart notifier registration */
struct restart_notifier_block {
	unsigned processor;	/* processor ID being watched */
	char *name;		/* subsystem name used for registration */
	struct notifier_block nb;
};

/* nonzero to skip the SMSM reset handshake during subsystem restart */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* per-state and per-edge channel lists; all protected by smd_lock */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one flag per entry of the 64-slot SMEM channel allocation table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
763
/*
 * Worker that walks the 64-entry SMEM channel allocation table and
 * creates a local smd_channel for each newly allocated entry whose
 * edge involves the APPS processor.  Serialized via smd_probe_lock;
 * already-claimed slots are remembered in smd_ch_allocated[].
 */
static void smd_channel_probe_worker(struct work_struct *work)
{
	struct smd_alloc_elm *shared;
	unsigned n;
	uint32_t type;

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);

	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	mutex_lock(&smd_probe_lock);
	for (n = 0; n < 64; n++) {
		if (smd_ch_allocated[n])
			continue;

		/* channel should be allocated only if APPS
		   processor is involved */
		type = SMD_CHANNEL_TYPE(shared[n].type);
		if (type >= ARRAY_SIZE(edge_to_pids) ||
				edge_to_pids[type].local_pid != SMD_APPS)
			continue;
		/* skip entries the remote side has not populated yet */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		if (!smd_alloc_channel(&shared[n]))
			smd_ch_allocated[n] = 1;
		else
			SMD_INFO("Probe skipping ch %d, not allocated\n", n);
	}
	mutex_unlock(&smd_probe_lock);
}
800
801/**
802 * Lookup processor ID and determine if it belongs to the proved edge
803 * type.
804 *
805 * @shared2: Pointer to v2 shared channel structure
806 * @type: Edge type
807 * @pid: Processor ID of processor on edge
808 * @local_ch: Channel that belongs to processor @pid
809 * @remote_ch: Other side of edge contained @pid
Jeff Hugo00be6282012-09-07 11:24:32 -0600810 * @is_word_access_ch: Bool, is this a word aligned access channel
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700811 *
812 * Returns 0 for not on edge, 1 for found on edge
813 */
Jeff Hugo00be6282012-09-07 11:24:32 -0600814static int pid_is_on_edge(void *shared2,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700815 uint32_t type, uint32_t pid,
Jeff Hugo00be6282012-09-07 11:24:32 -0600816 void **local_ch,
817 void **remote_ch,
818 int is_word_access_ch
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700819 )
820{
821 int ret = 0;
822 struct edge_to_pid *edge;
Jeff Hugo00be6282012-09-07 11:24:32 -0600823 void *ch0;
824 void *ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700825
826 *local_ch = 0;
827 *remote_ch = 0;
828
829 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
830 return 0;
831
Jeff Hugo00be6282012-09-07 11:24:32 -0600832 if (is_word_access_ch) {
833 ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
834 ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
835 } else {
836 ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
837 ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
838 }
839
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700840 edge = &edge_to_pids[type];
841 if (edge->local_pid != edge->remote_pid) {
842 if (pid == edge->local_pid) {
Jeff Hugo00be6282012-09-07 11:24:32 -0600843 *local_ch = ch0;
844 *remote_ch = ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700845 ret = 1;
846 } else if (pid == edge->remote_pid) {
Jeff Hugo00be6282012-09-07 11:24:32 -0600847 *local_ch = ch1;
848 *remote_ch = ch0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700849 ret = 1;
850 }
851 }
852
853 return ret;
854}
855
Eric Holmberg17992c12012-02-29 12:54:44 -0700856/*
857 * Returns a pointer to the subsystem name or NULL if no
858 * subsystem name is available.
859 *
860 * @type - Edge definition
861 */
862const char *smd_edge_to_subsystem(uint32_t type)
863{
864 const char *subsys = NULL;
865
866 if (type < ARRAY_SIZE(edge_to_pids)) {
867 subsys = edge_to_pids[type].subsys_name;
868 if (subsys[0] == 0x0)
869 subsys = NULL;
870 }
871 return subsys;
872}
873EXPORT_SYMBOL(smd_edge_to_subsystem);
874
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700875/*
876 * Returns a pointer to the subsystem name given the
877 * remote processor ID.
Arun Kumar Neelakantama35c7a72012-10-18 15:45:15 +0530878 * subsystem is not necessarily PIL-loadable
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700879 *
880 * @pid Remote processor ID
881 * @returns Pointer to subsystem name or NULL if not found
882 */
883const char *smd_pid_to_subsystem(uint32_t pid)
884{
885 const char *subsys = NULL;
886 int i;
887
888 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
Arun Kumar Neelakantama35c7a72012-10-18 15:45:15 +0530889 if (pid == edge_to_pids[i].remote_pid) {
890 if (edge_to_pids[i].subsys_name[0] != 0x0) {
891 subsys = edge_to_pids[i].subsys_name;
892 break;
893 } else if (pid == SMD_RPM) {
894 subsys = "rpm";
895 break;
896 }
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700897 }
898 }
899
900 return subsys;
901}
902EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700903
Jeff Hugo00be6282012-09-07 11:24:32 -0600904static void smd_reset_edge(void *void_ch, unsigned new_state,
905 int is_word_access_ch)
Eric Holmberg2a563c32011-10-05 14:51:43 -0600906{
Jeff Hugo00be6282012-09-07 11:24:32 -0600907 if (is_word_access_ch) {
908 struct smd_half_channel_word_access *ch =
909 (struct smd_half_channel_word_access *)(void_ch);
910 if (ch->state != SMD_SS_CLOSED) {
911 ch->state = new_state;
912 ch->fDSR = 0;
913 ch->fCTS = 0;
914 ch->fCD = 0;
915 ch->fSTATE = 1;
916 }
917 } else {
918 struct smd_half_channel *ch =
919 (struct smd_half_channel *)(void_ch);
920 if (ch->state != SMD_SS_CLOSED) {
921 ch->state = new_state;
922 ch->fDSR = 0;
923 ch->fCTS = 0;
924 ch->fCD = 0;
925 ch->fSTATE = 1;
926 }
Eric Holmberg2a563c32011-10-05 14:51:43 -0600927 }
928}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700929
/*
 * Walk the SMEM channel allocation table and, for every edge that
 * processor @pid participates in, force @pid's half-channel into
 * @new_state.  Used during subsystem restart.  Caller holds
 * smd_probe_lock and smd_lock.
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *local_ch;
	void *remote_ch;
	int is_word_access;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip table slots never populated by either side */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		/* v2 item size depends on the access-width variant */
		if (is_word_access)
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2_word_access));
		else
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
							is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch, is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);
	}
}
971
972
/*
 * Reset all SMD/SMSM state associated with a restarting subsystem.
 *
 * @restart_pid: processor ID of the subsystem being restarted
 *
 * Sequence: release remote spinlocks the dead processor may hold,
 * clear its SMSM state word (re-arming the init handshake for the
 * modem), then drive every edge it owns through CLOSING and finally
 * CLOSED, waking local clients and signalling remote processors after
 * each phase.  The mb() before each notify pass makes the shared-memory
 * writes visible before the interrupts are raised.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMx_POWER_INFO("%s: starting reset\n", __func__);

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd(NULL);
	notify_dsp_smd(NULL);
	notify_dsps_smd(NULL);
	notify_wcnss_smd(NULL);
	notify_rpm_smd(NULL);

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd(NULL);
	notify_dsp_smd(NULL);
	notify_dsps_smd(NULL);
	notify_wcnss_smd(NULL);
	notify_rpm_smd(NULL);

	SMx_POWER_INFO("%s: finished reset\n", __func__);
}
1043
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001044/* how many bytes are available for reading */
1045static int smd_stream_read_avail(struct smd_channel *ch)
1046{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001047 return (ch->half_ch->get_head(ch->recv) -
1048 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001049}
1050
1051/* how many bytes we are free to write */
1052static int smd_stream_write_avail(struct smd_channel *ch)
1053{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001054 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1055 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001056}
1057
1058static int smd_packet_read_avail(struct smd_channel *ch)
1059{
1060 if (ch->current_packet) {
1061 int n = smd_stream_read_avail(ch);
1062 if (n > ch->current_packet)
1063 n = ch->current_packet;
1064 return n;
1065 } else {
1066 return 0;
1067 }
1068}
1069
1070static int smd_packet_write_avail(struct smd_channel *ch)
1071{
1072 int n = smd_stream_write_avail(ch);
1073 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1074}
1075
1076static int ch_is_open(struct smd_channel *ch)
1077{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001078 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1079 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1080 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001081}
1082
1083/* provide a pointer and length to readable data in the fifo */
1084static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1085{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001086 unsigned head = ch->half_ch->get_head(ch->recv);
1087 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001088 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001089
1090 if (tail <= head)
1091 return head - tail;
1092 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001093 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001094}
1095
/* nonzero when the remote side has set fBLOCKREADINTR on our receive
 * half, asking us not to raise read interrupts */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1100
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	/* tail update must be visible to the remote before fTAIL is set */
	wmb();
	ch->half_ch->set_fTAIL(ch->send, 1);
}
1110
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * @_data:    destination buffer (kernel or user space), or NULL to discard
 * @len:      maximum number of bytes to consume
 * @user_buf: nonzero if @_data is a user-space pointer
 *
 * Returns the number of bytes consumed from the fifo.
 *
 * NOTE(review): when copy_to_user() copies only partially, the failure
 * is logged but the fifo is still advanced by the full chunk, so the
 * uncopied bytes are lost to the caller — confirm callers tolerate this.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* grab the next contiguous readable run */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1151
/* per-read hook for stream channels; intentionally a no-op (packet
 * channels use update_packet_state instead) */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1156
/*
 * If positioned between packets, consume the next SMD packet header
 * from the fifo and latch its payload length into ch->current_packet.
 * Loops so that zero-length packets are skipped.  hdr[0] carries the
 * payload size; the remaining header words are ignored here.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		ch->current_packet = hdr[0];
	}
}
1176
/* provide a pointer and length to next free space in the fifo */
static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->send);
	unsigned tail = ch->half_ch->get_tail(ch->send);
	*ptr = (void *) (ch->send_data + head);

	/* one byte is always kept free so head==tail means "empty",
	 * never "full" */
	if (head < tail) {
		return tail - head - 1;
	} else {
		if (tail == 0)
			return ch->fifo_size - head - 1;
		else
			return ch->fifo_size - head;
	}
}
1193
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* head update must be visible to the remote before fHEAD is set */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1205
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001206static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001207{
1208 if (n == SMD_SS_OPENED) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001209 ch->half_ch->set_fDSR(ch->send, 1);
1210 ch->half_ch->set_fCTS(ch->send, 1);
1211 ch->half_ch->set_fCD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001212 } else {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001213 ch->half_ch->set_fDSR(ch->send, 0);
1214 ch->half_ch->set_fCTS(ch->send, 0);
1215 ch->half_ch->set_fCD(ch->send, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001216 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001217 ch->half_ch->set_state(ch->send, n);
1218 ch->half_ch->set_fSTATE(ch->send, 1);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301219 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001220}
1221
/*
 * Kick the channel probe worker if the SMEM heap free offset moved
 * since the last check, i.e. something may have allocated a new
 * channel.  Reads the shared heap header directly from shared RAM.
 */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1230
/*
 * React to a remote half-channel state transition for @ch: update the
 * local send side to match and deliver SMD_EVENT_OPEN/CLOSE to the
 * channel owner.  Called with smd_lock held.
 *
 * @last: previously cached remote state (for logging)
 * @next: newly observed remote state; cached into ch->last_state
 */
static void smd_state_change(struct smd_channel *ch,
		unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is (re)opening: reset our fifo indices and
		 * follow into OPENING */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closed: hand off to the close workqueue */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
				&finalize_channel_close_work);
		}
		break;
	}
}
1276
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001277static void handle_smd_irq_closing_list(void)
1278{
1279 unsigned long flags;
1280 struct smd_channel *ch;
1281 struct smd_channel *index;
1282 unsigned tmp;
1283
1284 spin_lock_irqsave(&smd_lock, flags);
1285 list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001286 if (ch->half_ch->get_fSTATE(ch->recv))
1287 ch->half_ch->set_fSTATE(ch->recv, 0);
1288 tmp = ch->half_ch->get_state(ch->recv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001289 if (tmp != ch->last_state)
1290 smd_state_change(ch, ch->last_state, tmp);
1291 }
1292 spin_unlock_irqrestore(&smd_lock, flags);
1293}
1294
/*
 * Core interrupt servicing: for every channel on @list, harvest and
 * clear the remote-set event flags (fHEAD=data written, fTAIL=space
 * freed, fSTATE=state change), propagate state transitions, and
 * deliver SMD_EVENT_DATA / SMD_EVENT_STATUS callbacks.
 *
 * @notify: per-edge notify function; presumably kept for interface
 *          symmetry — it is not referenced in this body (confirm).
 */
static void handle_smd_irq(struct list_head *list,
		void (*notify)(smd_channel_t *ch))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* collect and acknowledge remote event flags */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		/* data was produced or consumed: refresh packet state
		 * and tell the client */
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO(
				"SMD ch%d '%s' Data event 0x%x tx%d/rx%d %dr/%dw : %dr/%dw\n",
				ch->n, ch->name,
				ch_flags,
				ch->fifo_size -
					(smd_stream_write_avail(ch) + 1),
				smd_stream_read_avail(ch),
				ch->half_ch->get_tail(ch->send),
				ch->half_ch->get_head(ch->send),
				ch->half_ch->get_tail(ch->recv),
				ch->half_ch->get_head(ch->recv)
				);
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* signal-only update without a state transition */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1354
/* log an incoming SMD interrupt from the remote side of @subsystem;
 * the (void) cast keeps subsys "used" when SMx_POWER_INFO compiles
 * down to a no-op */
static inline void log_irq(uint32_t subsystem)
{
	const char *subsys = smd_edge_to_subsystem(subsystem);

	(void) subsys;

	SMx_POWER_INFO("SMD Int %s->Apps\n", subsys);
}
1363
/* IRQ from the modem: count it, service modem-edge channels, and
 * progress any channels mid-close */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_MODEM);
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1372
/* IRQ from the QDSP (Q6): count it, service its channels, and
 * progress any channels mid-close */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_QDSP);
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001381
/* IRQ from the sensors DSP (DSPS): count it, service its channels,
 * and progress any channels mid-close */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_DSPS);
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001390
/* IRQ from WCNSS: count it, service its channels, and progress any
 * channels mid-close */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_WCNSS);
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1399
/* IRQ from the RPM: count it, service its channels, and progress any
 * channels mid-close */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_RPM);
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001408
/*
 * Tasklet body that services every edge's channel list as if its
 * interrupt had fired; used to catch up on events missed across
 * sleep or subsystem restart.
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1420
Brian Swetland37521a32009-07-01 18:30:47 -07001421static inline int smd_need_int(struct smd_channel *ch)
1422{
1423 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001424 if (ch->half_ch->get_fHEAD(ch->recv) ||
1425 ch->half_ch->get_fTAIL(ch->recv) ||
1426 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001427 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001428 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001429 return 1;
1430 }
1431 return 0;
1432}
1433
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001434void smd_sleep_exit(void)
1435{
1436 unsigned long flags;
1437 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001438 int need_int = 0;
1439
1440 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001441 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1442 if (smd_need_int(ch)) {
1443 need_int = 1;
1444 break;
1445 }
1446 }
1447 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1448 if (smd_need_int(ch)) {
1449 need_int = 1;
1450 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001451 }
1452 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001453 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1454 if (smd_need_int(ch)) {
1455 need_int = 1;
1456 break;
1457 }
1458 }
1459 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1460 if (smd_need_int(ch)) {
1461 need_int = 1;
1462 break;
1463 }
1464 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001465 spin_unlock_irqrestore(&smd_lock, flags);
1466 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001467
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001468 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001469 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001470 tasklet_schedule(&smd_fake_irq_tasklet);
1471 }
1472}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001473EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001474
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001475static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001476{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001477 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1478 return 0;
1479 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001480 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001481
1482 /* for cases where xfer type is 0 */
1483 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001484 return 0;
1485
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001486 /* for cases where xfer type is 0 */
1487 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1488 return 0;
1489
1490 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001491 return 1;
1492 else
1493 return 0;
1494}
1495
/*
 * Write up to @len bytes from @_data into the channel's outbound FIFO,
 * copying from user space when @user_buf is non-zero.  Returns the number
 * of bytes actually queued (which may be short if the FIFO fills), or
 * -EINVAL for a negative length.  The remote processor is signalled only
 * if at least one byte was queued.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
					int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			/* channel dropped mid-write: report zero bytes sent */
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				/* NOTE(review): a partial copy_from_user still
				 * advances the FIFO by xfer bytes below, so the
				 * uncopied tail is transmitted as-is — confirm
				 * callers tolerate this. */
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* kick the remote side only if something was actually queued */
	if (orig_len - len)
		ch->notify_other_cpu(ch);

	return orig_len - len;
}
1541
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001542static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1543 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001544{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001545 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001546 unsigned hdr[5];
1547
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001548 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001549 if (len < 0)
1550 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001551 else if (len == 0)
1552 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001553
1554 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1555 return -ENOMEM;
1556
1557 hdr[0] = len;
1558 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1559
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001560
1561 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1562 if (ret < 0 || ret != sizeof(hdr)) {
1563 SMD_DBG("%s failed to write pkt header: "
1564 "%d returned\n", __func__, ret);
1565 return -1;
1566 }
1567
1568
1569 ret = smd_stream_write(ch, _data, len, user_buf);
1570 if (ret < 0 || ret != len) {
1571 SMD_DBG("%s failed to write pkt data: "
1572 "%d returned\n", __func__, ret);
1573 return ret;
1574 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001575
1576 return len;
1577}
1578
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001579static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001580{
1581 int r;
1582
1583 if (len < 0)
1584 return -EINVAL;
1585
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001586 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001587 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301589 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001590
1591 return r;
1592}
1593
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001594static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001595{
1596 unsigned long flags;
1597 int r;
1598
1599 if (len < 0)
1600 return -EINVAL;
1601
1602 if (len > ch->current_packet)
1603 len = ch->current_packet;
1604
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001605 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001606 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001607 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301608 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001609
1610 spin_lock_irqsave(&smd_lock, flags);
1611 ch->current_packet -= r;
1612 update_packet_state(ch);
1613 spin_unlock_irqrestore(&smd_lock, flags);
1614
1615 return r;
1616}
1617
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001618static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1619 int user_buf)
1620{
1621 int r;
1622
1623 if (len < 0)
1624 return -EINVAL;
1625
1626 if (len > ch->current_packet)
1627 len = ch->current_packet;
1628
1629 r = ch_read(ch, data, len, user_buf);
1630 if (r > 0)
1631 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301632 ch->notify_other_cpu(ch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001633
1634 ch->current_packet -= r;
1635 update_packet_state(ch);
1636
1637 return r;
1638}
1639
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301640#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001641static int smd_alloc_v2(struct smd_channel *ch)
1642{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001643 void *buffer;
1644 unsigned buffer_sz;
1645
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001646 if (is_word_access_ch(ch->type)) {
1647 struct smd_shared_v2_word_access *shared2;
1648 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1649 sizeof(*shared2));
1650 if (!shared2) {
1651 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1652 return -EINVAL;
1653 }
1654 ch->send = &shared2->ch0;
1655 ch->recv = &shared2->ch1;
1656 } else {
1657 struct smd_shared_v2 *shared2;
1658 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1659 sizeof(*shared2));
1660 if (!shared2) {
1661 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1662 return -EINVAL;
1663 }
1664 ch->send = &shared2->ch0;
1665 ch->recv = &shared2->ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001666 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001667 ch->half_ch = get_half_ch_funcs(ch->type);
1668
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001669 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1670 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301671 SMD_INFO("smem_get_entry failed\n");
1672 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001673 }
1674
1675 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301676 if (buffer_sz & (buffer_sz - 1)) {
1677 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1678 return -EINVAL;
1679 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001680 buffer_sz /= 2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001681 ch->send_data = buffer;
1682 ch->recv_data = buffer + buffer_sz;
1683 ch->fifo_size = buffer_sz;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001684
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001685 return 0;
1686}
1687
1688static int smd_alloc_v1(struct smd_channel *ch)
1689{
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301690 return -EINVAL;
1691}
1692
1693#else /* define v1 for older targets */
1694static int smd_alloc_v2(struct smd_channel *ch)
1695{
1696 return -EINVAL;
1697}
1698
1699static int smd_alloc_v1(struct smd_channel *ch)
1700{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001701 struct smd_shared_v1 *shared1;
1702 shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
1703 if (!shared1) {
1704 pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301705 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001706 }
1707 ch->send = &shared1->ch0;
1708 ch->recv = &shared1->ch1;
1709 ch->send_data = shared1->data0;
1710 ch->recv_data = shared1->data1;
1711 ch->fifo_size = SMD_BUF_SIZE;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001712 ch->half_ch = get_half_ch_funcs(ch->type);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001713 return 0;
1714}
1715
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301716#endif
1717
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001718static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001719{
1720 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001721
1722 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1723 if (ch == 0) {
1724 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001725 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001726 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001727 ch->n = alloc_elm->cid;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001728 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001729
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001730 if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001731 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001732 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001733 }
1734
1735 ch->fifo_mask = ch->fifo_size - 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001736
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001737 /* probe_worker guarentees ch->type will be a valid type */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001738 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001739 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001740 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001741 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001742 else if (ch->type == SMD_APPS_DSPS)
1743 ch->notify_other_cpu = notify_dsps_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001744 else if (ch->type == SMD_APPS_WCNSS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001745 ch->notify_other_cpu = notify_wcnss_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001746 else if (ch->type == SMD_APPS_RPM)
1747 ch->notify_other_cpu = notify_rpm_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001748
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001749 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001750 ch->read = smd_packet_read;
1751 ch->write = smd_packet_write;
1752 ch->read_avail = smd_packet_read_avail;
1753 ch->write_avail = smd_packet_write_avail;
1754 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001755 ch->read_from_cb = smd_packet_read_from_cb;
1756 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001757 } else {
1758 ch->read = smd_stream_read;
1759 ch->write = smd_stream_write;
1760 ch->read_avail = smd_stream_read_avail;
1761 ch->write_avail = smd_stream_write_avail;
1762 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001763 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001764 }
1765
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001766 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1767 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001768
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001769 ch->pdev.name = ch->name;
1770 ch->pdev.id = ch->type;
1771
1772 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1773 ch->name, ch->n);
1774
1775 mutex_lock(&smd_creation_mutex);
1776 list_add(&ch->ch_list, &smd_ch_closed_list);
1777 mutex_unlock(&smd_creation_mutex);
1778
1779 platform_device_register(&ch->pdev);
1780 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
1781 /* create a platform driver to be used by smd_tty driver
1782 * so that it can access the loopback port
1783 */
1784 loopback_tty_pdev.id = ch->type;
1785 platform_device_register(&loopback_tty_pdev);
1786 }
1787 return 0;
1788}
1789
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301790static inline void notify_loopback_smd(smd_channel_t *ch_notif)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001791{
1792 unsigned long flags;
1793 struct smd_channel *ch;
1794
1795 spin_lock_irqsave(&smd_lock, flags);
1796 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1797 ch->notify(ch->priv, SMD_EVENT_DATA);
1798 }
1799 spin_unlock_irqrestore(&smd_lock, flags);
1800}
1801
1802static int smd_alloc_loopback_channel(void)
1803{
1804 static struct smd_half_channel smd_loopback_ctl;
1805 static char smd_loopback_data[SMD_BUF_SIZE];
1806 struct smd_channel *ch;
1807
1808 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1809 if (ch == 0) {
1810 pr_err("%s: out of memory\n", __func__);
1811 return -1;
1812 }
1813 ch->n = SMD_LOOPBACK_CID;
1814
1815 ch->send = &smd_loopback_ctl;
1816 ch->recv = &smd_loopback_ctl;
1817 ch->send_data = smd_loopback_data;
1818 ch->recv_data = smd_loopback_data;
1819 ch->fifo_size = SMD_BUF_SIZE;
1820
1821 ch->fifo_mask = ch->fifo_size - 1;
1822 ch->type = SMD_LOOPBACK_TYPE;
1823 ch->notify_other_cpu = notify_loopback_smd;
1824
1825 ch->read = smd_stream_read;
1826 ch->write = smd_stream_write;
1827 ch->read_avail = smd_stream_read_avail;
1828 ch->write_avail = smd_stream_write_avail;
1829 ch->update_state = update_stream_state;
1830 ch->read_from_cb = smd_stream_read;
1831
1832 memset(ch->name, 0, 20);
1833 memcpy(ch->name, "local_loopback", 14);
1834
1835 ch->pdev.name = ch->name;
1836 ch->pdev.id = ch->type;
1837
1838 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001839
1840 mutex_lock(&smd_creation_mutex);
1841 list_add(&ch->ch_list, &smd_ch_closed_list);
1842 mutex_unlock(&smd_creation_mutex);
1843
1844 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001845 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001846}
1847
/* Default channel callback installed when the client supplies none. */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1851
/*
 * Workqueue handler that completes channel teardown: moves every channel
 * parked on smd_ch_to_close_list back to the closed (reusable) list and
 * tells each client it may reopen.  Lock order — creation mutex taken
 * outside smd_lock — matches the rest of the file and must be preserved.
 */
static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;

	mutex_lock(&smd_creation_mutex);
	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		/* notify before resetting so the client sees REOPEN_READY */
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_creation_mutex);
}
1869
1870struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001871{
1872 struct smd_channel *ch;
1873
1874 mutex_lock(&smd_creation_mutex);
1875 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001876 if (!strcmp(name, ch->name) &&
1877 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001878 list_del(&ch->ch_list);
1879 mutex_unlock(&smd_creation_mutex);
1880 return ch;
1881 }
1882 }
1883 mutex_unlock(&smd_creation_mutex);
1884
1885 return NULL;
1886}
1887
/*
 * Open the channel named @name on the given @edge, installing @notify as
 * the event callback (invoked with @priv).  On success *_ch holds the
 * channel handle and the channel is queued on its edge's active list.
 * Returns 0 on success, -ENODEV if SMD is uninitialized or the channel
 * does not exist, -EAGAIN if the channel exists but is mid-close.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side, so open it unilaterally */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* queue the channel on its edge's active list */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1976
1977
/*
 * Legacy open helper: identical to smd_named_open_on_edge() with the
 * edge fixed to the modem (SMD_APPS_MODEM).
 */
int smd_open(const char *name, smd_channel_t **_ch,
	     void *priv, void (*notify)(void *, unsigned))
{
	return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
				      notify);
}
EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001985
/*
 * Close an open channel.  The local end goes to SMD_SS_CLOSED right away;
 * if the remote end is still open the channel is parked on the closing
 * list until the remote side acknowledges, otherwise it returns directly
 * to the closed (reusable) list.  Returns 0 on success, -1 on NULL handle.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* loopback has no remote side: clear the flags directly */
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* remote end still open: complete teardown asynchronously */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
2019
2020int smd_write_start(smd_channel_t *ch, int len)
2021{
2022 int ret;
2023 unsigned hdr[5];
2024
2025 if (!ch) {
2026 pr_err("%s: Invalid channel specified\n", __func__);
2027 return -ENODEV;
2028 }
2029 if (!ch->is_pkt_ch) {
2030 pr_err("%s: non-packet channel specified\n", __func__);
2031 return -EACCES;
2032 }
2033 if (len < 1) {
2034 pr_err("%s: invalid length: %d\n", __func__, len);
2035 return -EINVAL;
2036 }
2037
2038 if (ch->pending_pkt_sz) {
2039 pr_err("%s: packet of size: %d in progress\n", __func__,
2040 ch->pending_pkt_sz);
2041 return -EBUSY;
2042 }
2043 ch->pending_pkt_sz = len;
2044
2045 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
2046 ch->pending_pkt_sz = 0;
2047 SMD_DBG("%s: no space to write packet header\n", __func__);
2048 return -EAGAIN;
2049 }
2050
2051 hdr[0] = len;
2052 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
2053
2054
2055 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
2056 if (ret < 0 || ret != sizeof(hdr)) {
2057 ch->pending_pkt_sz = 0;
2058 pr_err("%s: packet header failed to write\n", __func__);
2059 return -EPERM;
2060 }
2061 return 0;
2062}
2063EXPORT_SYMBOL(smd_write_start);
2064
2065int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2066{
2067 int bytes_written;
2068
2069 if (!ch) {
2070 pr_err("%s: Invalid channel specified\n", __func__);
2071 return -ENODEV;
2072 }
2073 if (len < 1) {
2074 pr_err("%s: invalid length: %d\n", __func__, len);
2075 return -EINVAL;
2076 }
2077
2078 if (!ch->pending_pkt_sz) {
2079 pr_err("%s: no transaction in progress\n", __func__);
2080 return -ENOEXEC;
2081 }
2082 if (ch->pending_pkt_sz - len < 0) {
2083 pr_err("%s: segment of size: %d will make packet go over "
2084 "length\n", __func__, len);
2085 return -EINVAL;
2086 }
2087
2088 bytes_written = smd_stream_write(ch, data, len, user_buf);
2089
2090 ch->pending_pkt_sz -= bytes_written;
2091
2092 return bytes_written;
2093}
2094EXPORT_SYMBOL(smd_write_segment);
2095
/*
 * Finish the packet transaction begun by smd_write_start().  Succeeds
 * only once every byte promised in the header has been written via
 * smd_write_segment(); otherwise returns -E2BIG and the transaction
 * remains open.
 */
int smd_write_end(smd_channel_t *ch)
{

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (ch->pending_pkt_sz) {
		pr_err("%s: current packet not completely written\n", __func__);
		return -E2BIG;
	}

	return 0;
}
EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002111
2112int smd_read(smd_channel_t *ch, void *data, int len)
2113{
Jack Pham1b236d12012-03-19 15:27:18 -07002114 if (!ch) {
2115 pr_err("%s: Invalid channel specified\n", __func__);
2116 return -ENODEV;
2117 }
2118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002119 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002120}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002121EXPORT_SYMBOL(smd_read);
2122
2123int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2124{
Jack Pham1b236d12012-03-19 15:27:18 -07002125 if (!ch) {
2126 pr_err("%s: Invalid channel specified\n", __func__);
2127 return -ENODEV;
2128 }
2129
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002130 return ch->read(ch, data, len, 1);
2131}
2132EXPORT_SYMBOL(smd_read_user_buffer);
2133
2134int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2135{
Jack Pham1b236d12012-03-19 15:27:18 -07002136 if (!ch) {
2137 pr_err("%s: Invalid channel specified\n", __func__);
2138 return -ENODEV;
2139 }
2140
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002141 return ch->read_from_cb(ch, data, len, 0);
2142}
2143EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002144
2145int smd_write(smd_channel_t *ch, const void *data, int len)
2146{
Jack Pham1b236d12012-03-19 15:27:18 -07002147 if (!ch) {
2148 pr_err("%s: Invalid channel specified\n", __func__);
2149 return -ENODEV;
2150 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002151
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002152 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002153}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002154EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002155
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002156int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002157{
Jack Pham1b236d12012-03-19 15:27:18 -07002158 if (!ch) {
2159 pr_err("%s: Invalid channel specified\n", __func__);
2160 return -ENODEV;
2161 }
2162
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002163 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002164}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002166
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002167int smd_read_avail(smd_channel_t *ch)
2168{
Jack Pham1b236d12012-03-19 15:27:18 -07002169 if (!ch) {
2170 pr_err("%s: Invalid channel specified\n", __func__);
2171 return -ENODEV;
2172 }
2173
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002174 return ch->read_avail(ch);
2175}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002176EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002177
2178int smd_write_avail(smd_channel_t *ch)
2179{
Jack Pham1b236d12012-03-19 15:27:18 -07002180 if (!ch) {
2181 pr_err("%s: Invalid channel specified\n", __func__);
2182 return -ENODEV;
2183 }
2184
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002185 return ch->write_avail(ch);
2186}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002187EXPORT_SYMBOL(smd_write_avail);
2188
2189void smd_enable_read_intr(smd_channel_t *ch)
2190{
2191 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002192 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002193}
2194EXPORT_SYMBOL(smd_enable_read_intr);
2195
2196void smd_disable_read_intr(smd_channel_t *ch)
2197{
2198 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002199 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002200}
2201EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002202
/**
 * Enable/disable receive interrupts for the remote processor used by a
 * particular channel.
 * @ch: open channel handle to use for the edge
 * @mask: 1 = mask interrupts; 0 = unmask interrupts
 * @returns: 0 for success; < 0 for failure
 *
 * Note that this enables/disables all interrupts from the remote subsystem for
 * all channels. As such, it should be used with care and only for specific
 * use cases such as power-collapse sequencing.
 */
int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
{
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;
	struct interrupt_config_item *int_cfg;

	if (!ch)
		return -EINVAL;

	/* ch->type indexes the edge table; reject out-of-range edges */
	if (ch->type >= ARRAY_SIZE(edge_to_pids))
		return -ENODEV;

	/* look up the inbound SMD interrupt owned by this edge's remote
	 * processor */
	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;

	if (int_cfg->irq_id < 0)
		return -ENODEV;

	irq_chip = irq_get_chip(int_cfg->irq_id);
	if (!irq_chip)
		return -ENODEV;

	irq_data = irq_get_irq_data(int_cfg->irq_id);
	if (!irq_data)
		return -ENODEV;

	if (mask) {
		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_mask(irq_data);
	} else {
		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_unmask(irq_data);
	}

	return 0;
}
EXPORT_SYMBOL(smd_mask_receive_interrupt);
2252
/* Blocking wait-for-readable is not implemented; always fails with -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2257
/* Blocking wait-for-writable is not implemented; always fails with -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2262
2263int smd_cur_packet_size(smd_channel_t *ch)
2264{
Jack Pham1b236d12012-03-19 15:27:18 -07002265 if (!ch) {
2266 pr_err("%s: Invalid channel specified\n", __func__);
2267 return -ENODEV;
2268 }
2269
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002270 return ch->current_packet;
2271}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002272EXPORT_SYMBOL(smd_cur_packet_size);
2273
2274int smd_tiocmget(smd_channel_t *ch)
2275{
Jack Pham1b236d12012-03-19 15:27:18 -07002276 if (!ch) {
2277 pr_err("%s: Invalid channel specified\n", __func__);
2278 return -ENODEV;
2279 }
2280
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002281 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2282 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2283 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2284 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2285 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2286 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002287}
2288EXPORT_SYMBOL(smd_tiocmget);
2289
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002290/* this api will be called while holding smd_lock */
2291int
2292smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002293{
Jack Pham1b236d12012-03-19 15:27:18 -07002294 if (!ch) {
2295 pr_err("%s: Invalid channel specified\n", __func__);
2296 return -ENODEV;
2297 }
2298
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002299 if (set & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002300 ch->half_ch->set_fDSR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002301
2302 if (set & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002303 ch->half_ch->set_fCTS(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002304
2305 if (clear & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002306 ch->half_ch->set_fDSR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002307
2308 if (clear & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002309 ch->half_ch->set_fCTS(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002310
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002311 ch->half_ch->set_fSTATE(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002312 barrier();
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05302313 ch->notify_other_cpu(ch);
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002314
2315 return 0;
2316}
2317EXPORT_SYMBOL(smd_tiocmset_from_cb);
2318
2319int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2320{
2321 unsigned long flags;
2322
Jack Pham1b236d12012-03-19 15:27:18 -07002323 if (!ch) {
2324 pr_err("%s: Invalid channel specified\n", __func__);
2325 return -ENODEV;
2326 }
2327
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002328 spin_lock_irqsave(&smd_lock, flags);
2329 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002330 spin_unlock_irqrestore(&smd_lock, flags);
2331
2332 return 0;
2333}
2334EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002335
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002336int smd_is_pkt_avail(smd_channel_t *ch)
2337{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002338 unsigned long flags;
2339
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002340 if (!ch || !ch->is_pkt_ch)
2341 return -EINVAL;
2342
2343 if (ch->current_packet)
2344 return 1;
2345
Jeff Hugoa8549f12012-08-13 20:36:18 -06002346 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002347 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002348 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002349
2350 return ch->current_packet ? 1 : 0;
2351}
2352EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002353
2354
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002355/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002356
Jeff Hugobdc734d2012-03-26 16:05:39 -06002357/*
2358 * Shared Memory Range Check
2359 *
2360 * Takes a physical address and an offset and checks if the resulting physical
2361 * address would fit into one of the aux smem regions. If so, returns the
2362 * corresponding virtual address. Otherwise returns NULL. Expects the array
2363 * of smem regions to be in ascending physical address order.
2364 *
2365 * @base: physical base address to check
2366 * @offset: offset from the base to get the final address
2367 */
static void *smem_range_check(void *base, unsigned offset)
{
	int i;
	void *phys_addr;
	unsigned size;

	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;
		/* areas are sorted ascending by physical address, so once
		 * base falls below the current area's start it cannot match
		 * this or any later area */
		if (base < phys_addr)
			return NULL;
		if (base > phys_addr + size)
			continue;
		/* NOTE(review): base == phys_addr + size slips past the
		 * continue above, but the in-range test below still rejects
		 * it, so the outcome is unaffected */
		if (base >= phys_addr && base + offset < phys_addr + size)
			return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
2387
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002388/* smem_alloc returns the pointer to smem item if it is already allocated.
2389 * Otherwise, it returns NULL.
2390 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002391void *smem_alloc(unsigned id, unsigned size)
2392{
2393 return smem_find(id, size);
2394}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002395EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002396
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002397/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
2398 * it allocates it and then returns the pointer to it.
2399 */
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302400void *smem_alloc2(unsigned id, unsigned size_in)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002401{
2402 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2403 struct smem_heap_entry *toc = shared->heap_toc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002404 unsigned long flags;
2405 void *ret = NULL;
2406
2407 if (!shared->heap_info.initialized) {
2408 pr_err("%s: smem heap info not initialized\n", __func__);
2409 return NULL;
2410 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002411
2412 if (id >= SMEM_NUM_ITEMS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002413 return NULL;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002414
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002415 size_in = ALIGN(size_in, 8);
2416 remote_spin_lock_irqsave(&remote_spinlock, flags);
2417 if (toc[id].allocated) {
2418 SMD_DBG("%s: %u already allocated\n", __func__, id);
2419 if (size_in != toc[id].size)
2420 pr_err("%s: wrong size %u (expected %u)\n",
2421 __func__, toc[id].size, size_in);
2422 else
2423 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2424 } else if (id > SMEM_FIXED_ITEM_LAST) {
2425 SMD_DBG("%s: allocating %u\n", __func__, id);
2426 if (shared->heap_info.heap_remaining >= size_in) {
2427 toc[id].offset = shared->heap_info.free_offset;
2428 toc[id].size = size_in;
2429 wmb();
2430 toc[id].allocated = 1;
2431
2432 shared->heap_info.free_offset += size_in;
2433 shared->heap_info.heap_remaining -= size_in;
2434 ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
2435 } else
2436 pr_err("%s: not enough memory %u (required %u)\n",
2437 __func__, shared->heap_info.heap_remaining,
2438 size_in);
2439 }
2440 wmb();
2441 remote_spin_unlock_irqrestore(&remote_spinlock, flags);
2442 return ret;
2443}
Angshuman Sarkar4eade0d2011-08-17 14:06:23 +05302444EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002445
/*
 * Look up an smem item and report its recorded size.
 *
 * Uses the remote spinlock only once it has been initialized (early boot
 * callers may run before that).  Items whose TOC entry carries a base
 * address in the reserved field live in an auxiliary smem area and are
 * translated via smem_range_check().
 *
 * @id: SMEM item identifier
 * @size: out parameter; set to the item size, or 0 if not allocated
 * @returns virtual address of the item, or NULL/0 if invalid or unallocated
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		/* compiler barrier between reading size and reserved/offset */
		barrier();
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002478
2479void *smem_find(unsigned id, unsigned size_in)
2480{
2481 unsigned size;
2482 void *ptr;
2483
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002484 ptr = smem_get_entry(id, &size);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002485 if (!ptr)
2486 return 0;
2487
2488 size_in = ALIGN(size_in, 8);
2489 if (size_in != size) {
2490 pr_err("smem_find(%d, %d): wrong size %d\n",
2491 id, size_in, size);
2492 return 0;
2493 }
2494
2495 return ptr;
2496}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002497EXPORT_SYMBOL(smem_find);
2498
/*
 * Allocate the per-entry SMSM callback state and the single-threaded
 * workqueue used to deliver state-change notifications.
 *
 * @returns 0 on success, -ENOMEM if the state array cannot be allocated,
 *          -EFAULT if the workqueue cannot be created
 */
static int smsm_cb_init(void)
{
	struct smsm_state_info *state_info;
	int n;
	int ret = 0;

	smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
		   GFP_KERNEL);

	if (!smsm_states) {
		pr_err("%s: SMSM init failed\n", __func__);
		return -ENOMEM;
	}

	smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
	if (!smsm_cb_wq) {
		pr_err("%s: smsm_cb_wq creation failed\n", __func__);
		kfree(smsm_states);
		return -EFAULT;
	}

	mutex_lock(&smsm_lock);
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		state_info = &smsm_states[n];
		/* record the current value of each entry; the notify worker
		 * compares against last_value to detect changes */
		state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
		state_info->intr_mask_set = 0x0;
		state_info->intr_mask_clear = 0x0;
		INIT_LIST_HEAD(&state_info->callbacks);
	}
	mutex_unlock(&smsm_lock);

	return ret;
}
2532
/*
 * One-time SMSM initialization: verifies the inter-processor spinlock is
 * usable, sizes the SMSM tables from shared memory (when the remote side
 * publishes SMEM_SMSM_SIZE_INFO), allocates the snapshot FIFO/wakelock,
 * sets up the shared state/interrupt-mask/mux items, and registers the
 * callback infrastructure and PM notifier.
 *
 * @returns 0 on success, or a negative errno from FIFO or callback-state
 *          allocation; panics if the remote spinlock is never released
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;
	unsigned long flags;
	unsigned long j_start;

	/* Verify that remote spinlock is not deadlocked */
	j_start = jiffies;
	while (!remote_spin_trylock_irqsave(&remote_spinlock, flags)) {
		if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
			panic("%s: Remote processor %d will not release spinlock\n",
				__func__, remote_spin_owner(&remote_spinlock));
		}
	}
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	/* adopt table sizes published by the remote side, if present */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* start with all interrupts masked for apps */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* flush all shared-memory writes before declaring SMSM ready */
	wmb();

	smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL);
	i = register_pm_notifier(&smsm_pm_nb);
	if (i)
		pr_err("%s: power state notif error %d\n", __func__, i);

	return 0;
}
2617
2618void smsm_reset_modem(unsigned mode)
2619{
2620 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2621 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2622 } else if (mode == SMSM_MODEM_WAIT) {
2623 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2624 } else { /* reset_mode is SMSM_RESET or default */
2625 mode = SMSM_RESET;
2626 }
2627
2628 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2629}
2630EXPORT_SYMBOL(smsm_reset_modem);
2631
/*
 * Clear SMSM_MODEM_WAIT from the apps SMSM state so a modem parked via
 * smsm_reset_modem(SMSM_MODEM_WAIT) can proceed.  No-op if the shared
 * state area has not been set up yet.
 */
void smsm_reset_modem_cont(void)
{
	unsigned long flags;
	uint32_t state;

	if (!smsm_info.state)
		return;

	spin_lock_irqsave(&smem_lock, flags);
	state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
						& ~SMSM_MODEM_WAIT;
	__raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
	wmb(); /* commit the state write before releasing the lock */
	spin_unlock_irqrestore(&smem_lock, flags);
}
EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002648
Eric Holmbergda31d042012-03-28 14:01:02 -06002649static void smsm_cb_snapshot(uint32_t use_wakelock)
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002650{
2651 int n;
2652 uint32_t new_state;
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002653 unsigned long flags;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002654 int ret;
2655
2656 ret = kfifo_avail(&smsm_snapshot_fifo);
Eric Holmbergda31d042012-03-28 14:01:02 -06002657 if (ret < SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002658 pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
2659 return;
2660 }
2661
Eric Holmberg96b55f62012-04-03 19:10:46 -06002662 /*
2663 * To avoid a race condition with notify_smsm_cb_clients_worker, the
2664 * following sequence must be followed:
2665 * 1) increment snapshot count
2666 * 2) insert data into FIFO
2667 *
2668 * Potentially in parallel, the worker:
2669 * a) verifies >= 1 snapshots are in FIFO
2670 * b) processes snapshot
2671 * c) decrements reference count
2672 *
2673 * This order ensures that 1 will always occur before abc.
2674 */
Eric Holmbergda31d042012-03-28 14:01:02 -06002675 if (use_wakelock) {
2676 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2677 if (smsm_snapshot_count == 0) {
2678 SMx_POWER_INFO("SMSM snapshot wake lock\n");
2679 wake_lock(&smsm_snapshot_wakelock);
2680 }
2681 ++smsm_snapshot_count;
2682 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2683 }
Eric Holmberg96b55f62012-04-03 19:10:46 -06002684
2685 /* queue state entries */
2686 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2687 new_state = __raw_readl(SMSM_STATE_ADDR(n));
2688
2689 ret = kfifo_in(&smsm_snapshot_fifo,
2690 &new_state, sizeof(new_state));
2691 if (ret != sizeof(new_state)) {
2692 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2693 goto restore_snapshot_count;
2694 }
2695 }
2696
2697 /* queue wakelock usage flag */
2698 ret = kfifo_in(&smsm_snapshot_fifo,
2699 &use_wakelock, sizeof(use_wakelock));
2700 if (ret != sizeof(use_wakelock)) {
2701 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2702 goto restore_snapshot_count;
2703 }
2704
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002705 queue_work(smsm_cb_wq, &smsm_cb_work);
Eric Holmberg96b55f62012-04-03 19:10:46 -06002706 return;
2707
2708restore_snapshot_count:
2709 if (use_wakelock) {
2710 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2711 if (smsm_snapshot_count) {
2712 --smsm_snapshot_count;
2713 if (smsm_snapshot_count == 0) {
2714 SMx_POWER_INFO("SMSM snapshot wake unlock\n");
2715 wake_unlock(&smsm_snapshot_wakelock);
2716 }
2717 } else {
2718 pr_err("%s: invalid snapshot count\n", __func__);
2719 }
2720 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2721 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002722}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002723
/*
 * Common SMSM interrupt handler: snapshots SMSM state for the callback
 * worker and, for modem interrupts, runs the legacy apps<->modem
 * reset/init handshake state machine on the shared state words.
 *
 * @irq: interrupt number (INT_ADSP_A11_SMSM gets snapshot-only handling)
 * @data: unused
 * @returns IRQ_HANDLED always
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* track Q6->apps mux word changes on 8x50 targets */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				/* ack the reset; flush caches so shared
				 * memory is consistent for the restart */
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			/* declare RUN once all init stages are complete */
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2806
Eric Holmberg98c6c642012-02-24 11:29:35 -07002807static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002808{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002809 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002810 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002811 return smsm_irq_handler(irq, data);
2812}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002813
Eric Holmberg98c6c642012-02-24 11:29:35 -07002814static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2815{
2816 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002817 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002818 return smsm_irq_handler(irq, data);
2819}
2820
/* SMSM interrupt from the DSPS (sensors) processor: count it and delegate. */
static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2827
/* SMSM interrupt from the WCNSS (wireless) processor: count it and delegate. */
static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2834
Eric Holmberge8a39322012-04-03 15:14:02 -06002835/*
2836 * Changes the global interrupt mask. The set and clear masks are re-applied
2837 * every time the global interrupt mask is updated for callback registration
2838 * and de-registration.
2839 *
2840 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2841 * mask and the set mask, the result will be that the interrupt is set.
2842 *
2843 * @smsm_entry SMSM entry to change
2844 * @clear_mask 1 = clear bit, 0 = no-op
2845 * @set_mask 1 = set bit, 0 = no-op
2846 *
2847 * @returns 0 for success, < 0 for error
2848 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002849int smsm_change_intr_mask(uint32_t smsm_entry,
2850 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002851{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002852 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002853 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002854
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002855 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2856 pr_err("smsm_change_state: Invalid entry %d\n",
2857 smsm_entry);
2858 return -EINVAL;
2859 }
2860
2861 if (!smsm_info.intr_mask) {
2862 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002863 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002864 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002865
2866 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002867 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2868 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002869
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002870 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2871 new_mask = (old_mask & ~clear_mask) | set_mask;
2872 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002873
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002874 wmb();
2875 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002876
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002877 return 0;
2878}
2879EXPORT_SYMBOL(smsm_change_intr_mask);
2880
2881int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2882{
2883 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2884 pr_err("smsm_change_state: Invalid entry %d\n",
2885 smsm_entry);
2886 return -EINVAL;
2887 }
2888
2889 if (!smsm_info.intr_mask) {
2890 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2891 return -EIO;
2892 }
2893
2894 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2895 return 0;
2896}
2897EXPORT_SYMBOL(smsm_get_intr_mask);
2898
2899int smsm_change_state(uint32_t smsm_entry,
2900 uint32_t clear_mask, uint32_t set_mask)
2901{
2902 unsigned long flags;
2903 uint32_t old_state, new_state;
2904
2905 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2906 pr_err("smsm_change_state: Invalid entry %d",
2907 smsm_entry);
2908 return -EINVAL;
2909 }
2910
2911 if (!smsm_info.state) {
2912 pr_err("smsm_change_state <SM NO STATE>\n");
2913 return -EIO;
2914 }
2915 spin_lock_irqsave(&smem_lock, flags);
2916
2917 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2918 new_state = (old_state & ~clear_mask) | set_mask;
2919 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2920 SMSM_DBG("smsm_change_state %x\n", new_state);
2921 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002922
2923 spin_unlock_irqrestore(&smem_lock, flags);
2924
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002925 return 0;
2926}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002927EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002928
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002929uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002930{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002931 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002932
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002933 /* needs interface change to return error code */
2934 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2935 pr_err("smsm_change_state: Invalid entry %d",
2936 smsm_entry);
2937 return 0;
2938 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002939
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002940 if (!smsm_info.state) {
2941 pr_err("smsm_get_state <SM NO STATE>\n");
2942 } else {
2943 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2944 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002945
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002946 return rv;
2947}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002948EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002949
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002950/**
2951 * Performs SMSM callback client notifiction.
2952 */
2953void notify_smsm_cb_clients_worker(struct work_struct *work)
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002954{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002955 struct smsm_state_cb_info *cb_info;
2956 struct smsm_state_info *state_info;
2957 int n;
2958 uint32_t new_state;
2959 uint32_t state_changes;
Eric Holmbergda31d042012-03-28 14:01:02 -06002960 uint32_t use_wakelock;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002961 int ret;
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002962 unsigned long flags;
Brian Swetland03e00cd2009-07-01 17:58:37 -07002963
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002964 if (!smd_initialized)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002965 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002966
Eric Holmbergda31d042012-03-28 14:01:02 -06002967 while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002968 mutex_lock(&smsm_lock);
2969 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2970 state_info = &smsm_states[n];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002971
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002972 ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
2973 sizeof(new_state));
2974 if (ret != sizeof(new_state)) {
2975 pr_err("%s: snapshot underflow %d\n",
2976 __func__, ret);
2977 mutex_unlock(&smsm_lock);
2978 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002979 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002980
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002981 state_changes = state_info->last_value ^ new_state;
2982 if (state_changes) {
Eric Holmberg98c6c642012-02-24 11:29:35 -07002983 SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
2984 n, state_info->last_value,
2985 new_state);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002986 list_for_each_entry(cb_info,
2987 &state_info->callbacks, cb_list) {
2988
2989 if (cb_info->mask & state_changes)
2990 cb_info->notify(cb_info->data,
2991 state_info->last_value,
2992 new_state);
2993 }
2994 state_info->last_value = new_state;
2995 }
2996 }
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002997
Eric Holmbergda31d042012-03-28 14:01:02 -06002998 /* read wakelock flag */
2999 ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
3000 sizeof(use_wakelock));
3001 if (ret != sizeof(use_wakelock)) {
3002 pr_err("%s: snapshot underflow %d\n",
3003 __func__, ret);
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06003004 mutex_unlock(&smsm_lock);
Eric Holmbergda31d042012-03-28 14:01:02 -06003005 return;
Eric Holmberg59a9f9412012-03-19 10:04:22 -06003006 }
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06003007 mutex_unlock(&smsm_lock);
Eric Holmbergda31d042012-03-28 14:01:02 -06003008
3009 if (use_wakelock) {
3010 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
3011 if (smsm_snapshot_count) {
3012 --smsm_snapshot_count;
3013 if (smsm_snapshot_count == 0) {
3014 SMx_POWER_INFO("SMSM snapshot"
3015 " wake unlock\n");
3016 wake_unlock(&smsm_snapshot_wakelock);
3017 }
3018 } else {
3019 pr_err("%s: invalid snapshot count\n",
3020 __func__);
3021 }
3022 spin_unlock_irqrestore(&smsm_snapshot_count_lock,
3023 flags);
3024 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003025 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003026}
3027
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003028
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003029/**
3030 * Registers callback for SMSM state notifications when the specified
3031 * bits change.
3032 *
3033 * @smsm_entry Processor entry to deregister
3034 * @mask Bits to deregister (if result is 0, callback is removed)
3035 * @notify Notification function to deregister
3036 * @data Opaque data passed in to callback
3037 *
3038 * @returns Status code
3039 * <0 error code
3040 * 0 inserted new entry
3041 * 1 updated mask of existing entry
3042 */
3043int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
3044 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003045{
Eric Holmberge8a39322012-04-03 15:14:02 -06003046 struct smsm_state_info *state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003047 struct smsm_state_cb_info *cb_info;
3048 struct smsm_state_cb_info *cb_found = 0;
Eric Holmberge8a39322012-04-03 15:14:02 -06003049 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003050 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003051
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003052 if (smsm_entry >= SMSM_NUM_ENTRIES)
3053 return -EINVAL;
3054
Eric Holmbergc8002902011-09-16 13:55:57 -06003055 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003056
3057 if (!smsm_states) {
3058 /* smsm not yet initialized */
3059 ret = -ENODEV;
3060 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003061 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003062
Eric Holmberge8a39322012-04-03 15:14:02 -06003063 state = &smsm_states[smsm_entry];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003064 list_for_each_entry(cb_info,
Eric Holmberge8a39322012-04-03 15:14:02 -06003065 &state->callbacks, cb_list) {
3066 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003067 (cb_info->data == data)) {
3068 cb_info->mask |= mask;
3069 cb_found = cb_info;
3070 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003071 }
Eric Holmberge8a39322012-04-03 15:14:02 -06003072 new_mask |= cb_info->mask;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003073 }
3074
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003075 if (!cb_found) {
3076 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
3077 GFP_ATOMIC);
3078 if (!cb_info) {
3079 ret = -ENOMEM;
3080 goto cleanup;
3081 }
3082
3083 cb_info->mask = mask;
3084 cb_info->notify = notify;
3085 cb_info->data = data;
3086 INIT_LIST_HEAD(&cb_info->cb_list);
3087 list_add_tail(&cb_info->cb_list,
Eric Holmberge8a39322012-04-03 15:14:02 -06003088 &state->callbacks);
3089 new_mask |= mask;
3090 }
3091
3092 /* update interrupt notification mask */
3093 if (smsm_entry == SMSM_MODEM_STATE)
3094 new_mask |= LEGACY_MODEM_SMSM_MASK;
3095
3096 if (smsm_info.intr_mask) {
3097 unsigned long flags;
3098
3099 spin_lock_irqsave(&smem_lock, flags);
3100 new_mask = (new_mask & ~state->intr_mask_clear)
3101 | state->intr_mask_set;
3102 __raw_writel(new_mask,
3103 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
3104 wmb();
3105 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003106 }
3107
3108cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06003109 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003110 return ret;
3111}
3112EXPORT_SYMBOL(smsm_state_cb_register);
3113
3114
3115/**
3116 * Deregisters for SMSM state notifications for the specified bits.
3117 *
3118 * @smsm_entry Processor entry to deregister
3119 * @mask Bits to deregister (if result is 0, callback is removed)
3120 * @notify Notification function to deregister
3121 * @data Opaque data passed in to callback
3122 *
3123 * @returns Status code
3124 * <0 error code
3125 * 0 not found
3126 * 1 updated mask
3127 * 2 removed callback
3128 */
3129int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
3130 void (*notify)(void *, uint32_t, uint32_t), void *data)
3131{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003132 struct smsm_state_cb_info *cb_info;
Eric Holmberge8a39322012-04-03 15:14:02 -06003133 struct smsm_state_cb_info *cb_tmp;
3134 struct smsm_state_info *state;
3135 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003136 int ret = 0;
3137
3138 if (smsm_entry >= SMSM_NUM_ENTRIES)
3139 return -EINVAL;
3140
Eric Holmbergc8002902011-09-16 13:55:57 -06003141 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003142
3143 if (!smsm_states) {
3144 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06003145 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003146 return -ENODEV;
3147 }
3148
Eric Holmberge8a39322012-04-03 15:14:02 -06003149 state = &smsm_states[smsm_entry];
3150 list_for_each_entry_safe(cb_info, cb_tmp,
3151 &state->callbacks, cb_list) {
3152 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003153 (cb_info->data == data)) {
3154 cb_info->mask &= ~mask;
3155 ret = 1;
3156 if (!cb_info->mask) {
3157 /* no mask bits set, remove callback */
3158 list_del(&cb_info->cb_list);
3159 kfree(cb_info);
3160 ret = 2;
Eric Holmberge8a39322012-04-03 15:14:02 -06003161 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003162 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003163 }
Eric Holmberge8a39322012-04-03 15:14:02 -06003164 new_mask |= cb_info->mask;
3165 }
3166
3167 /* update interrupt notification mask */
3168 if (smsm_entry == SMSM_MODEM_STATE)
3169 new_mask |= LEGACY_MODEM_SMSM_MASK;
3170
3171 if (smsm_info.intr_mask) {
3172 unsigned long flags;
3173
3174 spin_lock_irqsave(&smem_lock, flags);
3175 new_mask = (new_mask & ~state->intr_mask_clear)
3176 | state->intr_mask_set;
3177 __raw_writel(new_mask,
3178 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
3179 wmb();
3180 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003181 }
3182
Eric Holmbergc8002902011-09-16 13:55:57 -06003183 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003184 return ret;
3185}
3186EXPORT_SYMBOL(smsm_state_cb_deregister);
3187
Eric Holmberg6275b602012-11-19 13:05:04 -07003188/**
3189 * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
3190 *
3191 * @returns: pointer to SMEM remote spinlock
3192 */
3193remote_spinlock_t *smem_get_remote_spinlock(void)
3194{
3195 return &remote_spinlock;
3196}
3197EXPORT_SYMBOL(smem_get_remote_spinlock);
3198
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003199int smd_module_init_notifier_register(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003200{
3201 int ret;
3202 if (!nb)
3203 return -EINVAL;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003204 mutex_lock(&smd_module_init_notifier_lock);
3205 ret = raw_notifier_chain_register(&smd_module_init_notifier_list, nb);
3206 if (smd_module_inited)
3207 nb->notifier_call(nb, 0, NULL);
3208 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003209 return ret;
3210}
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003211EXPORT_SYMBOL(smd_module_init_notifier_register);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003212
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003213int smd_module_init_notifier_unregister(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003214{
3215 int ret;
3216 if (!nb)
3217 return -EINVAL;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003218 mutex_lock(&smd_module_init_notifier_lock);
3219 ret = raw_notifier_chain_unregister(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003220 nb);
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003221 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003222 return ret;
3223}
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003224EXPORT_SYMBOL(smd_module_init_notifier_unregister);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003225
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003226static void smd_module_init_notify(uint32_t state, void *data)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003227{
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003228 mutex_lock(&smd_module_init_notifier_lock);
3229 smd_module_inited = 1;
3230 raw_notifier_call_chain(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003231 state, data);
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003232 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003233}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003234
/*
 * Legacy (non-platform-data, non-devicetree) interrupt setup: request
 * every SMD/SMSM inter-processor interrupt present in this kernel
 * config and arm each as a wakeup source.  Each failure path unwinds,
 * in order, every IRQ requested before it.
 *
 * Returns 0 on success or the negative errno from the failing
 * request_irq().  enable_irq_wake() failures are logged but non-fatal.
 */
int smd_core_init(void)
{
	int r;
	unsigned long flags = IRQF_TRIGGER_RISING;
	SMD_INFO("smd_core_init()\n");

	/* modem edge: incoming SMD interrupt */
	r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
			flags, "smd_dev", 0);
	if (r < 0)
		return r;
	interrupt_stats[SMD_MODEM].smd_interrupt_id = INT_A9_M2A_0;
	r = enable_irq_wake(INT_A9_M2A_0);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_A9_M2A_0\n");

	/* modem edge: incoming SMSM interrupt */
	r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
			flags, "smsm_dev", 0);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		return r;
	}
	interrupt_stats[SMD_MODEM].smsm_interrupt_id = INT_A9_M2A_5;
	r = enable_irq_wake(INT_A9_M2A_5);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_A9_M2A_5\n");

#if defined(CONFIG_QDSP6)
#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
	/* SMD and SMSM share one line on this target; must be shareable */
	flags |= IRQF_SHARED;
#endif
	r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
			flags, "smd_dev", smd_dsp_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		return r;
	}

	interrupt_stats[SMD_Q6].smd_interrupt_id = INT_ADSP_A11;
	r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
			flags, "smsm_dev", smsm_dsp_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		return r;
	}

	interrupt_stats[SMD_Q6].smsm_interrupt_id = INT_ADSP_A11_SMSM;
	r = enable_irq_wake(INT_ADSP_A11);
	if (r < 0)
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_ADSP_A11\n");

#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
	r = enable_irq_wake(INT_ADSP_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: enable_irq_wake "
		       "failed for INT_ADSP_A11_SMSM\n");
#endif
	flags &= ~IRQF_SHARED;
#endif

#if defined(CONFIG_DSPS)
	/*
	 * NOTE(review): this error path (and the ones below) frees the
	 * QDSP6 IRQs unconditionally even though they are only requested
	 * under CONFIG_QDSP6 — assumes CONFIG_QDSP6 is always set when
	 * CONFIG_DSPS/CONFIG_WCNSS are; confirm against the defconfigs.
	 */
	r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
			flags, "smd_dev", smd_dsps_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		return r;
	}

	interrupt_stats[SMD_DSPS].smd_interrupt_id = INT_DSPS_A11;
	r = enable_irq_wake(INT_DSPS_A11);
	if (r < 0)
		/* NOTE(review): message names INT_ADSP_A11 but this is
		 * INT_DSPS_A11 — looks like a copy-paste slip */
		pr_err("smd_core_init: "
		       "enable_irq_wake failed for INT_ADSP_A11\n");
#endif

#if defined(CONFIG_WCNSS)
	r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
			flags, "smd_dev", smd_wcnss_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		return r;
	}

	interrupt_stats[SMD_WCNSS].smd_interrupt_id = INT_WCNSS_A11;
	r = enable_irq_wake(INT_WCNSS_A11);
	if (r < 0)
		pr_err("smd_core_init: "
			"enable_irq_wake failed for INT_WCNSS_A11\n");

	r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
			flags, "smsm_dev", smsm_wcnss_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
		return r;
	}

	interrupt_stats[SMD_WCNSS].smsm_interrupt_id = INT_WCNSS_A11_SMSM;
	r = enable_irq_wake(INT_WCNSS_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: "
			"enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
#endif

#if defined(CONFIG_DSPS_SMSM)
	r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
			flags, "smsm_dev", smsm_dsps_irq_handler);
	if (r < 0) {
		free_irq(INT_A9_M2A_0, 0);
		free_irq(INT_A9_M2A_5, 0);
		free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
		free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
		free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
		free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
		free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
		return r;
	}

	interrupt_stats[SMD_DSPS].smsm_interrupt_id = INT_DSPS_A11_SMSM;
	r = enable_irq_wake(INT_DSPS_A11_SMSM);
	if (r < 0)
		pr_err("smd_core_init: "
			"enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
#endif
	SMD_INFO("smd_core_init() done\n");

	return 0;
}
3379
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303380static int intr_init(struct interrupt_config_item *private_irq,
3381 struct smd_irq_config *platform_irq,
3382 struct platform_device *pdev
3383 )
3384{
3385 int irq_id;
3386 int ret;
3387 int ret_wake;
3388
3389 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3390 private_irq->out_offset = platform_irq->out_offset;
3391 private_irq->out_base = platform_irq->out_base;
3392
3393 irq_id = platform_get_irq_byname(
3394 pdev,
3395 platform_irq->irq_name
3396 );
3397 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3398 platform_irq->irq_name, irq_id);
3399 ret = request_irq(irq_id,
3400 private_irq->irq_handler,
3401 platform_irq->flags,
3402 platform_irq->device_name,
3403 (void *)platform_irq->dev_id
3404 );
3405 if (ret < 0) {
3406 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003407 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303408 } else {
3409 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003410 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303411 ret_wake = enable_irq_wake(irq_id);
3412 if (ret_wake < 0) {
3413 pr_err("smd: enable_irq_wake failed on %s",
3414 platform_irq->irq_name);
3415 }
3416 }
3417
3418 return ret;
3419}
3420
Jeff Hugobdc734d2012-03-26 16:05:39 -06003421int sort_cmp_func(const void *a, const void *b)
3422{
3423 struct smem_area *left = (struct smem_area *)(a);
3424 struct smem_area *right = (struct smem_area *)(b);
3425
3426 return left->phys_addr - right->phys_addr;
3427}
3428
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303429int smd_core_platform_init(struct platform_device *pdev)
3430{
3431 int i;
3432 int ret;
3433 uint32_t num_ss;
3434 struct smd_platform *smd_platform_data;
3435 struct smd_subsystem_config *smd_ss_config_list;
3436 struct smd_subsystem_config *cfg;
3437 int err_ret = 0;
Jeff Hugobdc734d2012-03-26 16:05:39 -06003438 struct smd_smem_regions *smd_smem_areas;
3439 int smem_idx = 0;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303440
3441 smd_platform_data = pdev->dev.platform_data;
3442 num_ss = smd_platform_data->num_ss_configs;
3443 smd_ss_config_list = smd_platform_data->smd_ss_configs;
3444
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06003445 if (smd_platform_data->smd_ssr_config)
3446 disable_smsm_reset_handshake = smd_platform_data->
3447 smd_ssr_config->disable_smsm_reset_handshake;
3448
Jeff Hugobdc734d2012-03-26 16:05:39 -06003449 smd_smem_areas = smd_platform_data->smd_smem_areas;
3450 if (smd_smem_areas) {
3451 num_smem_areas = smd_platform_data->num_smem_areas;
3452 smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
3453 GFP_KERNEL);
3454 if (!smem_areas) {
3455 pr_err("%s: smem_areas kmalloc failed\n", __func__);
3456 err_ret = -ENOMEM;
3457 goto smem_areas_alloc_fail;
3458 }
3459
3460 for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
3461 smem_areas[smem_idx].phys_addr =
3462 smd_smem_areas[smem_idx].phys_addr;
3463 smem_areas[smem_idx].size =
3464 smd_smem_areas[smem_idx].size;
3465 smem_areas[smem_idx].virt_addr = ioremap_nocache(
3466 (unsigned long)(smem_areas[smem_idx].phys_addr),
3467 smem_areas[smem_idx].size);
3468 if (!smem_areas[smem_idx].virt_addr) {
3469 pr_err("%s: ioremap_nocache() of addr:%p"
3470 " size: %x\n", __func__,
3471 smem_areas[smem_idx].phys_addr,
3472 smem_areas[smem_idx].size);
3473 err_ret = -ENOMEM;
3474 ++smem_idx;
3475 goto smem_failed;
3476 }
3477 }
3478 sort(smem_areas, num_smem_areas,
3479 sizeof(struct smem_area),
3480 sort_cmp_func, NULL);
3481 }
3482
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303483 for (i = 0; i < num_ss; i++) {
3484 cfg = &smd_ss_config_list[i];
3485
3486 ret = intr_init(
3487 &private_intr_config[cfg->irq_config_id].smd,
3488 &cfg->smd_int,
3489 pdev
3490 );
3491
3492 if (ret < 0) {
3493 err_ret = ret;
3494 pr_err("smd: register irq failed on %s\n",
3495 cfg->smd_int.irq_name);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003496 goto intr_failed;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303497 }
3498
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303499 interrupt_stats[cfg->irq_config_id].smd_interrupt_id
3500 = cfg->smd_int.irq_id;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003501 /* only init smsm structs if this edge supports smsm */
3502 if (cfg->smsm_int.irq_id)
3503 ret = intr_init(
3504 &private_intr_config[cfg->irq_config_id].smsm,
3505 &cfg->smsm_int,
3506 pdev
3507 );
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303508
3509 if (ret < 0) {
3510 err_ret = ret;
3511 pr_err("smd: register irq failed on %s\n",
3512 cfg->smsm_int.irq_name);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003513 goto intr_failed;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303514 }
Eric Holmberg17992c12012-02-29 12:54:44 -07003515
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303516 if (cfg->smsm_int.irq_id)
3517 interrupt_stats[cfg->irq_config_id].smsm_interrupt_id
3518 = cfg->smsm_int.irq_id;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003519 if (cfg->subsys_name)
3520 strlcpy(edge_to_pids[cfg->edge].subsys_name,
Eric Holmberg17992c12012-02-29 12:54:44 -07003521 cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303522 }
3523
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303524
3525 SMD_INFO("smd_core_platform_init() done\n");
3526 return 0;
3527
Jeff Hugobdc734d2012-03-26 16:05:39 -06003528intr_failed:
3529 pr_err("smd: deregistering IRQs\n");
3530 for (i = 0; i < num_ss; ++i) {
3531 cfg = &smd_ss_config_list[i];
3532
3533 if (cfg->smd_int.irq_id >= 0)
3534 free_irq(cfg->smd_int.irq_id,
3535 (void *)cfg->smd_int.dev_id
3536 );
3537 if (cfg->smsm_int.irq_id >= 0)
3538 free_irq(cfg->smsm_int.irq_id,
3539 (void *)cfg->smsm_int.dev_id
3540 );
3541 }
3542smem_failed:
3543 for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
3544 iounmap(smem_areas[smem_idx].virt_addr);
3545 kfree(smem_areas);
3546smem_areas_alloc_fail:
3547 return err_ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303548}
3549
Jeff Hugo412356e2012-09-27 17:14:23 -06003550static int __devinit parse_smd_devicetree(struct device_node *node,
3551 void *irq_out_base)
3552{
3553 uint32_t edge;
3554 char *key;
3555 int ret;
3556 uint32_t irq_offset;
3557 uint32_t irq_bitmask;
3558 uint32_t irq_line;
3559 unsigned long irq_flags = IRQF_TRIGGER_RISING;
3560 const char *pilstr;
3561 struct interrupt_config_item *private_irq;
3562
3563 key = "qcom,smd-edge";
3564 ret = of_property_read_u32(node, key, &edge);
3565 if (ret)
3566 goto missing_key;
3567 SMD_DBG("%s: %s = %d", __func__, key, edge);
3568
3569 key = "qcom,smd-irq-offset";
3570 ret = of_property_read_u32(node, key, &irq_offset);
3571 if (ret)
3572 goto missing_key;
3573 SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
3574
3575 key = "qcom,smd-irq-bitmask";
3576 ret = of_property_read_u32(node, key, &irq_bitmask);
3577 if (ret)
3578 goto missing_key;
3579 SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
3580
3581 key = "interrupts";
3582 irq_line = irq_of_parse_and_map(node, 0);
3583 if (!irq_line)
3584 goto missing_key;
3585 SMD_DBG("%s: %s = %d", __func__, key, irq_line);
3586
3587 key = "qcom,pil-string";
3588 pilstr = of_get_property(node, key, NULL);
3589 if (pilstr)
3590 SMD_DBG("%s: %s = %s", __func__, key, pilstr);
3591
3592 key = "qcom,irq-no-suspend";
3593 ret = of_property_read_bool(node, key);
3594 if (ret)
3595 irq_flags |= IRQF_NO_SUSPEND;
3596
3597 private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smd;
3598 private_irq->out_bit_pos = irq_bitmask;
3599 private_irq->out_offset = irq_offset;
3600 private_irq->out_base = irq_out_base;
3601 private_irq->irq_id = irq_line;
3602
3603 ret = request_irq(irq_line,
3604 private_irq->irq_handler,
3605 irq_flags,
3606 "smd_dev",
3607 NULL);
3608 if (ret < 0) {
3609 pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
3610 return ret;
3611 } else {
3612 ret = enable_irq_wake(irq_line);
3613 if (ret < 0)
3614 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
3615 irq_line);
3616 }
3617
3618 if (pilstr)
3619 strlcpy(edge_to_pids[edge].subsys_name, pilstr,
3620 SMD_MAX_CH_NAME_LEN);
3621
3622 return 0;
3623
3624missing_key:
3625 pr_err("%s: missing key: %s", __func__, key);
3626 return -ENODEV;
3627}
3628
3629static int __devinit parse_smsm_devicetree(struct device_node *node,
3630 void *irq_out_base)
3631{
3632 uint32_t edge;
3633 char *key;
3634 int ret;
3635 uint32_t irq_offset;
3636 uint32_t irq_bitmask;
3637 uint32_t irq_line;
3638 struct interrupt_config_item *private_irq;
3639
3640 key = "qcom,smsm-edge";
3641 ret = of_property_read_u32(node, key, &edge);
3642 if (ret)
3643 goto missing_key;
3644 SMD_DBG("%s: %s = %d", __func__, key, edge);
3645
3646 key = "qcom,smsm-irq-offset";
3647 ret = of_property_read_u32(node, key, &irq_offset);
3648 if (ret)
3649 goto missing_key;
3650 SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
3651
3652 key = "qcom,smsm-irq-bitmask";
3653 ret = of_property_read_u32(node, key, &irq_bitmask);
3654 if (ret)
3655 goto missing_key;
3656 SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
3657
3658 key = "interrupts";
3659 irq_line = irq_of_parse_and_map(node, 0);
3660 if (!irq_line)
3661 goto missing_key;
3662 SMD_DBG("%s: %s = %d", __func__, key, irq_line);
3663
3664 private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smsm;
3665 private_irq->out_bit_pos = irq_bitmask;
3666 private_irq->out_offset = irq_offset;
3667 private_irq->out_base = irq_out_base;
3668 private_irq->irq_id = irq_line;
3669
3670 ret = request_irq(irq_line,
3671 private_irq->irq_handler,
3672 IRQF_TRIGGER_RISING,
3673 "smsm_dev",
3674 NULL);
3675 if (ret < 0) {
3676 pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
3677 return ret;
3678 } else {
3679 ret = enable_irq_wake(irq_line);
3680 if (ret < 0)
3681 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
3682 irq_line);
3683 }
3684
3685 return 0;
3686
3687missing_key:
3688 pr_err("%s: missing key: %s", __func__, key);
3689 return -ENODEV;
3690}
3691
3692static void __devinit unparse_smd_devicetree(struct device_node *node)
3693{
3694 uint32_t irq_line;
3695
3696 irq_line = irq_of_parse_and_map(node, 0);
3697
3698 free_irq(irq_line, NULL);
3699}
3700
3701static void __devinit unparse_smsm_devicetree(struct device_node *node)
3702{
3703 uint32_t irq_line;
3704
3705 irq_line = irq_of_parse_and_map(node, 0);
3706
3707 free_irq(irq_line, NULL);
3708}
3709
3710static int __devinit smd_core_devicetree_init(struct platform_device *pdev)
3711{
3712 char *key;
3713 struct resource *r;
3714 void *irq_out_base;
3715 void *aux_mem_base;
3716 uint32_t aux_mem_size;
3717 int temp_string_size = 11; /* max 3 digit count */
3718 char temp_string[temp_string_size];
3719 int count;
3720 struct device_node *node;
3721 int ret;
3722 const char *compatible;
3723 int subnode_num = 0;
3724
3725 disable_smsm_reset_handshake = 1;
3726
3727 key = "irq-reg-base";
3728 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
3729 if (!r) {
3730 pr_err("%s: missing '%s'\n", __func__, key);
3731 return -ENODEV;
3732 }
3733 irq_out_base = (void *)(r->start);
3734 SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
3735
3736 count = 1;
3737 while (1) {
3738 scnprintf(temp_string, temp_string_size, "aux-mem%d", count);
3739 r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3740 temp_string);
3741 if (!r)
3742 break;
3743
3744 ++num_smem_areas;
3745 ++count;
3746 if (count > 999) {
3747 pr_err("%s: max num aux mem regions reached\n",
3748 __func__);
3749 break;
3750 }
3751 }
3752
3753 if (num_smem_areas) {
3754 smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
3755 GFP_KERNEL);
3756 if (!smem_areas) {
3757 pr_err("%s: smem areas kmalloc failed\n", __func__);
3758 num_smem_areas = 0;
3759 return -ENOMEM;
3760 }
3761 count = 1;
3762 while (1) {
3763 scnprintf(temp_string, temp_string_size, "aux-mem%d",
3764 count);
3765 r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3766 temp_string);
3767 if (!r)
3768 break;
3769 aux_mem_base = (void *)(r->start);
3770 aux_mem_size = (uint32_t)(resource_size(r));
3771 SMD_DBG("%s: %s = %p %x", __func__, temp_string,
3772 aux_mem_base, aux_mem_size);
3773 smem_areas[count - 1].phys_addr = aux_mem_base;
3774 smem_areas[count - 1].size = aux_mem_size;
3775 smem_areas[count - 1].virt_addr = ioremap_nocache(
3776 (unsigned long)(smem_areas[count-1].phys_addr),
3777 smem_areas[count - 1].size);
3778 if (!smem_areas[count - 1].virt_addr) {
3779 pr_err("%s: ioremap_nocache() of addr:%p size: %x\n",
3780 __func__,
3781 smem_areas[count - 1].phys_addr,
3782 smem_areas[count - 1].size);
3783 ret = -ENOMEM;
3784 goto free_smem_areas;
3785 }
3786
3787 ++count;
3788 if (count > 999) {
3789 pr_err("%s: max num aux mem regions reached\n",
3790 __func__);
3791 break;
3792 }
3793 }
3794 sort(smem_areas, num_smem_areas,
3795 sizeof(struct smem_area),
3796 sort_cmp_func, NULL);
3797 }
3798
3799 for_each_child_of_node(pdev->dev.of_node, node) {
3800 compatible = of_get_property(node, "compatible", NULL);
3801 if (!strcmp(compatible, "qcom,smd")) {
3802 ret = parse_smd_devicetree(node, irq_out_base);
3803 if (ret)
3804 goto rollback_subnodes;
3805 } else if (!strcmp(compatible, "qcom,smsm")) {
3806 ret = parse_smsm_devicetree(node, irq_out_base);
3807 if (ret)
3808 goto rollback_subnodes;
3809 } else {
3810 pr_err("%s: invalid child node named: %s\n", __func__,
3811 compatible);
3812 ret = -ENODEV;
3813 goto rollback_subnodes;
3814 }
3815 ++subnode_num;
3816 }
3817
3818 return 0;
3819
3820rollback_subnodes:
3821 count = 0;
3822 for_each_child_of_node(pdev->dev.of_node, node) {
3823 if (count >= subnode_num)
3824 break;
3825 ++count;
3826 compatible = of_get_property(node, "compatible", NULL);
3827 if (!strcmp(compatible, "qcom,smd"))
3828 unparse_smd_devicetree(node);
3829 else
3830 unparse_smsm_devicetree(node);
3831 }
3832free_smem_areas:
3833 num_smem_areas = 0;
3834 kfree(smem_areas);
3835 smem_areas = NULL;
3836 return ret;
3837}
3838
Gregory Bean4416e9e2010-07-28 10:22:12 -07003839static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003840{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303841 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003842
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303843 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003844 INIT_WORK(&probe_work, smd_channel_probe_worker);
3845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003846 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3847 if (IS_ERR(channel_close_wq)) {
3848 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3849 return -ENOMEM;
3850 }
3851
3852 if (smsm_init()) {
3853 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003854 return -1;
3855 }
3856
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303857 if (pdev) {
3858 if (pdev->dev.of_node) {
Jeff Hugo412356e2012-09-27 17:14:23 -06003859 ret = smd_core_devicetree_init(pdev);
3860 if (ret) {
3861 pr_err("%s: device tree init failed\n",
3862 __func__);
3863 return ret;
3864 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303865 } else if (pdev->dev.platform_data) {
3866 ret = smd_core_platform_init(pdev);
3867 if (ret) {
3868 pr_err(
3869 "SMD: smd_core_platform_init() failed\n");
3870 return -ENODEV;
3871 }
3872 } else {
3873 ret = smd_core_init();
3874 if (ret) {
3875 pr_err("smd_core_init() failed\n");
3876 return -ENODEV;
3877 }
3878 }
3879 } else {
3880 pr_err("SMD: PDEV not found\n");
3881 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003882 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003883
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003884 smd_initialized = 1;
3885
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003886 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003887 smsm_irq_handler(0, 0);
3888 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003889
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003890 return 0;
3891}
3892
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003893static int restart_notifier_cb(struct notifier_block *this,
3894 unsigned long code,
3895 void *data);
3896
/*
 * Subsystem-restart hooks: one entry per (processor, PIL name) pair.
 * SMD_MODEM appears twice ("modem" and "gss") and SMD_Q6 twice
 * ("lpass" and "adsp") to cover the different target naming schemes.
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
};
3905
3906static int restart_notifier_cb(struct notifier_block *this,
3907 unsigned long code,
3908 void *data)
3909{
Jeff Hugo73f356f2012-12-14 17:56:19 -07003910 /*
3911 * Some SMD or SMSM clients assume SMD/SMSM SSR handling will be
3912 * done in the AFTER_SHUTDOWN level. If this ever changes, extra
3913 * care should be taken to verify no clients are broken.
3914 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003915 if (code == SUBSYS_AFTER_SHUTDOWN) {
3916 struct restart_notifier_block *notifier;
3917
3918 notifier = container_of(this,
3919 struct restart_notifier_block, nb);
3920 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3921 __func__, notifier->processor,
3922 notifier->name);
3923
3924 smd_channel_reset(notifier->processor);
3925 }
3926
3927 return NOTIFY_DONE;
3928}
3929
3930static __init int modem_restart_late_init(void)
3931{
3932 int i;
3933 void *handle;
3934 struct restart_notifier_block *nb;
3935
3936 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3937 nb = &restart_notifiers[i];
3938 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3939 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3940 __func__, nb->name, handle);
3941 }
3942 return 0;
3943}
3944late_initcall(modem_restart_late_init);
3945
/* Device-tree match table: binds this driver to "qcom,smem" nodes. */
static struct of_device_id msm_smem_match_table[] = {
	{ .compatible = "qcom,smem" },
	{},	/* sentinel */
};
3950
/*
 * Platform driver definition.  Probing is handled by msm_smd_probe();
 * binding happens either by MODULE_NAME match or via the DT table above.
 */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
		.of_match_table = msm_smem_match_table,
	},
};
3959
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003960int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003961{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003962 static bool registered;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003963 int rc;
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003964
3965 if (registered)
3966 return 0;
3967
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05303968 smd_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smd");
3969 if (!smd_log_ctx) {
3970 pr_err("%s: unable to create logging context\n", __func__);
3971 msm_smd_debug_mask = 0;
3972 }
3973
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003974 registered = true;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003975 rc = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
3976 if (rc) {
3977 pr_err("%s: remote spinlock init failed %d\n", __func__, rc);
3978 return rc;
3979 }
3980 spinlocks_initialized = 1;
3981
3982 rc = platform_driver_register(&msm_smd_driver);
3983 if (rc) {
3984 pr_err("%s: msm_smd_driver register failed %d\n",
3985 __func__, rc);
3986 return rc;
3987 }
3988
3989 smd_module_init_notify(0, NULL);
3990
3991 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003992}
3993
3994module_init(msm_smd_init);
3995
3996MODULE_DESCRIPTION("MSM Shared Memory Core");
3997MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3998MODULE_LICENSE("GPL");