blob: 332d9f3237818ec65001d71db614a812f2afaf09 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07004 * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f9412012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070038#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070039#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070040#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053042#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070043#include <mach/proc_comm.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053044#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070045
46#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060050 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060051 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070052#define CONFIG_QDSP6 1
53#endif
54
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060055#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
56 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057#define CONFIG_DSPS 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060063#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070065
66#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define SMEM_VERSION 0x000B
68#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070069#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060070#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72uint32_t SMSM_NUM_ENTRIES = 8;
73uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070074
Eric Holmberge8a39322012-04-03 15:14:02 -060075/* Legacy SMSM interrupt notifications */
76#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
77 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070078
79enum {
80 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081 MSM_SMSM_DEBUG = 1U << 1,
82 MSM_SMD_INFO = 1U << 2,
83 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070084 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085};
86
87struct smsm_shared_info {
88 uint32_t *state;
89 uint32_t *intr_mask;
90 uint32_t *intr_mux;
91};
92
93static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f9412012-03-19 10:04:22 -060094static struct kfifo smsm_snapshot_fifo;
95static struct wake_lock smsm_snapshot_wakelock;
96static int smsm_snapshot_count;
97static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098
99struct smsm_size_info_type {
100 uint32_t num_hosts;
101 uint32_t num_entries;
102 uint32_t reserved0;
103 uint32_t reserved1;
104};
105
106struct smsm_state_cb_info {
107 struct list_head cb_list;
108 uint32_t mask;
109 void *data;
110 void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
111};
112
113struct smsm_state_info {
114 struct list_head callbacks;
115 uint32_t last_value;
Eric Holmberge8a39322012-04-03 15:14:02 -0600116 uint32_t intr_mask_set;
117 uint32_t intr_mask_clear;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700118};
119
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530120struct interrupt_config_item {
121 /* must be initialized */
122 irqreturn_t (*irq_handler)(int req, void *data);
123 /* outgoing interrupt config (set from platform data) */
124 uint32_t out_bit_pos;
125 void __iomem *out_base;
126 uint32_t out_offset;
Eric Holmbergdeace152012-07-25 12:17:11 -0600127 int irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530128};
129
130struct interrupt_config {
131 struct interrupt_config_item smd;
132 struct interrupt_config_item smsm;
133};
134
135static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700136static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530137static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700138static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530139static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700140static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530141static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700142static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600143static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530144static irqreturn_t smsm_irq_handler(int irq, void *data);
145
146static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
147 [SMD_MODEM] = {
148 .smd.irq_handler = smd_modem_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700149 .smsm.irq_handler = smsm_modem_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530150 },
151 [SMD_Q6] = {
152 .smd.irq_handler = smd_dsp_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700153 .smsm.irq_handler = smsm_dsp_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530154 },
155 [SMD_DSPS] = {
156 .smd.irq_handler = smd_dsps_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700157 .smsm.irq_handler = smsm_dsps_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530158 },
159 [SMD_WCNSS] = {
160 .smd.irq_handler = smd_wcnss_irq_handler,
Eric Holmberg98c6c642012-02-24 11:29:35 -0700161 .smsm.irq_handler = smsm_wcnss_irq_handler,
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530162 },
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600163 [SMD_RPM] = {
164 .smd.irq_handler = smd_rpm_irq_handler,
165 .smsm.irq_handler = NULL, /* does not support smsm */
166 },
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530167};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600168
169struct smem_area {
170 void *phys_addr;
171 unsigned size;
172 void __iomem *virt_addr;
173};
174static uint32_t num_smem_areas;
175static struct smem_area *smem_areas;
176static void *smem_range_check(void *base, unsigned offset);
177
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700178struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530179
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700180#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
181#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
182 entry * SMSM_NUM_HOSTS + host)
183#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
184
185/* Internal definitions which are not exported in some targets */
186enum {
187 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700188};
189
190static int msm_smd_debug_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700191module_param_named(debug_mask, msm_smd_debug_mask,
192 int, S_IRUGO | S_IWUSR | S_IWGRP);
193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194#if defined(CONFIG_MSM_SMD_DEBUG)
195#define SMD_DBG(x...) do { \
196 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
197 printk(KERN_DEBUG x); \
198 } while (0)
199
200#define SMSM_DBG(x...) do { \
201 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
202 printk(KERN_DEBUG x); \
203 } while (0)
204
205#define SMD_INFO(x...) do { \
206 if (msm_smd_debug_mask & MSM_SMD_INFO) \
207 printk(KERN_INFO x); \
208 } while (0)
209
210#define SMSM_INFO(x...) do { \
211 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
212 printk(KERN_INFO x); \
213 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700214#define SMx_POWER_INFO(x...) do { \
215 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
216 printk(KERN_INFO x); \
217 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218#else
219#define SMD_DBG(x...) do { } while (0)
220#define SMSM_DBG(x...) do { } while (0)
221#define SMD_INFO(x...) do { } while (0)
222#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700223#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700224#endif
225
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700226static unsigned last_heap_free = 0xffffffff;
227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228static inline void smd_write_intr(unsigned int val,
229 const void __iomem *addr);
230
231#if defined(CONFIG_ARCH_MSM7X30)
232#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530233 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700234#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530235 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530237 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700238#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530239 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700240#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600241#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242#define MSM_TRIG_A2WCNSS_SMD_INT
243#define MSM_TRIG_A2WCNSS_SMSM_INT
244#elif defined(CONFIG_ARCH_MSM8X60)
245#define MSM_TRIG_A2M_SMD_INT \
246 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
247#define MSM_TRIG_A2Q6_SMD_INT \
248 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
249#define MSM_TRIG_A2M_SMSM_INT \
250 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
251#define MSM_TRIG_A2Q6_SMSM_INT \
252 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
253#define MSM_TRIG_A2DSPS_SMD_INT \
254 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600255#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256#define MSM_TRIG_A2WCNSS_SMD_INT
257#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600258#elif defined(CONFIG_ARCH_MSM9615)
259#define MSM_TRIG_A2M_SMD_INT \
260 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
261#define MSM_TRIG_A2Q6_SMD_INT \
262 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
263#define MSM_TRIG_A2M_SMSM_INT \
264 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
265#define MSM_TRIG_A2Q6_SMSM_INT \
266 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
267#define MSM_TRIG_A2DSPS_SMD_INT
268#define MSM_TRIG_A2DSPS_SMSM_INT
269#define MSM_TRIG_A2WCNSS_SMD_INT
270#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700271#elif defined(CONFIG_ARCH_FSM9XXX)
272#define MSM_TRIG_A2Q6_SMD_INT \
273 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
274#define MSM_TRIG_A2Q6_SMSM_INT \
275 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
276#define MSM_TRIG_A2M_SMD_INT \
277 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
278#define MSM_TRIG_A2M_SMSM_INT \
279 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
280#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600281#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700282#define MSM_TRIG_A2WCNSS_SMD_INT
283#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700284#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285#define MSM_TRIG_A2M_SMD_INT \
286 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700287#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288#define MSM_TRIG_A2M_SMSM_INT \
289 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700290#define MSM_TRIG_A2Q6_SMSM_INT
291#define MSM_TRIG_A2DSPS_SMD_INT
292#define MSM_TRIG_A2DSPS_SMSM_INT
293#define MSM_TRIG_A2WCNSS_SMD_INT
294#define MSM_TRIG_A2WCNSS_SMSM_INT
295#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
296#define MSM_TRIG_A2M_SMD_INT \
297 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
298#define MSM_TRIG_A2Q6_SMD_INT
299#define MSM_TRIG_A2M_SMSM_INT \
300 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
301#define MSM_TRIG_A2Q6_SMSM_INT
302#define MSM_TRIG_A2DSPS_SMD_INT
303#define MSM_TRIG_A2DSPS_SMSM_INT
304#define MSM_TRIG_A2WCNSS_SMD_INT
305#define MSM_TRIG_A2WCNSS_SMSM_INT
306#else /* use platform device / device tree configuration */
307#define MSM_TRIG_A2M_SMD_INT
308#define MSM_TRIG_A2Q6_SMD_INT
309#define MSM_TRIG_A2M_SMSM_INT
310#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700311#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600312#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700313#define MSM_TRIG_A2WCNSS_SMD_INT
314#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700315#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700316
Jeff Hugoee40b152012-02-09 17:39:47 -0700317/*
318 * stub out legacy macros if they are not being used so that the legacy
319 * code compiles even though it is not used
320 *
321 * these definitions should not be used in active code and will cause
322 * an early failure
323 */
324#ifndef INT_A9_M2A_0
325#define INT_A9_M2A_0 -1
326#endif
327#ifndef INT_A9_M2A_5
328#define INT_A9_M2A_5 -1
329#endif
330#ifndef INT_ADSP_A11
331#define INT_ADSP_A11 -1
332#endif
333#ifndef INT_ADSP_A11_SMSM
334#define INT_ADSP_A11_SMSM -1
335#endif
336#ifndef INT_DSPS_A11
337#define INT_DSPS_A11 -1
338#endif
339#ifndef INT_DSPS_A11_SMSM
340#define INT_DSPS_A11_SMSM -1
341#endif
342#ifndef INT_WCNSS_A11
343#define INT_WCNSS_A11 -1
344#endif
345#ifndef INT_WCNSS_A11_SMSM
346#define INT_WCNSS_A11_SMSM -1
347#endif
348
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349#define SMD_LOOPBACK_CID 100
350
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600351#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
352static remote_spinlock_t remote_spinlock;
353
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600356static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700357
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600358static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700359static void notify_smsm_cb_clients_worker(struct work_struct *work);
360static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600361static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700362static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530363static int spinlocks_initialized;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -0600364
365/**
366 * Variables to indicate smd module initialization.
367 * Dependents to register for smd module init notifier.
368 */
369static int smd_module_inited;
370static RAW_NOTIFIER_HEAD(smd_module_init_notifier_list);
371static DEFINE_MUTEX(smd_module_init_notifier_lock);
372static void smd_module_init_notify(uint32_t state, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373
374static inline void smd_write_intr(unsigned int val,
375 const void __iomem *addr)
376{
377 wmb();
378 __raw_writel(val, addr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700379}
380
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700381static inline void notify_modem_smd(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700382{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530383 static const struct interrupt_config_item *intr
384 = &private_intr_config[SMD_MODEM].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700385 if (intr->out_base) {
386 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530387 smd_write_intr(intr->out_bit_pos,
388 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700389 } else {
390 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530391 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700392 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700393}
394
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700395static inline void notify_dsp_smd(void)
396{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530397 static const struct interrupt_config_item *intr
398 = &private_intr_config[SMD_Q6].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700399 if (intr->out_base) {
400 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530401 smd_write_intr(intr->out_bit_pos,
402 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700403 } else {
404 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530405 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700406 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700407}
408
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530409static inline void notify_dsps_smd(void)
410{
411 static const struct interrupt_config_item *intr
412 = &private_intr_config[SMD_DSPS].smd;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700413 if (intr->out_base) {
414 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530415 smd_write_intr(intr->out_bit_pos,
416 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700417 } else {
418 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530419 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700420 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530421}
422
423static inline void notify_wcnss_smd(void)
424{
425 static const struct interrupt_config_item *intr
426 = &private_intr_config[SMD_WCNSS].smd;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530427
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700428 if (intr->out_base) {
429 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530430 smd_write_intr(intr->out_bit_pos,
431 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700432 } else {
433 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530434 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700435 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530436}
437
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600438static inline void notify_rpm_smd(void)
439{
440 static const struct interrupt_config_item *intr
441 = &private_intr_config[SMD_RPM].smd;
442
443 if (intr->out_base) {
444 ++interrupt_stats[SMD_RPM].smd_out_config_count;
445 smd_write_intr(intr->out_bit_pos,
446 intr->out_base + intr->out_offset);
447 }
448}
449
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530450static inline void notify_modem_smsm(void)
451{
452 static const struct interrupt_config_item *intr
453 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700454 if (intr->out_base) {
455 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530456 smd_write_intr(intr->out_bit_pos,
457 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700458 } else {
459 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530460 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700461 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530462}
463
464static inline void notify_dsp_smsm(void)
465{
466 static const struct interrupt_config_item *intr
467 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700468 if (intr->out_base) {
469 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530470 smd_write_intr(intr->out_bit_pos,
471 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700472 } else {
473 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530474 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700475 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530476}
477
478static inline void notify_dsps_smsm(void)
479{
480 static const struct interrupt_config_item *intr
481 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700482 if (intr->out_base) {
483 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530484 smd_write_intr(intr->out_bit_pos,
485 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700486 } else {
487 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530488 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700489 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530490}
491
492static inline void notify_wcnss_smsm(void)
493{
494 static const struct interrupt_config_item *intr
495 = &private_intr_config[SMD_WCNSS].smsm;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530496
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700497 if (intr->out_base) {
498 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530499 smd_write_intr(intr->out_bit_pos,
500 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700501 } else {
502 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530503 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700504 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530505}
506
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700507static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
508{
509 /* older protocol don't use smsm_intr_mask,
510 but still communicates with modem */
511 if (!smsm_info.intr_mask ||
512 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
513 & notify_mask))
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530514 notify_modem_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700515
516 if (smsm_info.intr_mask &&
517 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
518 & notify_mask)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700519 uint32_t mux_val;
520
Eric Holmberg6282c5d2011-10-27 17:30:57 -0600521 if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700522 mux_val = __raw_readl(
523 SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
524 mux_val++;
525 __raw_writel(mux_val,
526 SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
527 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530528 notify_dsp_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700529 }
530
531 if (smsm_info.intr_mask &&
532 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
533 & notify_mask)) {
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530534 notify_wcnss_smsm();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700535 }
536
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600537 if (smsm_info.intr_mask &&
538 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
539 & notify_mask)) {
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530540 notify_dsps_smsm();
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600541 }
542
Eric Holmbergda31d042012-03-28 14:01:02 -0600543 /*
544 * Notify local SMSM callback clients without wakelock since this
545 * code is used by power management during power-down/-up sequencing
546 * on DEM-based targets. Grabbing a wakelock in this case will
547 * abort the power-down sequencing.
548 */
Eric Holmberg51676a12012-07-10 18:45:23 -0600549 if (smsm_info.intr_mask &&
550 (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
551 & notify_mask)) {
552 smsm_cb_snapshot(0);
553 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700554}
555
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700556void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700557{
558 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700559 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700560
561 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
562 if (x != 0) {
563 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700564 SMD_INFO("smem: DIAG '%s'\n", x);
565 }
566
567 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
568 if (x != 0) {
569 x[size - 1] = 0;
570 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700571 }
572}
573
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700574
/*
 * Terminal handler for a detected modem crash: log the shared-memory
 * diagnostics, then spin until the modem or the watchdog reboots us.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	   if (msm_reset_hook)
	   msm_reset_hook();
	 */

	/* in this case the modem or watchdog should reboot us */
	while (1)
		;
}
589
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700590int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700591{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700592 /* if the modem's not ready yet, we have to hope for the best */
593 if (!smsm_info.state)
594 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700595
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700596 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700597 handle_modem_crash();
598 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700599 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700600 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700601}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700602EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700603
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700604/* the spinlock is used to synchronize between the
Brian Swetland03e00cd2009-07-01 17:58:37 -0700605 * irq handler and code that mutates the channel
606 * list or fiddles with channel state
607 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700608static DEFINE_SPINLOCK(smd_lock);
Brian Swetland03e00cd2009-07-01 17:58:37 -0700609DEFINE_SPINLOCK(smem_lock);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700610
611/* the mutex is used during open() and close()
Brian Swetland03e00cd2009-07-01 17:58:37 -0700612 * operations to avoid races while creating or
613 * destroying smd_channel structures
614 */
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700615static DEFINE_MUTEX(smd_creation_mutex);
616
617static int smd_initialized;
618
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700619struct smd_shared_v1 {
620 struct smd_half_channel ch0;
621 unsigned char data0[SMD_BUF_SIZE];
622 struct smd_half_channel ch1;
623 unsigned char data1[SMD_BUF_SIZE];
624};
625
626struct smd_shared_v2 {
627 struct smd_half_channel ch0;
628 struct smd_half_channel ch1;
629};
630
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600631struct smd_shared_v2_word_access {
632 struct smd_half_channel_word_access ch0;
633 struct smd_half_channel_word_access ch1;
634};
635
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700636struct smd_channel {
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600637 volatile void *send; /* some variant of smd_half_channel */
638 volatile void *recv; /* some variant of smd_half_channel */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700639 unsigned char *send_data;
640 unsigned char *recv_data;
641 unsigned fifo_size;
642 unsigned fifo_mask;
643 struct list_head ch_list;
644
645 unsigned current_packet;
646 unsigned n;
647 void *priv;
648 void (*notify)(void *priv, unsigned flags);
649
650 int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
651 int (*write)(smd_channel_t *ch, const void *data, int len,
652 int user_buf);
653 int (*read_avail)(smd_channel_t *ch);
654 int (*write_avail)(smd_channel_t *ch);
655 int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
656 int user_buf);
657
658 void (*update_state)(smd_channel_t *ch);
659 unsigned last_state;
660 void (*notify_other_cpu)(void);
661
662 char name[20];
663 struct platform_device pdev;
664 unsigned type;
665
666 int pending_pkt_sz;
667
668 char is_pkt_ch;
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600669
670 /*
671 * private internal functions to access *send and *recv.
672 * never to be exported outside of smd
673 */
674 struct smd_half_channel_access *half_ch;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700675};
676
677struct edge_to_pid {
678 uint32_t local_pid;
679 uint32_t remote_pid;
Eric Holmberg17992c12012-02-29 12:54:44 -0700680 char subsys_name[SMD_MAX_CH_NAME_LEN];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700681};
682
/**
 * Maps edge type to local and remote processor ID's.
 *
 * Entries without a subsystem name string leave subsys_name
 * zero-initialized; smd_edge_to_subsystem() returns NULL for those.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "q6"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
707
/* binds a subsystem-restart notifier callback to a processor ID */
struct restart_notifier_block {
	unsigned processor;	/* SMD processor ID this notifier covers */
	char *name;		/* subsystem name for the notifier */
	struct notifier_block nb;	/* embedded kernel notifier block */
};
713
/* nonzero disables the SMSM reset handshake -- set elsewhere in this file;
 * NOTE(review): confirm exact semantics against the code that reads it */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel lifecycle lists, traversed under smd_lock */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
/* per-edge lists of channels serviced by that edge's interrupt handler */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one flag per slot of the 64-entry shared channel allocation table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
738
/*
 * Worker that scans the shared channel allocation table and registers
 * any newly-allocated entries whose edge terminates on the apps
 * processor.  Serialized by smd_probe_lock since it can run from
 * multiple cores.
 *
 * NOTE(review): the table size 64 is hard-coded here and in
 * smd_ch_allocated[]; consider a named constant.
 */
static void smd_channel_probe_worker(struct work_struct *work)
{
	struct smd_alloc_elm *shared;
	unsigned n;
	uint32_t type;

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);

	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	mutex_lock(&smd_probe_lock);
	for (n = 0; n < 64; n++) {
		if (smd_ch_allocated[n])
			continue;

		/* channel should be allocated only if APPS
		   processor is involved */
		type = SMD_CHANNEL_TYPE(shared[n].type);
		if (type >= ARRAY_SIZE(edge_to_pids) ||
				edge_to_pids[type].local_pid != SMD_APPS)
			continue;
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		if (!smd_alloc_channel(&shared[n]))
			smd_ch_allocated[n] = 1;
		else
			SMD_INFO("Probe skipping ch %d, not allocated\n", n);
	}
	mutex_unlock(&smd_probe_lock);
}
775
776/**
777 * Lookup processor ID and determine if it belongs to the proved edge
778 * type.
779 *
780 * @shared2: Pointer to v2 shared channel structure
781 * @type: Edge type
782 * @pid: Processor ID of processor on edge
783 * @local_ch: Channel that belongs to processor @pid
784 * @remote_ch: Other side of edge contained @pid
785 *
786 * Returns 0 for not on edge, 1 for found on edge
787 */
788static int pid_is_on_edge(struct smd_shared_v2 *shared2,
789 uint32_t type, uint32_t pid,
790 struct smd_half_channel **local_ch,
791 struct smd_half_channel **remote_ch
792 )
793{
794 int ret = 0;
795 struct edge_to_pid *edge;
796
797 *local_ch = 0;
798 *remote_ch = 0;
799
800 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
801 return 0;
802
803 edge = &edge_to_pids[type];
804 if (edge->local_pid != edge->remote_pid) {
805 if (pid == edge->local_pid) {
806 *local_ch = &shared2->ch0;
807 *remote_ch = &shared2->ch1;
808 ret = 1;
809 } else if (pid == edge->remote_pid) {
810 *local_ch = &shared2->ch1;
811 *remote_ch = &shared2->ch0;
812 ret = 1;
813 }
814 }
815
816 return ret;
817}
818
Eric Holmberg17992c12012-02-29 12:54:44 -0700819/*
820 * Returns a pointer to the subsystem name or NULL if no
821 * subsystem name is available.
822 *
823 * @type - Edge definition
824 */
825const char *smd_edge_to_subsystem(uint32_t type)
826{
827 const char *subsys = NULL;
828
829 if (type < ARRAY_SIZE(edge_to_pids)) {
830 subsys = edge_to_pids[type].subsys_name;
831 if (subsys[0] == 0x0)
832 subsys = NULL;
833 }
834 return subsys;
835}
836EXPORT_SYMBOL(smd_edge_to_subsystem);
837
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700838/*
839 * Returns a pointer to the subsystem name given the
840 * remote processor ID.
841 *
842 * @pid Remote processor ID
843 * @returns Pointer to subsystem name or NULL if not found
844 */
845const char *smd_pid_to_subsystem(uint32_t pid)
846{
847 const char *subsys = NULL;
848 int i;
849
850 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
851 if (pid == edge_to_pids[i].remote_pid &&
852 edge_to_pids[i].subsys_name[0] != 0x0
853 ) {
854 subsys = edge_to_pids[i].subsys_name;
855 break;
856 }
857 }
858
859 return subsys;
860}
861EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700862
/*
 * Force one half-channel into @new_state during an edge reset: clear
 * the modem-control flags and raise fSTATE so the peer notices the
 * transition.  A channel already fully CLOSED is left untouched.
 *
 * NOTE(review): fields are written directly rather than through the
 * half_ch accessors, so this presumably only covers the byte-access
 * layout -- confirm word-access channels are handled elsewhere.
 */
static void smd_reset_edge(struct smd_half_channel *ch, unsigned new_state)
{
	if (ch->state != SMD_SS_CLOSED) {
		ch->state = new_state;
		ch->fDSR = 0;
		ch->fCTS = 0;
		ch->fCD = 0;
		ch->fSTATE = 1;
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700873
/*
 * Walk the shared channel allocation table and force every half-channel
 * owned by processor @pid into @new_state (used during subsystem
 * restart).  For a modem restart, the ModemFW edges are reset as well
 * since ModemFW shares the modem subsystem.
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	struct smd_shared_v2 *shared2;
	uint32_t type;
	struct smd_half_channel *local_ch;
	struct smd_half_channel *remote_ch;

	for (n = 0; n < SMD_CHANNELS; n++) {
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + n, sizeof(*shared2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
		     pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
				&local_ch, &remote_ch))
			smd_reset_edge(local_ch, new_state);
	}
}
907
908
/*
 * Reset all SMD/SMSM state belonging to a restarting processor.
 *
 * Sequence: release remote spinlocks the dead processor may hold,
 * clear its SMSM entry (re-arming the init handshake for the modem),
 * then drive every remote half-channel first to CLOSING and then to
 * CLOSED, notifying the other processors after each pass so local
 * clients observe a normal close.
 *
 * @restart_pid: processor ID of the restarting subsystem
 *
 * NOTE(review): the SMD notify fan-out below omits notify_rpm_smd()
 * although an RPM edge list exists -- confirm whether RPM edges need
 * notification here.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMD_DBG("%s: starting reset\n", __func__);
	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* release any held spinlocks */
	remote_spin_release(&remote_spinlock, restart_pid);
	remote_spin_release_all(restart_pid);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd();
	notify_dsp_smd();
	notify_dsps_smd();
	notify_wcnss_smd();

	SMD_DBG("%s: finished reset\n", __func__);
}
976
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700977/* how many bytes are available for reading */
978static int smd_stream_read_avail(struct smd_channel *ch)
979{
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600980 return (ch->half_ch->get_head(ch->recv) -
981 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700982}
983
984/* how many bytes we are free to write */
985static int smd_stream_write_avail(struct smd_channel *ch)
986{
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600987 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
988 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700989}
990
991static int smd_packet_read_avail(struct smd_channel *ch)
992{
993 if (ch->current_packet) {
994 int n = smd_stream_read_avail(ch);
995 if (n > ch->current_packet)
996 n = ch->current_packet;
997 return n;
998 } else {
999 return 0;
1000 }
1001}
1002
1003static int smd_packet_write_avail(struct smd_channel *ch)
1004{
1005 int n = smd_stream_write_avail(ch);
1006 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1007}
1008
1009static int ch_is_open(struct smd_channel *ch)
1010{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001011 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1012 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1013 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001014}
1015
1016/* provide a pointer and length to readable data in the fifo */
1017static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1018{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001019 unsigned head = ch->half_ch->get_head(ch->recv);
1020 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001021 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001022
1023 if (tail <= head)
1024 return head - tail;
1025 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001026 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001027}
1028
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001029static int read_intr_blocked(struct smd_channel *ch)
1030{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001031 return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001032}
1033
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001034/* advance the fifo read pointer after data from ch_read_buffer is consumed */
1035static void ch_read_done(struct smd_channel *ch, unsigned count)
1036{
1037 BUG_ON(count > smd_stream_read_avail(ch));
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001038 ch->half_ch->set_tail(ch->recv,
1039 (ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001040 wmb();
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001041 ch->half_ch->set_fTAIL(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001042}
1043
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * Returns the number of bytes consumed from the fifo.  Note that on a
 * copy_to_user() failure the bytes are still consumed (only an error is
 * logged), so the caller may receive fewer valid bytes than reported.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* n is the contiguous readable span; 0 means fifo empty */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1084
/* streams have no special state requiring updating */
static void update_stream_state(struct smd_channel *ch)
{
	/* intentionally empty: stream channels keep no per-packet state */
}
1089
/*
 * If no packet is in progress, pull the next packet header from the
 * stream and latch its length into ch->current_packet.  Loops so that
 * zero-length packets are consumed immediately.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		/* availability was just checked, so a short read is a bug */
		BUG_ON(r != SMD_HEADER_SIZE);

		ch->current_packet = hdr[0];
	}
}
1109
1110/* provide a pointer and length to next free space in the fifo */
1111static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1112{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001113 unsigned head = ch->half_ch->get_head(ch->send);
1114 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001115 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001116
1117 if (head < tail) {
1118 return tail - head - 1;
1119 } else {
1120 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001121 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001122 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001123 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001124 }
1125}
1126
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* the head update must be visible before fHEAD signals the peer */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1138
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001139static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001140{
1141 if (n == SMD_SS_OPENED) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001142 ch->half_ch->set_fDSR(ch->send, 1);
1143 ch->half_ch->set_fCTS(ch->send, 1);
1144 ch->half_ch->set_fCD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001145 } else {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001146 ch->half_ch->set_fDSR(ch->send, 0);
1147 ch->half_ch->set_fCTS(ch->send, 0);
1148 ch->half_ch->set_fCD(ch->send, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001149 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001150 ch->half_ch->set_state(ch->send, n);
1151 ch->half_ch->set_fSTATE(ch->send, 1);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001152 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001153}
1154
/*
 * Detect growth of the shared heap (a sign that a new channel may have
 * been allocated by the remote side) and schedule the probe worker.
 */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1163
/*
 * React to a remote state transition (last -> next) by advancing the
 * local half-channel and delivering OPEN/CLOSE events to the client.
 * Called with smd_lock held from the interrupt handlers.
 */
static void smd_state_change(struct smd_channel *ch,
			     unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote reopens: reset our fifo indices and follow */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed first: drop packet state, tell the client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closed: hand off to the close workqueue */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1209
/*
 * Service channels that are mid-close: acknowledge fSTATE and process
 * any remote state transition so the close handshake completes.
 * Uses the _safe iterator because smd_state_change() may move entries
 * off smd_ch_closing_list.
 */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1227
/*
 * Core interrupt service for one edge: for every channel on @list,
 * acknowledge the event flags the remote side raised (fHEAD/fTAIL/
 * fSTATE), process any state transition, and deliver DATA/STATUS
 * events to the client callback.  @notify is the edge's outbound
 * interrupt function (unused here directly; clients trigger it via
 * channel ops).
 */
static void handle_smd_irq(struct list_head *list, void (*notify)(void))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* collect and clear the remote event flags:
			 * 1 = new data, 2 = space freed, 4 = state */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO("SMD ch%d '%s' Data event r%d/w%d\n",
					ch->n, ch->name,
					ch->read_avail(ch),
					ch->fifo_size - ch->write_avail(ch));
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* a state-flag-only event without a transition still gets
		 * a STATUS notification */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1278
/* incoming SMD interrupt from the modem: service modem-edge channels */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1287
/* incoming SMD interrupt from LPASS/Q6: service dsp-edge channels */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001296
/* incoming SMD interrupt from DSPS: service dsps-edge channels */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001305
/* incoming SMD interrupt from WCNSS: service wcnss-edge channels */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1314
/* incoming SMD interrupt from the RPM: service rpm-edge channels */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMD Int RPM->Apps\n");
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001323
/*
 * Tasklet body: poll every edge list as if its interrupt had fired.
 * Used to recover events that may have been missed (e.g. after resume
 * or subsystem restart).
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1335
Brian Swetland37521a32009-07-01 18:30:47 -07001336static inline int smd_need_int(struct smd_channel *ch)
1337{
1338 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001339 if (ch->half_ch->get_fHEAD(ch->recv) ||
1340 ch->half_ch->get_fTAIL(ch->recv) ||
1341 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001342 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001343 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001344 return 1;
1345 }
1346 return 0;
1347}
1348
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001349void smd_sleep_exit(void)
1350{
1351 unsigned long flags;
1352 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001353 int need_int = 0;
1354
1355 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001356 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1357 if (smd_need_int(ch)) {
1358 need_int = 1;
1359 break;
1360 }
1361 }
1362 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1363 if (smd_need_int(ch)) {
1364 need_int = 1;
1365 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001366 }
1367 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001368 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1369 if (smd_need_int(ch)) {
1370 need_int = 1;
1371 break;
1372 }
1373 }
1374 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1375 if (smd_need_int(ch)) {
1376 need_int = 1;
1377 break;
1378 }
1379 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001380 spin_unlock_irqrestore(&smd_lock, flags);
1381 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001382
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001383 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001384 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001385 tasklet_schedule(&smd_fake_irq_tasklet);
1386 }
1387}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001388EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001389
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001390static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001391{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001392 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1393 return 0;
1394 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001395 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001396
1397 /* for cases where xfer type is 0 */
1398 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001399 return 0;
1400
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001401 /* for cases where xfer type is 0 */
1402 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1403 return 0;
1404
1405 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001406 return 1;
1407 else
1408 return 0;
1409}
1410
/*
 * Write up to @len bytes from @_data into the stream channel's send
 * fifo and interrupt the remote side if anything was written.
 *
 * @user_buf: nonzero when @_data is a userspace pointer
 * Returns bytes written (possibly short), 0 if the channel closed
 * mid-write (len is reset to orig_len), or -EINVAL for negative @len.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		/* closed mid-write: report nothing written */
		if (!ch_is_open(ch)) {
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* kick the remote processor only when data actually moved */
	if (orig_len - len)
		ch->notify_other_cpu();

	return orig_len - len;
}
1456
/*
 * Write one complete packet (header + payload) to a packet channel.
 * All-or-nothing: fails with -ENOMEM unless header and payload both
 * fit in the send fifo.
 *
 * NOTE(review): the header-write failure path returns -1 rather than a
 * proper errno; callers only appear to test for negative values.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	/* header word 0 carries the payload length; the rest are unused */
	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1493
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001494static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001495{
1496 int r;
1497
1498 if (len < 0)
1499 return -EINVAL;
1500
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001501 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001502 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001503 if (!read_intr_blocked(ch))
1504 ch->notify_other_cpu();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001505
1506 return r;
1507}
1508
/*
 * Read up to @len bytes of the current packet's payload.  The packet
 * accounting (current_packet and the next header fetch) is updated
 * under smd_lock since the interrupt path touches the same state.
 */
static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
{
	unsigned long flags;
	int r;

	if (len < 0)
		return -EINVAL;

	/* never read past the end of the current packet */
	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu();

	spin_lock_irqsave(&smd_lock, flags);
	ch->current_packet -= r;
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return r;
}
1532
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001533static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1534 int user_buf)
1535{
1536 int r;
1537
1538 if (len < 0)
1539 return -EINVAL;
1540
1541 if (len > ch->current_packet)
1542 len = ch->current_packet;
1543
1544 r = ch_read(ch, data, len, user_buf);
1545 if (r > 0)
1546 if (!read_intr_blocked(ch))
1547 ch->notify_other_cpu();
1548
1549 ch->current_packet -= r;
1550 update_packet_state(ch);
1551
1552 return r;
1553}
1554
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301555#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001556static int smd_alloc_v2(struct smd_channel *ch)
1557{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001558 void *buffer;
1559 unsigned buffer_sz;
1560
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001561 if (is_word_access_ch(ch->type)) {
1562 struct smd_shared_v2_word_access *shared2;
1563 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1564 sizeof(*shared2));
1565 if (!shared2) {
1566 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1567 return -EINVAL;
1568 }
1569 ch->send = &shared2->ch0;
1570 ch->recv = &shared2->ch1;
1571 } else {
1572 struct smd_shared_v2 *shared2;
1573 shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
1574 sizeof(*shared2));
1575 if (!shared2) {
1576 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1577 return -EINVAL;
1578 }
1579 ch->send = &shared2->ch0;
1580 ch->recv = &shared2->ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001581 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001582 ch->half_ch = get_half_ch_funcs(ch->type);
1583
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001584 buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
1585 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301586 SMD_INFO("smem_get_entry failed\n");
1587 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 }
1589
1590 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301591 if (buffer_sz & (buffer_sz - 1)) {
1592 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1593 return -EINVAL;
1594 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001595 buffer_sz /= 2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001596 ch->send_data = buffer;
1597 ch->recv_data = buffer + buffer_sz;
1598 ch->fifo_size = buffer_sz;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001599
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001600 return 0;
1601}
1602
1603static int smd_alloc_v1(struct smd_channel *ch)
1604{
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301605 return -EINVAL;
1606}
1607
1608#else /* define v1 for older targets */
1609static int smd_alloc_v2(struct smd_channel *ch)
1610{
1611 return -EINVAL;
1612}
1613
1614static int smd_alloc_v1(struct smd_channel *ch)
1615{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001616 struct smd_shared_v1 *shared1;
1617 shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
1618 if (!shared1) {
1619 pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301620 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001621 }
1622 ch->send = &shared1->ch0;
1623 ch->recv = &shared1->ch1;
1624 ch->send_data = shared1->data0;
1625 ch->recv_data = shared1->data1;
1626 ch->fifo_size = SMD_BUF_SIZE;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001627 ch->half_ch = get_half_ch_funcs(ch->type);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001628 return 0;
1629}
1630
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301631#endif
1632
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001633static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001634{
1635 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001636
1637 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1638 if (ch == 0) {
1639 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001640 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001641 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001642 ch->n = alloc_elm->cid;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001643 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001644
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001645 if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001646 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001647 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001648 }
1649
1650 ch->fifo_mask = ch->fifo_size - 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001651
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001652 /* probe_worker guarentees ch->type will be a valid type */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001653 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001654 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001655 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001656 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001657 else if (ch->type == SMD_APPS_DSPS)
1658 ch->notify_other_cpu = notify_dsps_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001659 else if (ch->type == SMD_APPS_WCNSS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001660 ch->notify_other_cpu = notify_wcnss_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001661 else if (ch->type == SMD_APPS_RPM)
1662 ch->notify_other_cpu = notify_rpm_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001663
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001664 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001665 ch->read = smd_packet_read;
1666 ch->write = smd_packet_write;
1667 ch->read_avail = smd_packet_read_avail;
1668 ch->write_avail = smd_packet_write_avail;
1669 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001670 ch->read_from_cb = smd_packet_read_from_cb;
1671 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001672 } else {
1673 ch->read = smd_stream_read;
1674 ch->write = smd_stream_write;
1675 ch->read_avail = smd_stream_read_avail;
1676 ch->write_avail = smd_stream_write_avail;
1677 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001679 }
1680
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001681 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1682 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001683
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001684 ch->pdev.name = ch->name;
1685 ch->pdev.id = ch->type;
1686
1687 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1688 ch->name, ch->n);
1689
1690 mutex_lock(&smd_creation_mutex);
1691 list_add(&ch->ch_list, &smd_ch_closed_list);
1692 mutex_unlock(&smd_creation_mutex);
1693
1694 platform_device_register(&ch->pdev);
1695 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
1696 /* create a platform driver to be used by smd_tty driver
1697 * so that it can access the loopback port
1698 */
1699 loopback_tty_pdev.id = ch->type;
1700 platform_device_register(&loopback_tty_pdev);
1701 }
1702 return 0;
1703}
1704
1705static inline void notify_loopback_smd(void)
1706{
1707 unsigned long flags;
1708 struct smd_channel *ch;
1709
1710 spin_lock_irqsave(&smd_lock, flags);
1711 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1712 ch->notify(ch->priv, SMD_EVENT_DATA);
1713 }
1714 spin_unlock_irqrestore(&smd_lock, flags);
1715}
1716
1717static int smd_alloc_loopback_channel(void)
1718{
1719 static struct smd_half_channel smd_loopback_ctl;
1720 static char smd_loopback_data[SMD_BUF_SIZE];
1721 struct smd_channel *ch;
1722
1723 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1724 if (ch == 0) {
1725 pr_err("%s: out of memory\n", __func__);
1726 return -1;
1727 }
1728 ch->n = SMD_LOOPBACK_CID;
1729
1730 ch->send = &smd_loopback_ctl;
1731 ch->recv = &smd_loopback_ctl;
1732 ch->send_data = smd_loopback_data;
1733 ch->recv_data = smd_loopback_data;
1734 ch->fifo_size = SMD_BUF_SIZE;
1735
1736 ch->fifo_mask = ch->fifo_size - 1;
1737 ch->type = SMD_LOOPBACK_TYPE;
1738 ch->notify_other_cpu = notify_loopback_smd;
1739
1740 ch->read = smd_stream_read;
1741 ch->write = smd_stream_write;
1742 ch->read_avail = smd_stream_read_avail;
1743 ch->write_avail = smd_stream_write_avail;
1744 ch->update_state = update_stream_state;
1745 ch->read_from_cb = smd_stream_read;
1746
1747 memset(ch->name, 0, 20);
1748 memcpy(ch->name, "local_loopback", 14);
1749
1750 ch->pdev.name = ch->name;
1751 ch->pdev.id = ch->type;
1752
1753 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001754
1755 mutex_lock(&smd_creation_mutex);
1756 list_add(&ch->ch_list, &smd_ch_closed_list);
1757 mutex_unlock(&smd_creation_mutex);
1758
1759 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001760 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001761}
1762
/* No-op notify callback installed when a client passes NULL, or while a
 * channel is being torn down, so ch->notify() can always be invoked
 * without a NULL check.
 */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1766
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001767static void finalize_channel_close_fn(struct work_struct *work)
1768{
1769 unsigned long flags;
1770 struct smd_channel *ch;
1771 struct smd_channel *index;
1772
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001773 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001774 spin_lock_irqsave(&smd_lock, flags);
1775 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1776 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001777 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001778 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1779 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001780 }
1781 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001782 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001783}
1784
1785struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001786{
1787 struct smd_channel *ch;
1788
1789 mutex_lock(&smd_creation_mutex);
1790 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001791 if (!strcmp(name, ch->name) &&
1792 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001793 list_del(&ch->ch_list);
1794 mutex_unlock(&smd_creation_mutex);
1795 return ch;
1796 }
1797 }
1798 mutex_unlock(&smd_creation_mutex);
1799
1800 return NULL;
1801}
1802
/*
 * Open the channel @name on the given @edge and register @notify as the
 * event callback (NULL means "no callback").  On success *_ch holds the
 * channel handle and 0 is returned.  Returns -ENODEV if SMD is not yet
 * initialized or the channel does not exist, -EAGAIN if the channel
 * exists but is still being closed.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		/* smd_lock stays held across BOTH list scans below */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	/* reset per-open state before the channel becomes visible */
	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	if (edge == SMD_LOOPBACK_TYPE) {
		/* loopback has no remote side: force the open handshake
		 * flags locally instead of running the state machine */
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on the per-edge active list */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1891
1892
/*
 * Legacy open entry point: opens @name on the apps<->modem edge.
 * Thin wrapper around smd_named_open_on_edge().
 */
int smd_open(const char *name, smd_channel_t **_ch,
	     void *priv, void (*notify)(void *, unsigned))
{
	return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
				      notify);
}
EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001900
1901int smd_close(smd_channel_t *ch)
1902{
1903 unsigned long flags;
1904
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001905 if (ch == 0)
1906 return -1;
1907
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001908 SMD_INFO("smd_close(%s)\n", ch->name);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001909
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001910 spin_lock_irqsave(&smd_lock, flags);
1911 list_del(&ch->ch_list);
1912 if (ch->n == SMD_LOOPBACK_CID) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001913 ch->half_ch->set_fDSR(ch->send, 0);
1914 ch->half_ch->set_fCTS(ch->send, 0);
1915 ch->half_ch->set_fCD(ch->send, 0);
1916 ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001917 } else
1918 ch_set_state(ch, SMD_SS_CLOSED);
1919
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001920 if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001921 list_add(&ch->ch_list, &smd_ch_closing_list);
1922 spin_unlock_irqrestore(&smd_lock, flags);
1923 } else {
1924 spin_unlock_irqrestore(&smd_lock, flags);
1925 ch->notify = do_nothing_notify;
1926 mutex_lock(&smd_creation_mutex);
1927 list_add(&ch->ch_list, &smd_ch_closed_list);
1928 mutex_unlock(&smd_creation_mutex);
1929 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001930
1931 return 0;
1932}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001933EXPORT_SYMBOL(smd_close);
1934
1935int smd_write_start(smd_channel_t *ch, int len)
1936{
1937 int ret;
1938 unsigned hdr[5];
1939
1940 if (!ch) {
1941 pr_err("%s: Invalid channel specified\n", __func__);
1942 return -ENODEV;
1943 }
1944 if (!ch->is_pkt_ch) {
1945 pr_err("%s: non-packet channel specified\n", __func__);
1946 return -EACCES;
1947 }
1948 if (len < 1) {
1949 pr_err("%s: invalid length: %d\n", __func__, len);
1950 return -EINVAL;
1951 }
1952
1953 if (ch->pending_pkt_sz) {
1954 pr_err("%s: packet of size: %d in progress\n", __func__,
1955 ch->pending_pkt_sz);
1956 return -EBUSY;
1957 }
1958 ch->pending_pkt_sz = len;
1959
1960 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
1961 ch->pending_pkt_sz = 0;
1962 SMD_DBG("%s: no space to write packet header\n", __func__);
1963 return -EAGAIN;
1964 }
1965
1966 hdr[0] = len;
1967 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1968
1969
1970 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1971 if (ret < 0 || ret != sizeof(hdr)) {
1972 ch->pending_pkt_sz = 0;
1973 pr_err("%s: packet header failed to write\n", __func__);
1974 return -EPERM;
1975 }
1976 return 0;
1977}
1978EXPORT_SYMBOL(smd_write_start);
1979
1980int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
1981{
1982 int bytes_written;
1983
1984 if (!ch) {
1985 pr_err("%s: Invalid channel specified\n", __func__);
1986 return -ENODEV;
1987 }
1988 if (len < 1) {
1989 pr_err("%s: invalid length: %d\n", __func__, len);
1990 return -EINVAL;
1991 }
1992
1993 if (!ch->pending_pkt_sz) {
1994 pr_err("%s: no transaction in progress\n", __func__);
1995 return -ENOEXEC;
1996 }
1997 if (ch->pending_pkt_sz - len < 0) {
1998 pr_err("%s: segment of size: %d will make packet go over "
1999 "length\n", __func__, len);
2000 return -EINVAL;
2001 }
2002
2003 bytes_written = smd_stream_write(ch, data, len, user_buf);
2004
2005 ch->pending_pkt_sz -= bytes_written;
2006
2007 return bytes_written;
2008}
2009EXPORT_SYMBOL(smd_write_segment);
2010
/*
 * Finish a segmented packet write started with smd_write_start().
 * Fails with -E2BIG if payload bytes announced in the header remain
 * unwritten.
 */
int smd_write_end(smd_channel_t *ch)
{

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (ch->pending_pkt_sz) {
		pr_err("%s: current packet not completely written\n", __func__);
		return -E2BIG;
	}

	return 0;
}
EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002026
/*
 * Read up to @len bytes from @ch into the kernel buffer @data.
 * Dispatches to the channel's stream- or packet-mode read op.
 */
int smd_read(smd_channel_t *ch, void *data, int len)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->read(ch, data, len, 0);
}
EXPORT_SYMBOL(smd_read);
2037
/*
 * Same as smd_read() but @data is a userspace buffer (the read op is
 * told to use the copy-to-user path via user_buf == 1).
 */
int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->read(ch, data, len, 1);
}
EXPORT_SYMBOL(smd_read_user_buffer);
2048
/*
 * Read variant safe to call from the channel's notify callback
 * (dispatches to the lock-free read_from_cb op; kernel buffer only).
 */
int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->read_from_cb(ch, data, len, 0);
}
EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002059
/*
 * Write @len bytes from the kernel buffer @data to @ch.  Refused with
 * -EBUSY while a segmented write (smd_write_start) is in progress.
 */
int smd_write(smd_channel_t *ch, const void *data, int len)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
}
EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002070
/*
 * Same as smd_write() but @data is a userspace buffer (user_buf == 1
 * selects the copy-from-user path in the write op).
 */
int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
}
EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002081
/*
 * Return the number of bytes (stream) or pending-packet bytes (packet)
 * currently available to read on @ch.
 */
int smd_read_avail(smd_channel_t *ch)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->read_avail(ch);
}
EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002092
/* Return the number of bytes that can currently be written to @ch. */
int smd_write_avail(smd_channel_t *ch)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->write_avail(ch);
}
EXPORT_SYMBOL(smd_write_avail);
2103
/*
 * Clear the fBLOCKREADINTR hint in our send half-channel, telling the
 * remote side we DO want to be interrupted when it consumes our data.
 * Silently ignores a NULL channel.
 */
void smd_enable_read_intr(smd_channel_t *ch)
{
	if (ch)
		ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
}
EXPORT_SYMBOL(smd_enable_read_intr);
2110
/*
 * Set the fBLOCKREADINTR hint in our send half-channel, asking the
 * remote side not to interrupt us when it consumes our data.
 * Silently ignores a NULL channel.
 */
void smd_disable_read_intr(smd_channel_t *ch)
{
	if (ch)
		ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
}
EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002117
/**
 * Enable/disable receive interrupts for the remote processor used by a
 * particular channel.
 * @ch: open channel handle to use for the edge
 * @mask: 1 = mask interrupts; 0 = unmask interrupts
 * @returns: 0 for success; < 0 for failure
 *
 * Note that this enables/disables all interrupts from the remote subsystem for
 * all channels. As such, it should be used with care and only for specific
 * use cases such as power-collapse sequencing.
 */
int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
{
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;
	struct interrupt_config_item *int_cfg;

	if (!ch)
		return -EINVAL;

	/* edge type indexes the per-processor interrupt tables */
	if (ch->type >= ARRAY_SIZE(edge_to_pids))
		return -ENODEV;

	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;

	/* irq_id < 0 means no SMD interrupt registered for this edge */
	if (int_cfg->irq_id < 0)
		return -ENODEV;

	irq_chip = irq_get_chip(int_cfg->irq_id);
	if (!irq_chip)
		return -ENODEV;

	irq_data = irq_get_irq_data(int_cfg->irq_id);
	if (!irq_data)
		return -ENODEV;

	if (mask) {
		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_mask(irq_data);
	} else {
		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_unmask(irq_data);
	}

	return 0;
}
EXPORT_SYMBOL(smd_mask_receive_interrupt);
2167
/* Blocking reads are not implemented; always returns -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2172
/* Blocking writes are not implemented; always returns -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2177
/*
 * Return the number of unread bytes remaining in the packet currently
 * being received (0 when no packet is in progress).
 */
int smd_cur_packet_size(smd_channel_t *ch)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	return ch->current_packet;
}
EXPORT_SYMBOL(smd_cur_packet_size);
2188
2189int smd_tiocmget(smd_channel_t *ch)
2190{
Jack Pham1b236d12012-03-19 15:27:18 -07002191 if (!ch) {
2192 pr_err("%s: Invalid channel specified\n", __func__);
2193 return -ENODEV;
2194 }
2195
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002196 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2197 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2198 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2199 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2200 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2201 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002202}
2203EXPORT_SYMBOL(smd_tiocmget);
2204
/* this api will be called while holding smd_lock */
/*
 * Apply TIOCM_DTR/TIOCM_RTS changes to the send half-channel.  DTR maps
 * to our fDSR flag and RTS to our fCTS flag.  Clears are applied after
 * sets, so a bit present in both @set and @clear ends up cleared.
 * fSTATE is raised and the remote CPU notified so it re-reads the flags.
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	ch->half_ch->set_fSTATE(ch->send, 1);
	/* ensure flag updates are visible before interrupting the remote */
	barrier();
	ch->notify_other_cpu();

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2233
/*
 * Locked wrapper around smd_tiocmset_from_cb(): acquires smd_lock, then
 * applies the TIOCM set/clear bits.
 */
int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	unsigned long flags;

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	spin_lock_irqsave(&smd_lock, flags);
	smd_tiocmset_from_cb(ch, set, clear);
	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002250
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002251int smd_is_pkt_avail(smd_channel_t *ch)
2252{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002253 unsigned long flags;
2254
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002255 if (!ch || !ch->is_pkt_ch)
2256 return -EINVAL;
2257
2258 if (ch->current_packet)
2259 return 1;
2260
Jeff Hugoa8549f12012-08-13 20:36:18 -06002261 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002262 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002263 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002264
2265 return ch->current_packet ? 1 : 0;
2266}
2267EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002268
2269
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002270/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002271
Jeff Hugobdc734d2012-03-26 16:05:39 -06002272/*
2273 * Shared Memory Range Check
2274 *
2275 * Takes a physical address and an offset and checks if the resulting physical
2276 * address would fit into one of the aux smem regions. If so, returns the
2277 * corresponding virtual address. Otherwise returns NULL. Expects the array
2278 * of smem regions to be in ascending physical address order.
2279 *
2280 * @base: physical base address to check
2281 * @offset: offset from the base to get the final address
2282 */
2283static void *smem_range_check(void *base, unsigned offset)
2284{
2285 int i;
2286 void *phys_addr;
2287 unsigned size;
2288
2289 for (i = 0; i < num_smem_areas; ++i) {
2290 phys_addr = smem_areas[i].phys_addr;
2291 size = smem_areas[i].size;
2292 if (base < phys_addr)
2293 return NULL;
2294 if (base > phys_addr + size)
2295 continue;
2296 if (base >= phys_addr && base + offset < phys_addr + size)
2297 return smem_areas[i].virt_addr + offset;
2298 }
2299
2300 return NULL;
2301}
2302
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.
 */
void *smem_alloc(unsigned id, unsigned size)
{
	/* pure lookup; never allocates (see smem_alloc2 for that) */
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002311
/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
 * it allocates it and then returns the pointer to it.
 *
 * The shared heap is concurrently managed by other processors, so the
 * table-of-contents and heap bookkeeping are only touched under the
 * inter-processor remote spinlock, and write barriers order the item
 * metadata before the 'allocated' flag becomes visible.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	/* items are 8-byte aligned so sizes must match after rounding */
	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		/* an existing item is only handed out if the size agrees */
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		/* only dynamic items may be created here; fixed items are
		 * laid out elsewhere */
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* publish offset/size before the allocated flag */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	/* flush all heap updates before dropping the remote lock */
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002360
/* Looks up an already-allocated SMEM item.
 *
 * @id    SMEM item identifier (must be < SMEM_NUM_ITEMS)
 * @size  out-parameter; receives the item's size, or 0 if not allocated
 *
 * Returns the item's address or NULL/0 when the item does not exist.
 * The remote spinlock is skipped if it has not been initialized yet
 * (early boot), so very early callers read the TOC unlocked.
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		/* items may live in an auxiliary memory region recorded in
		 * the reserved field; validate those through
		 * smem_range_check() instead of assuming main shared RAM */
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				(void *)(toc[id].reserved & BASE_ADDR_MASK),
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002393
2394void *smem_find(unsigned id, unsigned size_in)
2395{
2396 unsigned size;
2397 void *ptr;
2398
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002399 ptr = smem_get_entry(id, &size);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002400 if (!ptr)
2401 return 0;
2402
2403 size_in = ALIGN(size_in, 8);
2404 if (size_in != size) {
2405 pr_err("smem_find(%d, %d): wrong size %d\n",
2406 id, size_in, size);
2407 return 0;
2408 }
2409
2410 return ptr;
2411}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002412EXPORT_SYMBOL(smem_find);
2413
2414static int smsm_cb_init(void)
2415{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002416 struct smsm_state_info *state_info;
2417 int n;
2418 int ret = 0;
2419
2420 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2421 GFP_KERNEL);
2422
2423 if (!smsm_states) {
2424 pr_err("%s: SMSM init failed\n", __func__);
2425 return -ENOMEM;
2426 }
2427
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002428 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2429 if (!smsm_cb_wq) {
2430 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2431 kfree(smsm_states);
2432 return -EFAULT;
2433 }
2434
Eric Holmbergc8002902011-09-16 13:55:57 -06002435 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002436 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2437 state_info = &smsm_states[n];
2438 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002439 state_info->intr_mask_set = 0x0;
2440 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002441 INIT_LIST_HEAD(&state_info->callbacks);
2442 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002443 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002444
2445 return ret;
2446}
2447
2448static int smsm_init(void)
2449{
2450 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2451 int i;
2452 struct smsm_size_info_type *smsm_size_info;
2453
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002454 smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
2455 sizeof(struct smsm_size_info_type));
2456 if (smsm_size_info) {
2457 SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
2458 SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
2459 }
2460
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002461 i = kfifo_alloc(&smsm_snapshot_fifo,
2462 sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
2463 GFP_KERNEL);
2464 if (i) {
2465 pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
2466 return i;
2467 }
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002468 wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
2469 "smsm_snapshot");
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002470
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002471 if (!smsm_info.state) {
2472 smsm_info.state = smem_alloc2(ID_SHARED_STATE,
2473 SMSM_NUM_ENTRIES *
2474 sizeof(uint32_t));
2475
2476 if (smsm_info.state) {
2477 __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2478 if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
2479 __raw_writel(0, \
2480 SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
2481 }
2482 }
2483
2484 if (!smsm_info.intr_mask) {
2485 smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
2486 SMSM_NUM_ENTRIES *
2487 SMSM_NUM_HOSTS *
2488 sizeof(uint32_t));
2489
Eric Holmberge8a39322012-04-03 15:14:02 -06002490 if (smsm_info.intr_mask) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002491 for (i = 0; i < SMSM_NUM_ENTRIES; i++)
Eric Holmberge8a39322012-04-03 15:14:02 -06002492 __raw_writel(0x0,
2493 SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
2494
2495 /* Configure legacy modem bits */
2496 __raw_writel(LEGACY_MODEM_SMSM_MASK,
2497 SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
2498 SMSM_APPS));
2499 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002500 }
2501
2502 if (!smsm_info.intr_mux)
2503 smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
2504 SMSM_NUM_INTR_MUX *
2505 sizeof(uint32_t));
2506
2507 i = smsm_cb_init();
2508 if (i)
2509 return i;
2510
2511 wmb();
2512 return 0;
2513}
2514
2515void smsm_reset_modem(unsigned mode)
2516{
2517 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2518 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2519 } else if (mode == SMSM_MODEM_WAIT) {
2520 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2521 } else { /* reset_mode is SMSM_RESET or default */
2522 mode = SMSM_RESET;
2523 }
2524
2525 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2526}
2527EXPORT_SYMBOL(smsm_reset_modem);
2528
/* Clears SMSM_MODEM_WAIT from the apps SMSM state — presumably to let a
 * modem held by smsm_reset_modem(SMSM_MODEM_WAIT) continue; confirm with
 * the modem-side handshake.  No-op if SMSM state is not yet available. */
void smsm_reset_modem_cont(void)
{
	unsigned long flags;
	uint32_t state;

	if (!smsm_info.state)
		return;

	spin_lock_irqsave(&smem_lock, flags);
	state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
						& ~SMSM_MODEM_WAIT;
	__raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
	/* make the shared-memory write visible before releasing the lock */
	wmb();
	spin_unlock_irqrestore(&smem_lock, flags);
}
EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002545
/* Captures the current value of every SMSM state entry into the snapshot
 * FIFO (followed by the @use_wakelock flag) and queues the callback
 * worker to deliver notifications.  When @use_wakelock is set, a
 * wakelock reference is taken first and is only released by the worker
 * (or by the error path below if the snapshot cannot be queued). */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	/* refuse to start a snapshot that cannot fit in the FIFO */
	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 *   1) increment snapshot count
	 *   2) insert data into FIFO
	 *
	 *   Potentially in parallel, the worker:
	 *   a) verifies >= 1 snapshots are in FIFO
	 *   b) processes snapshot
	 *   c) decrements reference count
	 *
	 *   This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			/* first outstanding snapshot holds the wakelock */
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* snapshot was not fully queued: undo the reference taken above,
	 * dropping the wakelock if this was the last one */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002620
/* Common SMSM interrupt handler.  For the ADSP SMSM interrupt it only
 * snapshots state for the callback worker; for all other sources it
 * additionally runs the apps<->modem handshake state machine (reset,
 * init, system-download transitions) before snapshotting. */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* track the Q6->apps intr mux value on 8x50 targets */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				/* ack the modem's reset and flush caches
				 * before the restart proceeds */
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			/* declare RUN only once INIT, SMDINIT and RPCINIT
			 * are all set on the apps side */
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		/* publish any apps-state change and interrupt the peers */
		if (old_apps != apps) {
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2703
Eric Holmberg98c6c642012-02-24 11:29:35 -07002704static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002705{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002706 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002707 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002708 return smsm_irq_handler(irq, data);
2709}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002710
Eric Holmberg98c6c642012-02-24 11:29:35 -07002711static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2712{
2713 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002714 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002715 return smsm_irq_handler(irq, data);
2716}
2717
2718static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2719{
2720 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002721 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002722 return smsm_irq_handler(irq, data);
2723}
2724
2725static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2726{
2727 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002728 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002729 return smsm_irq_handler(irq, data);
2730}
2731
Eric Holmberge8a39322012-04-03 15:14:02 -06002732/*
2733 * Changes the global interrupt mask. The set and clear masks are re-applied
2734 * every time the global interrupt mask is updated for callback registration
2735 * and de-registration.
2736 *
2737 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2738 * mask and the set mask, the result will be that the interrupt is set.
2739 *
2740 * @smsm_entry SMSM entry to change
2741 * @clear_mask 1 = clear bit, 0 = no-op
2742 * @set_mask 1 = set bit, 0 = no-op
2743 *
2744 * @returns 0 for success, < 0 for error
2745 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002746int smsm_change_intr_mask(uint32_t smsm_entry,
2747 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002748{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002749 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002750 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002751
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002752 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2753 pr_err("smsm_change_state: Invalid entry %d\n",
2754 smsm_entry);
2755 return -EINVAL;
2756 }
2757
2758 if (!smsm_info.intr_mask) {
2759 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002760 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002761 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002762
2763 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002764 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2765 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002766
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002767 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2768 new_mask = (old_mask & ~clear_mask) | set_mask;
2769 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002770
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002771 wmb();
2772 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002773
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002774 return 0;
2775}
2776EXPORT_SYMBOL(smsm_change_intr_mask);
2777
2778int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2779{
2780 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2781 pr_err("smsm_change_state: Invalid entry %d\n",
2782 smsm_entry);
2783 return -EINVAL;
2784 }
2785
2786 if (!smsm_info.intr_mask) {
2787 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2788 return -EIO;
2789 }
2790
2791 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2792 return 0;
2793}
2794EXPORT_SYMBOL(smsm_get_intr_mask);
2795
2796int smsm_change_state(uint32_t smsm_entry,
2797 uint32_t clear_mask, uint32_t set_mask)
2798{
2799 unsigned long flags;
2800 uint32_t old_state, new_state;
2801
2802 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2803 pr_err("smsm_change_state: Invalid entry %d",
2804 smsm_entry);
2805 return -EINVAL;
2806 }
2807
2808 if (!smsm_info.state) {
2809 pr_err("smsm_change_state <SM NO STATE>\n");
2810 return -EIO;
2811 }
2812 spin_lock_irqsave(&smem_lock, flags);
2813
2814 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2815 new_state = (old_state & ~clear_mask) | set_mask;
2816 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2817 SMSM_DBG("smsm_change_state %x\n", new_state);
2818 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002819
2820 spin_unlock_irqrestore(&smem_lock, flags);
2821
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002822 return 0;
2823}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002824EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002825
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002826uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002827{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002828 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002829
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002830 /* needs interface change to return error code */
2831 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2832 pr_err("smsm_change_state: Invalid entry %d",
2833 smsm_entry);
2834 return 0;
2835 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002836
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002837 if (!smsm_info.state) {
2838 pr_err("smsm_get_state <SM NO STATE>\n");
2839 } else {
2840 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2841 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002842
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002843 return rv;
2844}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002845EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002846
/**
 * Performs SMSM callback client notification.
 *
 * Drains complete snapshots from the snapshot FIFO: for each entry it
 * compares the snapshotted value against the cached last_value and
 * invokes every registered callback whose mask overlaps the changed
 * bits.  Each snapshot ends with a wakelock-usage flag; when set, the
 * snapshot reference taken by smsm_cb_snapshot() is dropped here
 * (releasing the wakelock when the count reaches zero).
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	/* only whole snapshots (all entries + flag) are processed */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				/* fire every callback interested in any of
				 * the changed bits */
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		/* drop the reference taken when this snapshot was queued */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
							" wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
					__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
2924
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002925
/**
 * Registers a callback for SMSM state notifications when the specified
 * bits change.  (Header comment fixed: it previously described the
 * deregister function.)
 *
 * @smsm_entry  Processor entry to monitor
 * @mask        Bits to monitor (OR-ed into an existing registration for
 *              the same notify/data pair)
 * @notify      Notification function to register
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0  inserted new entry
 *  1  updated mask of existing entry
 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	/* merge into an existing registration if notify+data match, and
	 * accumulate the union of all registered masks as we go */
	state = &smsm_states[smsm_entry];
	list_for_each_entry(cb_info,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		/* GFP_ATOMIC: allocation happens under the smsm mutex and
		 * is kept non-blocking */
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* apply the sticky overrides set via smsm_change_intr_mask */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
3010
3011
/**
 * Deregisters for SMSM state notifications for the specified bits.
 *
 * @smsm_entry  Processor entry to deregister
 * @mask        Bits to deregister (if result is 0, callback is removed)
 * @notify      Notification function to deregister
 * @data        Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0  not found
 *  1  updated mask
 *  2  removed callback
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	/* _safe iteration: the matching node may be deleted mid-walk.
	 * The union of the remaining masks is accumulated in new_mask. */
	state = &smsm_states[smsm_entry];
	list_for_each_entry_safe(cb_info, cb_tmp,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
			(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				continue;
			}
		}
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* apply the sticky overrides set via smsm_change_intr_mask */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3084
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003085int smd_module_init_notifier_register(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003086{
3087 int ret;
3088 if (!nb)
3089 return -EINVAL;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003090 mutex_lock(&smd_module_init_notifier_lock);
3091 ret = raw_notifier_chain_register(&smd_module_init_notifier_list, nb);
3092 if (smd_module_inited)
3093 nb->notifier_call(nb, 0, NULL);
3094 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003095 return ret;
3096}
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003097EXPORT_SYMBOL(smd_module_init_notifier_register);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003098
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003099int smd_module_init_notifier_unregister(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003100{
3101 int ret;
3102 if (!nb)
3103 return -EINVAL;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003104 mutex_lock(&smd_module_init_notifier_lock);
3105 ret = raw_notifier_chain_unregister(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003106 nb);
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003107 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003108 return ret;
3109}
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003110EXPORT_SYMBOL(smd_module_init_notifier_unregister);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003111
/* Records that SMD module init has happened and notifies the chain.
 * Setting smd_module_inited under the lock lets
 * smd_module_init_notifier_register() call late registrants directly. */
static void smd_module_init_notify(uint32_t state, void *data)
{
	mutex_lock(&smd_module_init_notifier_lock);
	smd_module_inited = 1;
	raw_notifier_call_chain(&smd_module_init_notifier_list,
		state, data);
	mutex_unlock(&smd_module_init_notifier_lock);
}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003120
3121int smd_core_init(void)
3122{
3123 int r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003124 unsigned long flags = IRQF_TRIGGER_RISING;
3125 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003126
Brian Swetland37521a32009-07-01 18:30:47 -07003127 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003128 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003129 if (r < 0)
3130 return r;
3131 r = enable_irq_wake(INT_A9_M2A_0);
3132 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003133 pr_err("smd_core_init: "
3134 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003135
Eric Holmberg98c6c642012-02-24 11:29:35 -07003136 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003137 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003138 if (r < 0) {
3139 free_irq(INT_A9_M2A_0, 0);
3140 return r;
3141 }
3142 r = enable_irq_wake(INT_A9_M2A_5);
3143 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003144 pr_err("smd_core_init: "
3145 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003146
Brian Swetland37521a32009-07-01 18:30:47 -07003147#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003148#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3149 flags |= IRQF_SHARED;
3150#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003151 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003152 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003153 if (r < 0) {
3154 free_irq(INT_A9_M2A_0, 0);
3155 free_irq(INT_A9_M2A_5, 0);
3156 return r;
3157 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003158
Eric Holmberg98c6c642012-02-24 11:29:35 -07003159 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3160 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003161 if (r < 0) {
3162 free_irq(INT_A9_M2A_0, 0);
3163 free_irq(INT_A9_M2A_5, 0);
3164 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3165 return r;
3166 }
3167
3168 r = enable_irq_wake(INT_ADSP_A11);
3169 if (r < 0)
3170 pr_err("smd_core_init: "
3171 "enable_irq_wake failed for INT_ADSP_A11\n");
3172
3173#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3174 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3175 if (r < 0)
3176 pr_err("smd_core_init: enable_irq_wake "
3177 "failed for INT_ADSP_A11_SMSM\n");
3178#endif
3179 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003180#endif
3181
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003182#if defined(CONFIG_DSPS)
3183 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3184 flags, "smd_dev", smd_dsps_irq_handler);
3185 if (r < 0) {
3186 free_irq(INT_A9_M2A_0, 0);
3187 free_irq(INT_A9_M2A_5, 0);
3188 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003189 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003190 return r;
3191 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003192
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003193 r = enable_irq_wake(INT_DSPS_A11);
3194 if (r < 0)
3195 pr_err("smd_core_init: "
3196 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003197#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003198
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003199#if defined(CONFIG_WCNSS)
3200 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3201 flags, "smd_dev", smd_wcnss_irq_handler);
3202 if (r < 0) {
3203 free_irq(INT_A9_M2A_0, 0);
3204 free_irq(INT_A9_M2A_5, 0);
3205 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003206 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003207 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3208 return r;
3209 }
3210
3211 r = enable_irq_wake(INT_WCNSS_A11);
3212 if (r < 0)
3213 pr_err("smd_core_init: "
3214 "enable_irq_wake failed for INT_WCNSS_A11\n");
3215
Eric Holmberg98c6c642012-02-24 11:29:35 -07003216 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3217 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003218 if (r < 0) {
3219 free_irq(INT_A9_M2A_0, 0);
3220 free_irq(INT_A9_M2A_5, 0);
3221 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003222 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003223 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3224 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3225 return r;
3226 }
3227
3228 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3229 if (r < 0)
3230 pr_err("smd_core_init: "
3231 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3232#endif
3233
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003234#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003235 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3236 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003237 if (r < 0) {
3238 free_irq(INT_A9_M2A_0, 0);
3239 free_irq(INT_A9_M2A_5, 0);
3240 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003241 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003242 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3243 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003244 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003245 return r;
3246 }
3247
3248 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3249 if (r < 0)
3250 pr_err("smd_core_init: "
3251 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3252#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003253 SMD_INFO("smd_core_init() done\n");
3254
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003255 return 0;
3256}
3257
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303258static int intr_init(struct interrupt_config_item *private_irq,
3259 struct smd_irq_config *platform_irq,
3260 struct platform_device *pdev
3261 )
3262{
3263 int irq_id;
3264 int ret;
3265 int ret_wake;
3266
3267 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3268 private_irq->out_offset = platform_irq->out_offset;
3269 private_irq->out_base = platform_irq->out_base;
3270
3271 irq_id = platform_get_irq_byname(
3272 pdev,
3273 platform_irq->irq_name
3274 );
3275 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3276 platform_irq->irq_name, irq_id);
3277 ret = request_irq(irq_id,
3278 private_irq->irq_handler,
3279 platform_irq->flags,
3280 platform_irq->device_name,
3281 (void *)platform_irq->dev_id
3282 );
3283 if (ret < 0) {
3284 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003285 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303286 } else {
3287 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003288 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303289 ret_wake = enable_irq_wake(irq_id);
3290 if (ret_wake < 0) {
3291 pr_err("smd: enable_irq_wake failed on %s",
3292 platform_irq->irq_name);
3293 }
3294 }
3295
3296 return ret;
3297}
3298
Jeff Hugobdc734d2012-03-26 16:05:39 -06003299int sort_cmp_func(const void *a, const void *b)
3300{
3301 struct smem_area *left = (struct smem_area *)(a);
3302 struct smem_area *right = (struct smem_area *)(b);
3303
3304 return left->phys_addr - right->phys_addr;
3305}
3306
/*
 * smd_core_platform_init() - platform-data driven SMD bring-up.
 *
 * Reads the smd_platform config attached to @pdev and, in order:
 *  1. latches the SSR handshake-disable flag if an SSR config exists;
 *  2. optionally maps every described SMEM region with
 *     ioremap_nocache() and sorts the table by physical address;
 *  3. registers the SMD (and, where supported, SMSM) interrupt for
 *     each subsystem edge via intr_init() and records the edge's
 *     subsystem name for SSR lookups.
 *
 * Error unwinding: intr_failed frees every IRQ whose recorded irq_id
 * is >= 0 (intr_init stores the negative error on failure, and edges
 * without SMSM are expected to carry a non-registerable id in platform
 * data -- TODO confirm board files pre-set those ids), then falls
 * through to smem_failed, which unmaps the smem_idx regions mapped so
 * far (the ioremap failure path pre-increments smem_idx so the
 * decrement-first loop is correct) and frees the table.
 *
 * Return: 0 on success, negative errno on failure.
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;
	struct smd_smem_regions *smd_smem_areas;
	int smem_idx = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	smd_smem_areas = smd_platform_data->smd_smem_areas;
	if (smd_smem_areas) {
		num_smem_areas = smd_platform_data->num_smem_areas;
		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
						GFP_KERNEL);
		if (!smem_areas) {
			pr_err("%s: smem_areas kmalloc failed\n", __func__);
			err_ret = -ENOMEM;
			goto smem_areas_alloc_fail;
		}

		for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
			smem_areas[smem_idx].phys_addr =
					smd_smem_areas[smem_idx].phys_addr;
			smem_areas[smem_idx].size =
					smd_smem_areas[smem_idx].size;
			smem_areas[smem_idx].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[smem_idx].phys_addr),
				smem_areas[smem_idx].size);
			if (!smem_areas[smem_idx].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr:%p"
					" size: %x\n", __func__,
					smem_areas[smem_idx].phys_addr,
					smem_areas[smem_idx].size);
				err_ret = -ENOMEM;
				/* pre-increment so smem_failed unmaps
				 * only the regions actually mapped */
				++smem_idx;
				goto smem_failed;
			}
		}
		/* keep the table address-ordered for later lookups */
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		/* NOTE(review): when smsm_int.irq_id == 0 this re-checks
		 * the smd ret (already known >= 0) -- harmless, but the
		 * check only guards the smsm path by accident. */
		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}


	SMD_INFO("smd_core_platform_init() done\n");
	return 0;

intr_failed:
	pr_err("smd: deregistering IRQs\n");
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
smem_failed:
	for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
		iounmap(smem_areas[smem_idx].virt_addr);
	kfree(smem_areas);
smem_areas_alloc_fail:
	return err_ret;
}
3422
Gregory Bean4416e9e2010-07-28 10:22:12 -07003423static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003424{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303425 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003426
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303427 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003428 INIT_WORK(&probe_work, smd_channel_probe_worker);
3429
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003430 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3431 if (IS_ERR(channel_close_wq)) {
3432 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3433 return -ENOMEM;
3434 }
3435
3436 if (smsm_init()) {
3437 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003438 return -1;
3439 }
3440
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303441 if (pdev) {
3442 if (pdev->dev.of_node) {
3443 pr_err("SMD: Device tree not currently supported\n");
3444 return -ENODEV;
3445 } else if (pdev->dev.platform_data) {
3446 ret = smd_core_platform_init(pdev);
3447 if (ret) {
3448 pr_err(
3449 "SMD: smd_core_platform_init() failed\n");
3450 return -ENODEV;
3451 }
3452 } else {
3453 ret = smd_core_init();
3454 if (ret) {
3455 pr_err("smd_core_init() failed\n");
3456 return -ENODEV;
3457 }
3458 }
3459 } else {
3460 pr_err("SMD: PDEV not found\n");
3461 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003462 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003463
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003464 smd_initialized = 1;
3465
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003466 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003467 smsm_irq_handler(0, 0);
3468 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003469
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003470 return 0;
3471}
3472
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

/*
 * Subsystem-restart registrations: one entry per restartable processor
 * edge, all sharing restart_notifier_cb.  Note that "gss" maps to
 * SMD_MODEM as well -- presumably GSS shares the modem's SMD edge on
 * affected targets; TODO confirm against the board configuration.
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
};
3484
3485static int restart_notifier_cb(struct notifier_block *this,
3486 unsigned long code,
3487 void *data)
3488{
3489 if (code == SUBSYS_AFTER_SHUTDOWN) {
3490 struct restart_notifier_block *notifier;
3491
3492 notifier = container_of(this,
3493 struct restart_notifier_block, nb);
3494 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3495 __func__, notifier->processor,
3496 notifier->name);
3497
3498 smd_channel_reset(notifier->processor);
3499 }
3500
3501 return NOTIFY_DONE;
3502}
3503
3504static __init int modem_restart_late_init(void)
3505{
3506 int i;
3507 void *handle;
3508 struct restart_notifier_block *nb;
3509
3510 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3511 nb = &restart_notifiers[i];
3512 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3513 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3514 __func__, nb->name, handle);
3515 }
3516 return 0;
3517}
3518late_initcall(modem_restart_late_init);
3519
/*
 * Platform driver binding SMD to its platform device; msm_smd_probe()
 * performs all one-time SMD/SMSM bring-up.  No remove handler: SMD is
 * core infrastructure and is never unbound.
 */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
3527
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003528int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003529{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003530 static bool registered;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003531 int rc;
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003532
3533 if (registered)
3534 return 0;
3535
3536 registered = true;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003537 rc = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
3538 if (rc) {
3539 pr_err("%s: remote spinlock init failed %d\n", __func__, rc);
3540 return rc;
3541 }
3542 spinlocks_initialized = 1;
3543
3544 rc = platform_driver_register(&msm_smd_driver);
3545 if (rc) {
3546 pr_err("%s: msm_smd_driver register failed %d\n",
3547 __func__, rc);
3548 return rc;
3549 }
3550
3551 smd_module_init_notify(0, NULL);
3552
3553 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003554}
3555
3556module_init(msm_smd_init);
3557
3558MODULE_DESCRIPTION("MSM Shared Memory Core");
3559MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3560MODULE_LICENSE("GPL");