blob: 40ef20e98f95625c2e08ce628579a7b37064df82 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmberg6275b602012-11-19 13:05:04 -07004 * Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f9412012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Eric Holmberg144c2de2012-10-04 13:37:28 -060038#include <linux/suspend.h>
Jeff Hugo412356e2012-09-27 17:14:23 -060039#include <linux/of.h>
40#include <linux/of_irq.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070041#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070042#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070043#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070044#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053045#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070046#include <mach/proc_comm.h>
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +053047#include <mach/msm_ipc_logging.h>
Ram Somani8b9589f2012-04-03 12:07:18 +053048#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070049
50#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070051#include "modem_notifier.h"
Eric Holmbergcfbc1d52013-03-13 18:30:19 -060052#include "ramdump.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070053
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070054#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060055 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060056 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070057#define CONFIG_QDSP6 1
58#endif
59
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060060#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
61 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062#define CONFIG_DSPS 1
63#endif
64
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060065#if defined(CONFIG_ARCH_MSM8960) \
66 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060068#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070069#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070070
71#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070072#define SMEM_VERSION 0x000B
73#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070074#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060075#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Eric Holmberge5266d32013-02-25 18:29:27 -070076#define RSPIN_INIT_WAIT_MS 1000
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070077
78uint32_t SMSM_NUM_ENTRIES = 8;
79uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070080
Eric Holmberge8a39322012-04-03 15:14:02 -060081/* Legacy SMSM interrupt notifications */
82#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
83 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070084
85enum {
86 MSM_SMD_DEBUG = 1U << 0,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070087 MSM_SMSM_DEBUG = 1U << 1,
88 MSM_SMD_INFO = 1U << 2,
89 MSM_SMSM_INFO = 1U << 3,
Eric Holmberg98c6c642012-02-24 11:29:35 -070090 MSM_SMx_POWER_INFO = 1U << 4,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070091};
92
/*
 * Pointers into the shared-memory SMSM region.  Indexing follows the
 * SMSM_*_ADDR() macros below: state and intr_mux are indexed per entry,
 * intr_mask is indexed per (entry, host) pair.
 */
struct smsm_shared_info {
	uint32_t *state;	/* per-entry SMSM state words */
	uint32_t *intr_mask;	/* per-entry, per-host interrupt masks */
	uint32_t *intr_mux;	/* per-entry interrupt mux counters */
};
98
99static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f9412012-03-19 10:04:22 -0600100static struct kfifo smsm_snapshot_fifo;
101static struct wake_lock smsm_snapshot_wakelock;
102static int smsm_snapshot_count;
103static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700104
/*
 * Shared-memory descriptor of the SMSM table dimensions (number of
 * hosts and state entries), plus reserved padding words.
 */
struct smsm_size_info_type {
	uint32_t num_hosts;
	uint32_t num_entries;
	uint32_t reserved0;
	uint32_t reserved1;
};
111
/*
 * One registered SMSM state-change callback.  The callback fires when
 * bits selected by @mask change; @data is passed back verbatim.
 */
struct smsm_state_cb_info {
	struct list_head cb_list;	/* link in smsm_state_info.callbacks */
	uint32_t mask;			/* state bits this client cares about */
	void *data;			/* opaque client cookie */
	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
};
118
/*
 * Per-SMSM-entry bookkeeping: the registered callbacks, the last value
 * observed (for old/new diffing), and pending interrupt-mask updates.
 */
struct smsm_state_info {
	struct list_head callbacks;	/* list of smsm_state_cb_info */
	uint32_t last_value;		/* last state value delivered */
	uint32_t intr_mask_set;		/* mask bits to be set */
	uint32_t intr_mask_clear;	/* mask bits to be cleared */
};
125
/*
 * Incoming/outgoing interrupt configuration for one SMD or SMSM edge.
 * The out_* fields describe the register write that raises the outgoing
 * interrupt toward the remote processor.
 */
struct interrupt_config_item {
	/* must be initialized */
	irqreturn_t (*irq_handler)(int req, void *data);
	/* outgoing interrupt config (set from platform data) */
	uint32_t out_bit_pos;		/* bit value written to trigger */
	void __iomem *out_base;		/* mapped base; NULL = use legacy macro */
	uint32_t out_offset;		/* register offset from out_base */
	int irq_id;			/* incoming IRQ number; assumed set at
					 * registration time — TODO confirm */
};
135
/* Pair of interrupt configs (SMD data path + SMSM state path) per edge. */
struct interrupt_config {
	struct interrupt_config_item smd;
	struct interrupt_config_item smsm;
};
140
141static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700142static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530143static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700144static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530145static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700146static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530147static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700148static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600149static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530150static irqreturn_t smsm_irq_handler(int irq, void *data);
151
/*
 * Static handler table, one entry per remote subsystem.  The outgoing
 * (out_*) fields are filled in later from platform data; only the
 * handlers are known at compile time.
 */
static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
	[SMD_MODEM] = {
		.smd.irq_handler = smd_modem_irq_handler,
		.smsm.irq_handler = smsm_modem_irq_handler,
	},
	[SMD_Q6] = {
		.smd.irq_handler = smd_dsp_irq_handler,
		.smsm.irq_handler = smsm_dsp_irq_handler,
	},
	[SMD_DSPS] = {
		.smd.irq_handler = smd_dsps_irq_handler,
		.smsm.irq_handler = smsm_dsps_irq_handler,
	},
	[SMD_WCNSS] = {
		.smd.irq_handler = smd_wcnss_irq_handler,
		.smsm.irq_handler = smsm_wcnss_irq_handler,
	},
	[SMD_RPM] = {
		.smd.irq_handler = smd_rpm_irq_handler,
		.smsm.irq_handler = NULL, /* does not support smsm */
	},
};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600174
/* One auxiliary shared-memory region: physical range and its mapping. */
struct smem_area {
	phys_addr_t phys_addr;		/* physical base of the region */
	resource_size_t size;		/* region size in bytes */
	void __iomem *virt_addr;	/* ioremapped virtual base */
};
180static uint32_t num_smem_areas;
181static struct smem_area *smem_areas;
Eric Holmbergcfbc1d52013-03-13 18:30:19 -0600182static struct ramdump_segment *smem_ramdump_segments;
183static void *smem_ramdump_dev;
Stepan Moskovchenkod6ee8262013-02-06 11:26:05 -0800184static void *smem_range_check(phys_addr_t base, unsigned offset);
Eric Holmbergcfbc1d52013-03-13 18:30:19 -0600185static void *smd_dev;
Jeff Hugobdc734d2012-03-26 16:05:39 -0600186
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700187struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530188
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700189#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
190#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
191 entry * SMSM_NUM_HOSTS + host)
192#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
193
194/* Internal definitions which are not exported in some targets */
195enum {
196 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700197};
198
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530199static int msm_smd_debug_mask = MSM_SMx_POWER_INFO;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700200module_param_named(debug_mask, msm_smd_debug_mask,
201 int, S_IRUGO | S_IWUSR | S_IWGRP);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530202static void *smd_log_ctx;
203#define NUM_LOG_PAGES 4
204
205#define IPC_LOG(level, x...) do { \
206 if (smd_log_ctx) \
207 ipc_log_string(smd_log_ctx, x); \
208 else \
209 printk(level x); \
210 } while (0)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700211
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700212#if defined(CONFIG_MSM_SMD_DEBUG)
213#define SMD_DBG(x...) do { \
214 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530215 IPC_LOG(KERN_DEBUG, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700216 } while (0)
217
218#define SMSM_DBG(x...) do { \
219 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530220 IPC_LOG(KERN_DEBUG, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700221 } while (0)
222
223#define SMD_INFO(x...) do { \
224 if (msm_smd_debug_mask & MSM_SMD_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530225 IPC_LOG(KERN_INFO, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700226 } while (0)
227
228#define SMSM_INFO(x...) do { \
229 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530230 IPC_LOG(KERN_INFO, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700231 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700232#define SMx_POWER_INFO(x...) do { \
233 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530234 IPC_LOG(KERN_INFO, x); \
Eric Holmberg98c6c642012-02-24 11:29:35 -0700235 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236#else
237#define SMD_DBG(x...) do { } while (0)
238#define SMSM_DBG(x...) do { } while (0)
239#define SMD_INFO(x...) do { } while (0)
240#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700241#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242#endif
243
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700244static unsigned last_heap_free = 0xffffffff;
245
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700246static inline void smd_write_intr(unsigned int val,
247 const void __iomem *addr);
248
249#if defined(CONFIG_ARCH_MSM7X30)
250#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530251 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700252#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530253 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700254#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530255 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530257 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700258#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600259#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260#define MSM_TRIG_A2WCNSS_SMD_INT
261#define MSM_TRIG_A2WCNSS_SMSM_INT
262#elif defined(CONFIG_ARCH_MSM8X60)
263#define MSM_TRIG_A2M_SMD_INT \
264 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
265#define MSM_TRIG_A2Q6_SMD_INT \
266 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
267#define MSM_TRIG_A2M_SMSM_INT \
268 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
269#define MSM_TRIG_A2Q6_SMSM_INT \
270 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
271#define MSM_TRIG_A2DSPS_SMD_INT \
272 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600273#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700274#define MSM_TRIG_A2WCNSS_SMD_INT
275#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600276#elif defined(CONFIG_ARCH_MSM9615)
277#define MSM_TRIG_A2M_SMD_INT \
278 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
279#define MSM_TRIG_A2Q6_SMD_INT \
280 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
281#define MSM_TRIG_A2M_SMSM_INT \
282 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
283#define MSM_TRIG_A2Q6_SMSM_INT \
284 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
285#define MSM_TRIG_A2DSPS_SMD_INT
286#define MSM_TRIG_A2DSPS_SMSM_INT
287#define MSM_TRIG_A2WCNSS_SMD_INT
288#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700289#elif defined(CONFIG_ARCH_FSM9XXX)
290#define MSM_TRIG_A2Q6_SMD_INT \
291 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
292#define MSM_TRIG_A2Q6_SMSM_INT \
293 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
294#define MSM_TRIG_A2M_SMD_INT \
295 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
296#define MSM_TRIG_A2M_SMSM_INT \
297 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
298#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600299#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700300#define MSM_TRIG_A2WCNSS_SMD_INT
301#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700302#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700303#define MSM_TRIG_A2M_SMD_INT \
304 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700305#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700306#define MSM_TRIG_A2M_SMSM_INT \
307 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700308#define MSM_TRIG_A2Q6_SMSM_INT
309#define MSM_TRIG_A2DSPS_SMD_INT
310#define MSM_TRIG_A2DSPS_SMSM_INT
311#define MSM_TRIG_A2WCNSS_SMD_INT
312#define MSM_TRIG_A2WCNSS_SMSM_INT
313#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
314#define MSM_TRIG_A2M_SMD_INT \
315 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
316#define MSM_TRIG_A2Q6_SMD_INT
317#define MSM_TRIG_A2M_SMSM_INT \
318 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
319#define MSM_TRIG_A2Q6_SMSM_INT
320#define MSM_TRIG_A2DSPS_SMD_INT
321#define MSM_TRIG_A2DSPS_SMSM_INT
322#define MSM_TRIG_A2WCNSS_SMD_INT
323#define MSM_TRIG_A2WCNSS_SMSM_INT
324#else /* use platform device / device tree configuration */
325#define MSM_TRIG_A2M_SMD_INT
326#define MSM_TRIG_A2Q6_SMD_INT
327#define MSM_TRIG_A2M_SMSM_INT
328#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700329#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600330#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700331#define MSM_TRIG_A2WCNSS_SMD_INT
332#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700333#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700334
Jeff Hugoee40b152012-02-09 17:39:47 -0700335/*
336 * stub out legacy macros if they are not being used so that the legacy
337 * code compiles even though it is not used
338 *
339 * these definitions should not be used in active code and will cause
340 * an early failure
341 */
342#ifndef INT_A9_M2A_0
343#define INT_A9_M2A_0 -1
344#endif
345#ifndef INT_A9_M2A_5
346#define INT_A9_M2A_5 -1
347#endif
348#ifndef INT_ADSP_A11
349#define INT_ADSP_A11 -1
350#endif
351#ifndef INT_ADSP_A11_SMSM
352#define INT_ADSP_A11_SMSM -1
353#endif
354#ifndef INT_DSPS_A11
355#define INT_DSPS_A11 -1
356#endif
357#ifndef INT_DSPS_A11_SMSM
358#define INT_DSPS_A11_SMSM -1
359#endif
360#ifndef INT_WCNSS_A11
361#define INT_WCNSS_A11 -1
362#endif
363#ifndef INT_WCNSS_A11_SMSM
364#define INT_WCNSS_A11_SMSM -1
365#endif
366
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700367#define SMD_LOOPBACK_CID 100
368
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600369#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
370static remote_spinlock_t remote_spinlock;
371
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700372static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600374static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700375
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600376static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700377static void notify_smsm_cb_clients_worker(struct work_struct *work);
378static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600379static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700380static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530381static int spinlocks_initialized;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -0600382
383/**
384 * Variables to indicate smd module initialization.
385 * Dependents to register for smd module init notifier.
386 */
387static int smd_module_inited;
388static RAW_NOTIFIER_HEAD(smd_module_init_notifier_list);
389static DEFINE_MUTEX(smd_module_init_notifier_lock);
390static void smd_module_init_notify(uint32_t state, void *data);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530391static int smd_stream_write_avail(struct smd_channel *ch);
392static int smd_stream_read_avail(struct smd_channel *ch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700393
394static inline void smd_write_intr(unsigned int val,
395 const void __iomem *addr)
396{
397 wmb();
398 __raw_writel(val, addr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700399}
400
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530401static inline void log_notify(uint32_t subsystem, smd_channel_t *ch)
402{
403 const char *subsys = smd_edge_to_subsystem(subsystem);
404
Jay Chokshi83b4f6132013-02-14 16:20:56 -0800405 (void) subsys;
406
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530407 if (!ch)
408 SMx_POWER_INFO("Apps->%s\n", subsys);
409 else
410 SMx_POWER_INFO(
411 "Apps->%s ch%d '%s': tx%d/rx%d %dr/%dw : %dr/%dw\n",
412 subsys, ch->n, ch->name,
413 ch->fifo_size -
414 (smd_stream_write_avail(ch) + 1),
415 smd_stream_read_avail(ch),
416 ch->half_ch->get_tail(ch->send),
417 ch->half_ch->get_head(ch->send),
418 ch->half_ch->get_tail(ch->recv),
419 ch->half_ch->get_head(ch->recv)
420 );
421}
422
423static inline void notify_modem_smd(smd_channel_t *ch)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700424{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530425 static const struct interrupt_config_item *intr
426 = &private_intr_config[SMD_MODEM].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530427
428 log_notify(SMD_APPS_MODEM, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700429 if (intr->out_base) {
430 ++interrupt_stats[SMD_MODEM].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530431 smd_write_intr(intr->out_bit_pos,
432 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700433 } else {
434 ++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530435 MSM_TRIG_A2M_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700436 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700437}
438
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530439static inline void notify_dsp_smd(smd_channel_t *ch)
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700440{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530441 static const struct interrupt_config_item *intr
442 = &private_intr_config[SMD_Q6].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530443
444 log_notify(SMD_APPS_QDSP, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700445 if (intr->out_base) {
446 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530447 smd_write_intr(intr->out_bit_pos,
448 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700449 } else {
450 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530451 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700452 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700453}
454
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530455static inline void notify_dsps_smd(smd_channel_t *ch)
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530456{
457 static const struct interrupt_config_item *intr
458 = &private_intr_config[SMD_DSPS].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530459
460 log_notify(SMD_APPS_DSPS, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700461 if (intr->out_base) {
462 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530463 smd_write_intr(intr->out_bit_pos,
464 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700465 } else {
466 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530467 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700468 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530469}
470
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530471static inline void notify_wcnss_smd(struct smd_channel *ch)
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530472{
473 static const struct interrupt_config_item *intr
474 = &private_intr_config[SMD_WCNSS].smd;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530475
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530476 log_notify(SMD_APPS_WCNSS, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700477 if (intr->out_base) {
478 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530479 smd_write_intr(intr->out_bit_pos,
480 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700481 } else {
482 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530483 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700484 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530485}
486
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530487static inline void notify_rpm_smd(smd_channel_t *ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600488{
489 static const struct interrupt_config_item *intr
490 = &private_intr_config[SMD_RPM].smd;
491
492 if (intr->out_base) {
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530493 log_notify(SMD_APPS_RPM, ch);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600494 ++interrupt_stats[SMD_RPM].smd_out_config_count;
495 smd_write_intr(intr->out_bit_pos,
496 intr->out_base + intr->out_offset);
497 }
498}
499
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530500static inline void notify_modem_smsm(void)
501{
502 static const struct interrupt_config_item *intr
503 = &private_intr_config[SMD_MODEM].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700504 if (intr->out_base) {
505 ++interrupt_stats[SMD_MODEM].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530506 smd_write_intr(intr->out_bit_pos,
507 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700508 } else {
509 ++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530510 MSM_TRIG_A2M_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700511 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530512}
513
514static inline void notify_dsp_smsm(void)
515{
516 static const struct interrupt_config_item *intr
517 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700518 if (intr->out_base) {
519 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530520 smd_write_intr(intr->out_bit_pos,
521 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700522 } else {
523 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530524 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700525 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530526}
527
528static inline void notify_dsps_smsm(void)
529{
530 static const struct interrupt_config_item *intr
531 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700532 if (intr->out_base) {
533 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530534 smd_write_intr(intr->out_bit_pos,
535 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700536 } else {
537 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530538 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700539 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530540}
541
542static inline void notify_wcnss_smsm(void)
543{
544 static const struct interrupt_config_item *intr
545 = &private_intr_config[SMD_WCNSS].smsm;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530546
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700547 if (intr->out_base) {
548 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530549 smd_write_intr(intr->out_bit_pos,
550 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700551 } else {
552 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530553 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700554 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530555}
556
/**
 * notify_other_smsm() - fan out an SMSM state change to interested hosts
 * @smsm_entry:  SMSM entry whose state changed
 * @notify_mask: bits that changed (compared against each host's intr mask)
 *
 * Each remote host is interrupted only if its shared-memory interrupt
 * mask for this entry selects one of the changed bits.  The modem is a
 * special case: protocols that predate smsm_intr_mask are always
 * notified.  Local (apps) callbacks are snapshotted last.
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	   but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/* 8x50 signals Q6 through an incrementing mux counter
		 * in shared memory in addition to the interrupt */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
	     & notify_mask)) {
		smsm_cb_snapshot(0);
	}
}
605
Eric Holmberg144c2de2012-10-04 13:37:28 -0600606static int smsm_pm_notifier(struct notifier_block *nb,
607 unsigned long event, void *unused)
608{
609 switch (event) {
610 case PM_SUSPEND_PREPARE:
611 smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
612 break;
613
614 case PM_POST_SUSPEND:
615 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
616 break;
617 }
618 return NOTIFY_DONE;
619}
620
/* Notifier block wrapping smsm_pm_notifier() for the PM event chain. */
static struct notifier_block smsm_pm_nb = {
	.notifier_call = smsm_pm_notifier,
	.priority = 0,
};
625
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700626void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700627{
628 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700629 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700630
631 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
632 if (x != 0) {
633 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700634 SMD_INFO("smem: DIAG '%s'\n", x);
635 }
636
637 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
638 if (x != 0) {
639 x[size - 1] = 0;
640 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700641 }
642}
643
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700644
/*
 * Last-resort handler for a detected modem crash: dump the shared-memory
 * diagnostics, then spin forever.  The infinite loop is deliberate —
 * the modem or the watchdog is expected to reset the system.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
659
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700660int smsm_check_for_modem_crash(void)
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700661{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700662 /* if the modem's not ready yet, we have to hope for the best */
663 if (!smsm_info.state)
664 return 0;
Arve Hjønnevåg28379412009-05-20 16:52:36 -0700665
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700666 if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700667 handle_modem_crash();
668 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700669 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700670 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700671}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700672EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700673
/* the spinlock is used to synchronize between the
 * irq handler and code that mutates the channel
 * list or fiddles with channel state
 */
static DEFINE_SPINLOCK(smd_lock);
/* non-static: also visible to other translation units —
 * NOTE(review): presumably serializes SMEM heap access; confirm users */
DEFINE_SPINLOCK(smem_lock);

/* the mutex is used during open() and close()
 * operations to avoid races while creating or
 * destroying smd_channel structures
 */
static DEFINE_MUTEX(smd_creation_mutex);

/* non-zero once the SMD core has finished bring-up */
static int smd_initialized;
/* v1 shared-memory channel layout: both half-channel control blocks
 * with their fixed-size data fifos inline in one structure.
 * Layout is shared with remote processors — do not reorder fields. */
struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

/* v2 layout: half-channel control blocks only, no inline data here */
struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

/* v2 layout variant for edges requiring word-aligned accesses */
struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};
705
/* Describes one SMD edge: the processor IDs on each end and an optional
 * subsystem name (empty string when none is assigned). */
struct edge_to_pid {
	uint32_t local_pid;
	uint32_t remote_pid;
	char subsys_name[SMD_MAX_CH_NAME_LEN];
};

/**
 * Maps edge type to local and remote processor ID's.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "adsp"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
736
/* Binds a notifier block to a named remote processor for restart events */
struct restart_notifier_block {
	unsigned processor;	/* remote processor ID being watched */
	char *name;
	struct notifier_block nb;
};

/* when set, skip the SMSM reset handshake — NOTE(review): setter not
 * visible in this chunk; confirm where it is configured */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel lifecycle lists (closed / waiting close / pending close) */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
/* per-edge lists of active channels, walked by handle_smd_irq() */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one in-use flag per slot of the 64-entry SMEM channel alloc table */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
767
768static void smd_channel_probe_worker(struct work_struct *work)
769{
770 struct smd_alloc_elm *shared;
771 unsigned n;
772 uint32_t type;
773
774 shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
775
776 if (!shared) {
777 pr_err("%s: allocation table not initialized\n", __func__);
778 return;
779 }
780
781 mutex_lock(&smd_probe_lock);
782 for (n = 0; n < 64; n++) {
783 if (smd_ch_allocated[n])
784 continue;
785
786 /* channel should be allocated only if APPS
787 processor is involved */
788 type = SMD_CHANNEL_TYPE(shared[n].type);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600789 if (type >= ARRAY_SIZE(edge_to_pids) ||
790 edge_to_pids[type].local_pid != SMD_APPS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700791 continue;
792 if (!shared[n].ref_count)
793 continue;
794 if (!shared[n].name[0])
795 continue;
796
797 if (!smd_alloc_channel(&shared[n]))
798 smd_ch_allocated[n] = 1;
799 else
800 SMD_INFO("Probe skipping ch %d, not allocated\n", n);
801 }
802 mutex_unlock(&smd_probe_lock);
803}
804
805/**
806 * Lookup processor ID and determine if it belongs to the proved edge
807 * type.
808 *
809 * @shared2: Pointer to v2 shared channel structure
810 * @type: Edge type
811 * @pid: Processor ID of processor on edge
812 * @local_ch: Channel that belongs to processor @pid
813 * @remote_ch: Other side of edge contained @pid
Jeff Hugo00be6282012-09-07 11:24:32 -0600814 * @is_word_access_ch: Bool, is this a word aligned access channel
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700815 *
816 * Returns 0 for not on edge, 1 for found on edge
817 */
Jeff Hugo00be6282012-09-07 11:24:32 -0600818static int pid_is_on_edge(void *shared2,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700819 uint32_t type, uint32_t pid,
Jeff Hugo00be6282012-09-07 11:24:32 -0600820 void **local_ch,
821 void **remote_ch,
822 int is_word_access_ch
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700823 )
824{
825 int ret = 0;
826 struct edge_to_pid *edge;
Jeff Hugo00be6282012-09-07 11:24:32 -0600827 void *ch0;
828 void *ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700829
830 *local_ch = 0;
831 *remote_ch = 0;
832
833 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
834 return 0;
835
Jeff Hugo00be6282012-09-07 11:24:32 -0600836 if (is_word_access_ch) {
837 ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
838 ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
839 } else {
840 ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
841 ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
842 }
843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700844 edge = &edge_to_pids[type];
845 if (edge->local_pid != edge->remote_pid) {
846 if (pid == edge->local_pid) {
Jeff Hugo00be6282012-09-07 11:24:32 -0600847 *local_ch = ch0;
848 *remote_ch = ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700849 ret = 1;
850 } else if (pid == edge->remote_pid) {
Jeff Hugo00be6282012-09-07 11:24:32 -0600851 *local_ch = ch1;
852 *remote_ch = ch0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700853 ret = 1;
854 }
855 }
856
857 return ret;
858}
859
Eric Holmberg17992c12012-02-29 12:54:44 -0700860/*
861 * Returns a pointer to the subsystem name or NULL if no
862 * subsystem name is available.
863 *
864 * @type - Edge definition
865 */
866const char *smd_edge_to_subsystem(uint32_t type)
867{
868 const char *subsys = NULL;
869
870 if (type < ARRAY_SIZE(edge_to_pids)) {
871 subsys = edge_to_pids[type].subsys_name;
872 if (subsys[0] == 0x0)
873 subsys = NULL;
874 }
875 return subsys;
876}
877EXPORT_SYMBOL(smd_edge_to_subsystem);
878
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700879/*
880 * Returns a pointer to the subsystem name given the
881 * remote processor ID.
Arun Kumar Neelakantama35c7a72012-10-18 15:45:15 +0530882 * subsystem is not necessarily PIL-loadable
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700883 *
884 * @pid Remote processor ID
885 * @returns Pointer to subsystem name or NULL if not found
886 */
887const char *smd_pid_to_subsystem(uint32_t pid)
888{
889 const char *subsys = NULL;
890 int i;
891
892 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
Arun Kumar Neelakantama35c7a72012-10-18 15:45:15 +0530893 if (pid == edge_to_pids[i].remote_pid) {
894 if (edge_to_pids[i].subsys_name[0] != 0x0) {
895 subsys = edge_to_pids[i].subsys_name;
896 break;
897 } else if (pid == SMD_RPM) {
898 subsys = "rpm";
899 break;
900 }
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700901 }
902 }
903
904 return subsys;
905}
906EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700907
/*
 * Force one half-channel into @new_state (used when the remote side of
 * the edge has restarted).  Half-channels already in SMD_SS_CLOSED are
 * left untouched.  Flow-control flags (DSR/CTS/CD) are dropped and
 * fSTATE is raised so the state change is noticed.
 *
 * @void_ch: half-channel pointer; concrete type depends on access width
 * @new_state: state to force
 * @is_word_access_ch: non-zero for word-aligned-access channels
 */
static void smd_reset_edge(void *void_ch, unsigned new_state,
				int is_word_access_ch)
{
	if (is_word_access_ch) {
		struct smd_half_channel_word_access *ch =
			(struct smd_half_channel_word_access *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	} else {
		struct smd_half_channel *ch =
			(struct smd_half_channel *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700933
/*
 * Walk the channel allocation table and force every half-channel owned
 * by processor @pid into @new_state.  Called during subsystem restart.
 *
 * @shared: the 64-entry SMEM channel allocation table
 * @new_state: state to force (SMD_SS_CLOSING then SMD_SS_CLOSED)
 * @pid: processor ID of the restarted remote
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *local_ch;
	void *remote_ch;
	int is_word_access;

	for (n = 0; n < SMD_CHANNELS; n++) {
		/* skip table slots that never became real channels */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		/* look up the existing shared structure for this slot */
		if (is_word_access)
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2_word_access));
		else
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
							is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch, is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);
	}
}
975
976
/*
 * Reset all SMD/SMSM state shared with processor @restart_pid after it
 * has restarted: clear its SMSM state word, then walk every channel it
 * owned through CLOSING and finally CLOSED, notifying local clients and
 * all remote processors at each step.  The mb() before each notify
 * round orders the shared-memory state writes ahead of the interrupts.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMx_POWER_INFO("%s: starting reset\n", __func__);

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd(NULL);
	notify_dsp_smd(NULL);
	notify_dsps_smd(NULL);
	notify_wcnss_smd(NULL);
	notify_rpm_smd(NULL);

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();
	smd_fake_irq_handler(0);
	notify_modem_smd(NULL);
	notify_dsp_smd(NULL);
	notify_dsps_smd(NULL);
	notify_wcnss_smd(NULL);
	notify_rpm_smd(NULL);

	SMx_POWER_INFO("%s: finished reset\n", __func__);
}
1043
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001044/* how many bytes are available for reading */
1045static int smd_stream_read_avail(struct smd_channel *ch)
1046{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001047 return (ch->half_ch->get_head(ch->recv) -
1048 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001049}
1050
1051/* how many bytes we are free to write */
1052static int smd_stream_write_avail(struct smd_channel *ch)
1053{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001054 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1055 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001056}
1057
1058static int smd_packet_read_avail(struct smd_channel *ch)
1059{
1060 if (ch->current_packet) {
1061 int n = smd_stream_read_avail(ch);
1062 if (n > ch->current_packet)
1063 n = ch->current_packet;
1064 return n;
1065 } else {
1066 return 0;
1067 }
1068}
1069
1070static int smd_packet_write_avail(struct smd_channel *ch)
1071{
1072 int n = smd_stream_write_avail(ch);
1073 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1074}
1075
1076static int ch_is_open(struct smd_channel *ch)
1077{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001078 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1079 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1080 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001081}
1082
1083/* provide a pointer and length to readable data in the fifo */
1084static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
1085{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001086 unsigned head = ch->half_ch->get_head(ch->recv);
1087 unsigned tail = ch->half_ch->get_tail(ch->recv);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001088 *ptr = (void *) (ch->recv_data + tail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001089
1090 if (tail <= head)
1091 return head - tail;
1092 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001093 return ch->fifo_size - tail;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001094}
1095
/* non-zero when the remote side has asked us not to interrupt on read */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1100
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	/* make the tail update visible before raising fTAIL for the remote */
	wmb();
	ch->half_ch->set_fTAIL(ch->send, 1);
}
1110
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * @user_buf: non-zero if @_data is a userspace pointer (copy_to_user)
 * Returns the number of bytes consumed from the fifo.  Note that on a
 * partial copy_to_user failure the fifo is still advanced by n.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		/* n is the contiguous readable run at the current tail */
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1151
/* update_state callback for stream-mode channels */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1156
/* update_state callback for packet-mode channels: when not inside a
 * packet, consume headers from the stream until one with a non-zero
 * payload length is found (hdr[0] carries the length). */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		ch->current_packet = hdr[0];
	}
}
1176
1177/* provide a pointer and length to next free space in the fifo */
1178static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
1179{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001180 unsigned head = ch->half_ch->get_head(ch->send);
1181 unsigned tail = ch->half_ch->get_tail(ch->send);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001182 *ptr = (void *) (ch->send_data + head);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001183
1184 if (head < tail) {
1185 return tail - head - 1;
1186 } else {
1187 if (tail == 0)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001188 return ch->fifo_size - head - 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001189 else
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001190 return ch->fifo_size - head;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001191 }
1192}
1193
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	/* order the payload writes before raising fHEAD for the remote */
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1205
/* Move our half-channel to state @n and signal the remote processor.
 * The DSR/CTS/CD handshake flags are raised only for OPENED; the state
 * and fSTATE writes precede the cross-processor notification. */
static void ch_set_state(struct smd_channel *ch, unsigned n)
{
	if (n == SMD_SS_OPENED) {
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	} else {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
	}
	ch->half_ch->set_state(ch->send, n);
	ch->half_ch->set_fSTATE(ch->send, 1);
	ch->notify_other_cpu(ch);
}
1221
1222static void do_smd_probe(void)
1223{
1224 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
1225 if (shared->heap_info.free_offset != last_heap_free) {
1226 last_heap_free = shared->heap_info.free_offset;
1227 schedule_work(&probe_work);
1228 }
1229}
1230
/*
 * React to a remote half-channel state transition by driving our own
 * half-channel through the open/close handshake and notifying the
 * local client.  Called with smd_lock held (from irq handling).
 */
static void smd_state_change(struct smd_channel *ch,
		unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is (re)opening: reset our fifo indices and follow */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed: drop any partial packet and tell client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closed: finish teardown in process context */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
				&finalize_channel_close_work);
		}
		break;
	}
}
1276
/* Service channels that are mid-close: acknowledge remote fSTATE and
 * feed any remote state change into smd_state_change().  Uses the
 * _safe iterator because smd_state_change() may move entries off the
 * closing list. */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1294
/*
 * Core SMD interrupt service: walk every channel on @list, consume the
 * remote event flags (fHEAD = data written, fTAIL = space freed,
 * fSTATE = state change) and dispatch DATA/STATUS events to clients.
 *
 * @notify: edge notify callback — currently unused in this function
 *          (NOTE(review): callers pass it; confirm it is intentional)
 */
static void handle_smd_irq(struct list_head *list,
		void (*notify)(smd_channel_t *ch))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			/* bit 1: new data, bit 2: space freed, bit 4: state */
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO(
				"SMD ch%d '%s' Data event 0x%x tx%d/rx%d %dr/%dw : %dr/%dw\n",
				ch->n, ch->name,
				ch_flags,
				ch->fifo_size -
					(smd_stream_write_avail(ch) + 1),
				smd_stream_read_avail(ch),
				ch->half_ch->get_tail(ch->send),
				ch->half_ch->get_head(ch->send),
				ch->half_ch->get_tail(ch->recv),
				ch->half_ch->get_head(ch->recv)
				);
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* state-only event: don't double-notify after a change */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1354
/* Log an incoming SMD interrupt with its originating subsystem name */
static inline void log_irq(uint32_t subsystem)
{
	const char *subsys = smd_edge_to_subsystem(subsystem);

	/* suppress "unused" warning when SMx_POWER_INFO compiles out */
	(void) subsys;

	SMx_POWER_INFO("SMD Int %s->Apps\n", subsys);
}
1363
/* IRQ from the modem: service modem-edge channels, then closing ones */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_MODEM);
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1372
/* IRQ from the QDSP/ADSP: service its channel list, then closing ones */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_QDSP);
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001381
/* IRQ from the DSPS (sensors): service its channels, then closing ones */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_DSPS);
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001390
/* IRQ from WCNSS: service its channel list, then closing ones */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_WCNSS);
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1399
/* IRQ from the RPM: service its channel list, then closing ones */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_RPM);
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001408
/* Tasklet body: poll every edge as if its interrupt had fired. */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}

static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1420
Brian Swetland37521a32009-07-01 18:30:47 -07001421static inline int smd_need_int(struct smd_channel *ch)
1422{
1423 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001424 if (ch->half_ch->get_fHEAD(ch->recv) ||
1425 ch->half_ch->get_fTAIL(ch->recv) ||
1426 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001427 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001428 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001429 return 1;
1430 }
1431 return 0;
1432}
1433
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001434void smd_sleep_exit(void)
1435{
1436 unsigned long flags;
1437 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001438 int need_int = 0;
1439
1440 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001441 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1442 if (smd_need_int(ch)) {
1443 need_int = 1;
1444 break;
1445 }
1446 }
1447 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1448 if (smd_need_int(ch)) {
1449 need_int = 1;
1450 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001451 }
1452 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001453 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1454 if (smd_need_int(ch)) {
1455 need_int = 1;
1456 break;
1457 }
1458 }
1459 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1460 if (smd_need_int(ch)) {
1461 need_int = 1;
1462 break;
1463 }
1464 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001465 spin_unlock_irqrestore(&smd_lock, flags);
1466 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001467
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001468 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001469 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001470 tasklet_schedule(&smd_fake_irq_tasklet);
1471 }
1472}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001473EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001474
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001475static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001476{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001477 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1478 return 0;
1479 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001480 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001481
1482 /* for cases where xfer type is 0 */
1483 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001484 return 0;
1485
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001486 /* for cases where xfer type is 0 */
1487 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1488 return 0;
1489
1490 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001491 return 1;
1492 else
1493 return 0;
1494}
1495
/*
 * Write up to @len bytes from @_data into the channel's TX FIFO.
 * @user_buf: nonzero if @_data is a userspace pointer (copy_from_user path).
 * Returns the number of bytes actually written (possibly short if the FIFO
 * fills), 0 if none fit, or -EINVAL for a negative length.  Notifies the
 * remote processor only if at least one byte was written.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	/* ch_write_buffer() yields a contiguous writable span of the FIFO */
	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			/* channel dropped mid-write: report 0 bytes written */
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			/* r > 0 means a partial copy; the write still
			 * advances by xfer — the shortfall is only logged */
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* wake the remote side only if something was actually queued */
	if (orig_len - len)
		ch->notify_other_cpu(ch);

	return orig_len - len;
}
1541
/*
 * Write one complete packet: a 5-word header (word 0 = payload length)
 * followed by @len payload bytes.  Space for header + payload is checked
 * up front so the two stream writes below are expected to succeed in full.
 * Returns @len on success, -EINVAL/-ENOMEM on bad length / no space, -1 if
 * the header write fails, or the short/negative payload write result.
 */
static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
		int user_buf)
{
	int ret;
	unsigned hdr[5];

	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	/* ensure header and payload fit so the packet is never split */
	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
		return -ENOMEM;

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		SMD_DBG("%s failed to write pkt header: "
			"%d returned\n", __func__, ret);
		return -1;
	}


	ret = smd_stream_write(ch, _data, len, user_buf);
	if (ret < 0 || ret != len) {
		SMD_DBG("%s failed to write pkt data: "
			"%d returned\n", __func__, ret);
		return ret;
	}

	return len;
}
1578
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001579static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001580{
1581 int r;
1582
1583 if (len < 0)
1584 return -EINVAL;
1585
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001586 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001587 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301589 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001590
1591 return r;
1592}
1593
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001594static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001595{
1596 unsigned long flags;
1597 int r;
1598
1599 if (len < 0)
1600 return -EINVAL;
1601
1602 if (len > ch->current_packet)
1603 len = ch->current_packet;
1604
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001605 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001606 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001607 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301608 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001609
1610 spin_lock_irqsave(&smd_lock, flags);
1611 ch->current_packet -= r;
1612 update_packet_state(ch);
1613 spin_unlock_irqrestore(&smd_lock, flags);
1614
1615 return r;
1616}
1617
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001618static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1619 int user_buf)
1620{
1621 int r;
1622
1623 if (len < 0)
1624 return -EINVAL;
1625
1626 if (len > ch->current_packet)
1627 len = ch->current_packet;
1628
1629 r = ch_read(ch, data, len, user_buf);
1630 if (r > 0)
1631 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301632 ch->notify_other_cpu(ch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001633
1634 ch->current_packet -= r;
1635 update_packet_state(ch);
1636
1637 return r;
1638}
1639
/*
 * Shared-memory channel allocation, selected at build time:
 * PKG3/PKG4 targets use the v2 layout (separate control structs + a single
 * split FIFO entry); older targets use the v1 layout (one combined struct).
 * The unused variant is stubbed to return -EINVAL so smd_alloc_channel()
 * can simply try v2 then v1.
 */
#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
static int smd_alloc_v2(struct smd_channel *ch)
{
	void *buffer;
	unsigned buffer_sz;

	/* word-access edges use a layout with word-aligned flag fields */
	if (is_word_access_ch(ch->type)) {
		struct smd_shared_v2_word_access *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	} else {
		struct smd_shared_v2 *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	}
	/* accessor vtable matching this edge's half-channel layout */
	ch->half_ch = get_half_ch_funcs(ch->type);

	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	/* the single SMEM entry is split: first half TX, second half RX */
	buffer_sz /= 2;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;

	return 0;
}

static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}

#else /* define v1 for older targets */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}

static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	/* v1 packs both half-channels and both FIFOs into one struct */
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	ch->half_ch = get_half_ch_funcs(ch->type);
	return 0;
}

#endif
1717
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001718static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001719{
1720 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001721
1722 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1723 if (ch == 0) {
1724 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001725 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001726 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001727 ch->n = alloc_elm->cid;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001728 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001729
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001730 if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001731 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001732 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001733 }
1734
1735 ch->fifo_mask = ch->fifo_size - 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001736
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001737 /* probe_worker guarentees ch->type will be a valid type */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001738 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001739 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001740 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001741 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001742 else if (ch->type == SMD_APPS_DSPS)
1743 ch->notify_other_cpu = notify_dsps_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001744 else if (ch->type == SMD_APPS_WCNSS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001745 ch->notify_other_cpu = notify_wcnss_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001746 else if (ch->type == SMD_APPS_RPM)
1747 ch->notify_other_cpu = notify_rpm_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001748
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001749 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001750 ch->read = smd_packet_read;
1751 ch->write = smd_packet_write;
1752 ch->read_avail = smd_packet_read_avail;
1753 ch->write_avail = smd_packet_write_avail;
1754 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001755 ch->read_from_cb = smd_packet_read_from_cb;
1756 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001757 } else {
1758 ch->read = smd_stream_read;
1759 ch->write = smd_stream_write;
1760 ch->read_avail = smd_stream_read_avail;
1761 ch->write_avail = smd_stream_write_avail;
1762 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001763 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001764 }
1765
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001766 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1767 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001768
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001769 ch->pdev.name = ch->name;
1770 ch->pdev.id = ch->type;
1771
1772 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1773 ch->name, ch->n);
1774
1775 mutex_lock(&smd_creation_mutex);
1776 list_add(&ch->ch_list, &smd_ch_closed_list);
1777 mutex_unlock(&smd_creation_mutex);
1778
1779 platform_device_register(&ch->pdev);
1780 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
1781 /* create a platform driver to be used by smd_tty driver
1782 * so that it can access the loopback port
1783 */
1784 loopback_tty_pdev.id = ch->type;
1785 platform_device_register(&loopback_tty_pdev);
1786 }
1787 return 0;
1788}
1789
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301790static inline void notify_loopback_smd(smd_channel_t *ch_notif)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001791{
1792 unsigned long flags;
1793 struct smd_channel *ch;
1794
1795 spin_lock_irqsave(&smd_lock, flags);
1796 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1797 ch->notify(ch->priv, SMD_EVENT_DATA);
1798 }
1799 spin_unlock_irqrestore(&smd_lock, flags);
1800}
1801
1802static int smd_alloc_loopback_channel(void)
1803{
1804 static struct smd_half_channel smd_loopback_ctl;
1805 static char smd_loopback_data[SMD_BUF_SIZE];
1806 struct smd_channel *ch;
1807
1808 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1809 if (ch == 0) {
1810 pr_err("%s: out of memory\n", __func__);
1811 return -1;
1812 }
1813 ch->n = SMD_LOOPBACK_CID;
1814
1815 ch->send = &smd_loopback_ctl;
1816 ch->recv = &smd_loopback_ctl;
1817 ch->send_data = smd_loopback_data;
1818 ch->recv_data = smd_loopback_data;
1819 ch->fifo_size = SMD_BUF_SIZE;
1820
1821 ch->fifo_mask = ch->fifo_size - 1;
1822 ch->type = SMD_LOOPBACK_TYPE;
1823 ch->notify_other_cpu = notify_loopback_smd;
1824
1825 ch->read = smd_stream_read;
1826 ch->write = smd_stream_write;
1827 ch->read_avail = smd_stream_read_avail;
1828 ch->write_avail = smd_stream_write_avail;
1829 ch->update_state = update_stream_state;
1830 ch->read_from_cb = smd_stream_read;
1831
1832 memset(ch->name, 0, 20);
1833 memcpy(ch->name, "local_loopback", 14);
1834
1835 ch->pdev.name = ch->name;
1836 ch->pdev.id = ch->type;
1837
1838 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001839
1840 mutex_lock(&smd_creation_mutex);
1841 list_add(&ch->ch_list, &smd_ch_closed_list);
1842 mutex_unlock(&smd_creation_mutex);
1843
1844 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001845 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001846}
1847
/* no-op notify callback installed while a channel has no owner */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1851
/*
 * Workqueue function that completes channel close: moves every channel on
 * the to-close list back to the closed (reusable) list and tells its owner
 * it may be reopened.  Lock order matters: smd_creation_mutex (guards the
 * closed list) is taken before smd_lock (guards the active lists).
 */
static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;

	mutex_lock(&smd_creation_mutex);
	spin_lock_irqsave(&smd_lock, flags);
	/* _safe variant: entries are moved off the list while iterating */
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_creation_mutex);
}
1869
1870struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001871{
1872 struct smd_channel *ch;
1873
1874 mutex_lock(&smd_creation_mutex);
1875 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001876 if (!strcmp(name, ch->name) &&
1877 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001878 list_del(&ch->ch_list);
1879 mutex_unlock(&smd_creation_mutex);
1880 return ch;
1881 }
1882 }
1883 mutex_unlock(&smd_creation_mutex);
1884
1885 return NULL;
1886}
1887
/*
 * Open the named channel on the given edge and install the client's
 * notify callback.  Returns 0 with *_ch set on success, -ENODEV if SMD is
 * not initialized or the channel does not exist, and -EAGAIN if the
 * channel exists but is still being closed (caller should retry).
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side, so fake an already-opened peer */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on its edge's active list */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1976
1977
1978int smd_open(const char *name, smd_channel_t **_ch,
1979 void *priv, void (*notify)(void *, unsigned))
1980{
1981 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
1982 notify);
1983}
1984EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001985
/*
 * Close a channel.  The local half is marked CLOSED immediately; if the
 * remote half is still OPENED the channel is parked on the closing list
 * until the remote side acknowledges, otherwise it is returned directly to
 * the closed (reusable) list.  Returns 0, or -1 for a NULL channel.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* loopback has no state machine: clear signals directly */
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* remote still open: defer to the closing list */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		/* smd_lock must be dropped before taking the mutex */
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
2019
/*
 * Begin a segmented packet write of @len payload bytes on a packet channel:
 * writes only the 5-word packet header; the payload follows via
 * smd_write_segment() calls.  Returns 0 on success; -ENODEV/-EACCES/-EINVAL
 * for bad arguments, -EBUSY if a packet is already in progress, -EAGAIN if
 * the header does not fit yet, -EPERM if the header write fails.
 */
int smd_write_start(smd_channel_t *ch, int len)
{
	int ret;
	unsigned hdr[5];

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (!ch->is_pkt_ch) {
		pr_err("%s: non-packet channel specified\n", __func__);
		return -EACCES;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (ch->pending_pkt_sz) {
		pr_err("%s: packet of size: %d in progress\n", __func__,
			ch->pending_pkt_sz);
		return -EBUSY;
	}
	/* record the payload budget consumed by smd_write_segment() */
	ch->pending_pkt_sz = len;

	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
		ch->pending_pkt_sz = 0;
		SMD_DBG("%s: no space to write packet header\n", __func__);
		return -EAGAIN;
	}

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		/* abort the transaction so a later start can retry */
		ch->pending_pkt_sz = 0;
		pr_err("%s: packet header failed to write\n", __func__);
		return -EPERM;
	}
	return 0;
}
EXPORT_SYMBOL(smd_write_start);
2064
2065int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2066{
2067 int bytes_written;
2068
2069 if (!ch) {
2070 pr_err("%s: Invalid channel specified\n", __func__);
2071 return -ENODEV;
2072 }
2073 if (len < 1) {
2074 pr_err("%s: invalid length: %d\n", __func__, len);
2075 return -EINVAL;
2076 }
2077
2078 if (!ch->pending_pkt_sz) {
2079 pr_err("%s: no transaction in progress\n", __func__);
2080 return -ENOEXEC;
2081 }
2082 if (ch->pending_pkt_sz - len < 0) {
2083 pr_err("%s: segment of size: %d will make packet go over "
2084 "length\n", __func__, len);
2085 return -EINVAL;
2086 }
2087
2088 bytes_written = smd_stream_write(ch, data, len, user_buf);
2089
2090 ch->pending_pkt_sz -= bytes_written;
2091
2092 return bytes_written;
2093}
2094EXPORT_SYMBOL(smd_write_segment);
2095
2096int smd_write_end(smd_channel_t *ch)
2097{
2098
2099 if (!ch) {
2100 pr_err("%s: Invalid channel specified\n", __func__);
2101 return -ENODEV;
2102 }
2103 if (ch->pending_pkt_sz) {
2104 pr_err("%s: current packet not completely written\n", __func__);
2105 return -E2BIG;
2106 }
2107
2108 return 0;
2109}
2110EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002111
2112int smd_read(smd_channel_t *ch, void *data, int len)
2113{
Jack Pham1b236d12012-03-19 15:27:18 -07002114 if (!ch) {
2115 pr_err("%s: Invalid channel specified\n", __func__);
2116 return -ENODEV;
2117 }
2118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002119 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002120}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002121EXPORT_SYMBOL(smd_read);
2122
2123int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2124{
Jack Pham1b236d12012-03-19 15:27:18 -07002125 if (!ch) {
2126 pr_err("%s: Invalid channel specified\n", __func__);
2127 return -ENODEV;
2128 }
2129
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002130 return ch->read(ch, data, len, 1);
2131}
2132EXPORT_SYMBOL(smd_read_user_buffer);
2133
2134int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2135{
Jack Pham1b236d12012-03-19 15:27:18 -07002136 if (!ch) {
2137 pr_err("%s: Invalid channel specified\n", __func__);
2138 return -ENODEV;
2139 }
2140
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002141 return ch->read_from_cb(ch, data, len, 0);
2142}
2143EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002144
2145int smd_write(smd_channel_t *ch, const void *data, int len)
2146{
Jack Pham1b236d12012-03-19 15:27:18 -07002147 if (!ch) {
2148 pr_err("%s: Invalid channel specified\n", __func__);
2149 return -ENODEV;
2150 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002151
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002152 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002153}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002154EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002155
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002156int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002157{
Jack Pham1b236d12012-03-19 15:27:18 -07002158 if (!ch) {
2159 pr_err("%s: Invalid channel specified\n", __func__);
2160 return -ENODEV;
2161 }
2162
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002163 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002164}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002166
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002167int smd_read_avail(smd_channel_t *ch)
2168{
Jack Pham1b236d12012-03-19 15:27:18 -07002169 if (!ch) {
2170 pr_err("%s: Invalid channel specified\n", __func__);
2171 return -ENODEV;
2172 }
2173
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002174 return ch->read_avail(ch);
2175}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002176EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002177
2178int smd_write_avail(smd_channel_t *ch)
2179{
Jack Pham1b236d12012-03-19 15:27:18 -07002180 if (!ch) {
2181 pr_err("%s: Invalid channel specified\n", __func__);
2182 return -ENODEV;
2183 }
2184
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002185 return ch->write_avail(ch);
2186}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002187EXPORT_SYMBOL(smd_write_avail);
2188
2189void smd_enable_read_intr(smd_channel_t *ch)
2190{
2191 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002192 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002193}
2194EXPORT_SYMBOL(smd_enable_read_intr);
2195
2196void smd_disable_read_intr(smd_channel_t *ch)
2197{
2198 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002199 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002200}
2201EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002202
Eric Holmbergdeace152012-07-25 12:17:11 -06002203/**
2204 * Enable/disable receive interrupts for the remote processor used by a
2205 * particular channel.
2206 * @ch: open channel handle to use for the edge
2207 * @mask: 1 = mask interrupts; 0 = unmask interrupts
2208 * @returns: 0 for success; < 0 for failure
2209 *
2210 * Note that this enables/disables all interrupts from the remote subsystem for
2211 * all channels. As such, it should be used with care and only for specific
2212 * use cases such as power-collapse sequencing.
2213 */
2214int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
2215{
2216 struct irq_chip *irq_chip;
2217 struct irq_data *irq_data;
2218 struct interrupt_config_item *int_cfg;
2219
2220 if (!ch)
2221 return -EINVAL;
2222
2223 if (ch->type >= ARRAY_SIZE(edge_to_pids))
2224 return -ENODEV;
2225
2226 int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
2227
2228 if (int_cfg->irq_id < 0)
2229 return -ENODEV;
2230
2231 irq_chip = irq_get_chip(int_cfg->irq_id);
2232 if (!irq_chip)
2233 return -ENODEV;
2234
2235 irq_data = irq_get_irq_data(int_cfg->irq_id);
2236 if (!irq_data)
2237 return -ENODEV;
2238
2239 if (mask) {
2240 SMx_POWER_INFO("SMD Masking interrupts from %s\n",
2241 edge_to_pids[ch->type].subsys_name);
2242 irq_chip->irq_mask(irq_data);
2243 } else {
2244 SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
2245 edge_to_pids[ch->type].subsys_name);
2246 irq_chip->irq_unmask(irq_data);
2247 }
2248
2249 return 0;
2250}
2251EXPORT_SYMBOL(smd_mask_receive_interrupt);
2252
/* Blocking reads are not supported; always fails.  Use notify callbacks. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2257
/* Blocking writes are not supported; always fails.  Use notify callbacks. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2262
2263int smd_cur_packet_size(smd_channel_t *ch)
2264{
Jack Pham1b236d12012-03-19 15:27:18 -07002265 if (!ch) {
2266 pr_err("%s: Invalid channel specified\n", __func__);
2267 return -ENODEV;
2268 }
2269
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002270 return ch->current_packet;
2271}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002272EXPORT_SYMBOL(smd_cur_packet_size);
2273
2274int smd_tiocmget(smd_channel_t *ch)
2275{
Jack Pham1b236d12012-03-19 15:27:18 -07002276 if (!ch) {
2277 pr_err("%s: Invalid channel specified\n", __func__);
2278 return -ENODEV;
2279 }
2280
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002281 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2282 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2283 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2284 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2285 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2286 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002287}
2288EXPORT_SYMBOL(smd_tiocmget);
2289
/* this api will be called while holding smd_lock */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	/* local DTR/RTS map onto the send half-channel's fDSR/fCTS flags;
	 * clear bits are applied after set bits, so a bit present in both
	 * masks ends up cleared */
	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	/* flag the state change, then make it visible before interrupting
	 * the remote processor */
	ch->half_ch->set_fSTATE(ch->send, 1);
	barrier();
	ch->notify_other_cpu(ch);

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2318
2319int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2320{
2321 unsigned long flags;
2322
Jack Pham1b236d12012-03-19 15:27:18 -07002323 if (!ch) {
2324 pr_err("%s: Invalid channel specified\n", __func__);
2325 return -ENODEV;
2326 }
2327
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002328 spin_lock_irqsave(&smd_lock, flags);
2329 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002330 spin_unlock_irqrestore(&smd_lock, flags);
2331
2332 return 0;
2333}
2334EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002335
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002336int smd_is_pkt_avail(smd_channel_t *ch)
2337{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002338 unsigned long flags;
2339
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002340 if (!ch || !ch->is_pkt_ch)
2341 return -EINVAL;
2342
2343 if (ch->current_packet)
2344 return 1;
2345
Jeff Hugoa8549f12012-08-13 20:36:18 -06002346 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002347 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002348 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002349
2350 return ch->current_packet ? 1 : 0;
2351}
2352EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002353
2354
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002355/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002356
Jeff Hugobdc734d2012-03-26 16:05:39 -06002357/*
2358 * Shared Memory Range Check
2359 *
2360 * Takes a physical address and an offset and checks if the resulting physical
2361 * address would fit into one of the aux smem regions. If so, returns the
2362 * corresponding virtual address. Otherwise returns NULL. Expects the array
2363 * of smem regions to be in ascending physical address order.
2364 *
2365 * @base: physical base address to check
2366 * @offset: offset from the base to get the final address
2367 */
static void *smem_range_check(phys_addr_t base, unsigned offset)
{
	int i;
	phys_addr_t phys_addr;
	resource_size_t size;

	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;
		/* areas are sorted by ascending physical address, so a base
		 * below this area cannot match any later one */
		if (base < phys_addr)
			return NULL;
		if (base > phys_addr + size)
			continue;
		/* base and base+offset must both land inside this area */
		if (base >= phys_addr && base + offset < phys_addr + size)
			return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
2387
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.  (Thin wrapper over smem_find; it never
 * allocates — use smem_alloc2 for allocate-on-demand behavior.)
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002396
/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
 * it allocates it and then returns the pointer to it.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	/* heap items are 8-byte aligned */
	size_in = ALIGN(size_in, 8);
	/* the heap TOC is shared with remote processors; serialize with the
	 * remote spinlock */
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* make offset/size visible before publishing the
			 * entry as allocated */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	/* flush all writes before dropping the remote lock */
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002445
/*
 * Look up an smem item by id.  On success returns its virtual address and
 * stores the item size in *size; otherwise returns 0 and sets *size to 0
 * (or returns 0 without touching *size for an out-of-range id).
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	/* may run before the remote spinlock is initialized, in which case
	 * the lookup proceeds unlocked */
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		/* a base address in the reserved field means the item lives
		 * in an auxiliary smem area rather than the main region */
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				toc[id].reserved & BASE_ADDR_MASK,
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002478
/*
 * Look up an already-allocated smem item and verify its size matches the
 * caller's expectation (rounded up to 8 bytes).  Returns the item pointer
 * on success, 0 on lookup failure or size mismatch.
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned actual_size;
	void *item = smem_get_entry(id, &actual_size);

	if (!item)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != actual_size) {
		pr_err("smem_find(%d, %d): wrong size %d\n",
			id, size_in, actual_size);
		return 0;
	}

	return item;
}
EXPORT_SYMBOL(smem_find);
2498
/*
 * Allocate the per-entry SMSM callback state and the single-threaded
 * workqueue used to run client notifications.  Returns 0 on success,
 * -ENOMEM/-EFAULT on allocation failure.
 */
static int smsm_cb_init(void)
{
	struct smsm_state_info *state_info;
	int n;
	int ret = 0;

	smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
		   GFP_KERNEL);

	if (!smsm_states) {
		pr_err("%s: SMSM init failed\n", __func__);
		return -ENOMEM;
	}

	smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
	if (!smsm_cb_wq) {
		pr_err("%s: smsm_cb_wq creation failed\n", __func__);
		kfree(smsm_states);
		return -EFAULT;
	}

	/* seed each entry's last_value from the current shared state so the
	 * first notification only reports real changes */
	mutex_lock(&smsm_lock);
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		state_info = &smsm_states[n];
		state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
		state_info->intr_mask_set = 0x0;
		state_info->intr_mask_clear = 0x0;
		INIT_LIST_HEAD(&state_info->callbacks);
	}
	mutex_unlock(&smsm_lock);

	return ret;
}
2532
/*
 * One-time SMSM initialization: sanity-check the remote spinlock, size the
 * state tables from shared memory, allocate the snapshot fifo/wakelock and
 * the shared state, interrupt-mask and interrupt-mux regions, then set up
 * callback bookkeeping and the PM notifier.  Returns 0 on success.
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;
	unsigned long flags;
	unsigned long j_start;

	/* Verify that remote spinlock is not deadlocked */
	j_start = jiffies;
	while (!remote_spin_trylock_irqsave(&remote_spinlock, flags)) {
		if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
			panic("%s: Remote processor %d will not release spinlock\n",
				__func__, remote_spin_owner(&remote_spinlock));
		}
	}
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	/* entry/host counts may be overridden by a table in shared memory */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* modem protocol >= 0xB also carries a DEM entry */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* start with all interrupts masked for apps */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	wmb();

	/* sync notifier state, then register for suspend/resume events */
	smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL);
	i = register_pm_notifier(&smsm_pm_nb);
	if (i)
		pr_err("%s: power state notif error %d\n", __func__, i);

	return 0;
}
2617
2618void smsm_reset_modem(unsigned mode)
2619{
2620 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2621 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2622 } else if (mode == SMSM_MODEM_WAIT) {
2623 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2624 } else { /* reset_mode is SMSM_RESET or default */
2625 mode = SMSM_RESET;
2626 }
2627
2628 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2629}
2630EXPORT_SYMBOL(smsm_reset_modem);
2631
/*
 * Clear SMSM_MODEM_WAIT from the apps SMSM state (presumably allowing a
 * waiting modem to proceed after reset — semantics owned by the remote
 * side).  No-op if the shared state is not yet initialized.
 */
void smsm_reset_modem_cont(void)
{
	unsigned long flags;
	uint32_t state;

	if (!smsm_info.state)
		return;

	spin_lock_irqsave(&smem_lock, flags);
	state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
						& ~SMSM_MODEM_WAIT;
	__raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
	/* ensure the write reaches shared memory before releasing the lock */
	wmb();
	spin_unlock_irqrestore(&smem_lock, flags);
}
EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002648
/*
 * Capture the current value of every SMSM state entry into the snapshot
 * fifo (followed by the wakelock-usage flag) and queue the callback worker
 * to process it.  With @use_wakelock set, a wakelock is held for as long
 * as unprocessed snapshots remain.  Runs in atomic context.
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 * 1) increment snapshot count
	 * 2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 * a) verifies >= 1 snapshots are in FIFO
	 * b) processes snapshot
	 * c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* undo step 1 above after a partial snapshot insertion */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002723
/*
 * Common SMSM interrupt handler: snapshots the shared state for the
 * callback worker and, for modem interrupts, runs the legacy apps/modem
 * handshake state machine (reset, init, download) on the apps state word.
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	/* LPASS interrupts only require a snapshot, no handshake logic */
	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				/* ack the reset and flush caches before the
				 * modem goes down */
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			/* advance to RUN once INIT, SMDINIT and RPCINIT are
			 * all reflected in the apps state */
			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		/* publish any apps-state change and interrupt the remote */
		if (old_apps != apps) {
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2806
Eric Holmberg98c6c642012-02-24 11:29:35 -07002807static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002808{
Eric Holmberg98c6c642012-02-24 11:29:35 -07002809 SMx_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002810 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002811 return smsm_irq_handler(irq, data);
2812}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002813
Eric Holmberg98c6c642012-02-24 11:29:35 -07002814static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
2815{
2816 SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002817 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002818 return smsm_irq_handler(irq, data);
2819}
2820
2821static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
2822{
2823 SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002824 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002825 return smsm_irq_handler(irq, data);
2826}
2827
2828static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
2829{
2830 SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002831 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002832 return smsm_irq_handler(irq, data);
2833}
2834
Eric Holmberge8a39322012-04-03 15:14:02 -06002835/*
2836 * Changes the global interrupt mask. The set and clear masks are re-applied
2837 * every time the global interrupt mask is updated for callback registration
2838 * and de-registration.
2839 *
2840 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2841 * mask and the set mask, the result will be that the interrupt is set.
2842 *
2843 * @smsm_entry SMSM entry to change
2844 * @clear_mask 1 = clear bit, 0 = no-op
2845 * @set_mask 1 = set bit, 0 = no-op
2846 *
2847 * @returns 0 for success, < 0 for error
2848 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002849int smsm_change_intr_mask(uint32_t smsm_entry,
2850 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002851{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002852 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002853 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002854
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002855 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2856 pr_err("smsm_change_state: Invalid entry %d\n",
2857 smsm_entry);
2858 return -EINVAL;
2859 }
2860
2861 if (!smsm_info.intr_mask) {
2862 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002863 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002864 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002865
2866 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002867 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2868 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002869
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002870 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2871 new_mask = (old_mask & ~clear_mask) | set_mask;
2872 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002873
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002874 wmb();
2875 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002876
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002877 return 0;
2878}
2879EXPORT_SYMBOL(smsm_change_intr_mask);
2880
2881int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2882{
2883 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2884 pr_err("smsm_change_state: Invalid entry %d\n",
2885 smsm_entry);
2886 return -EINVAL;
2887 }
2888
2889 if (!smsm_info.intr_mask) {
2890 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2891 return -EIO;
2892 }
2893
2894 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2895 return 0;
2896}
2897EXPORT_SYMBOL(smsm_get_intr_mask);
2898
2899int smsm_change_state(uint32_t smsm_entry,
2900 uint32_t clear_mask, uint32_t set_mask)
2901{
2902 unsigned long flags;
2903 uint32_t old_state, new_state;
2904
2905 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2906 pr_err("smsm_change_state: Invalid entry %d",
2907 smsm_entry);
2908 return -EINVAL;
2909 }
2910
2911 if (!smsm_info.state) {
2912 pr_err("smsm_change_state <SM NO STATE>\n");
2913 return -EIO;
2914 }
2915 spin_lock_irqsave(&smem_lock, flags);
2916
2917 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2918 new_state = (old_state & ~clear_mask) | set_mask;
2919 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
2920 SMSM_DBG("smsm_change_state %x\n", new_state);
2921 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002922
2923 spin_unlock_irqrestore(&smem_lock, flags);
2924
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002925 return 0;
2926}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002927EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002928
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002929uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002930{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002931 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002932
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002933 /* needs interface change to return error code */
2934 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2935 pr_err("smsm_change_state: Invalid entry %d",
2936 smsm_entry);
2937 return 0;
2938 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002939
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002940 if (!smsm_info.state) {
2941 pr_err("smsm_get_state <SM NO STATE>\n");
2942 } else {
2943 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2944 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002945
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002946 return rv;
2947}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002948EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002949
/**
 * Performs SMSM callback client notification.
 *
 * Drains complete snapshots (SMSM_NUM_ENTRIES state words followed by a
 * wakelock-usage flag) from the snapshot fifo, invokes every registered
 * callback whose mask overlaps the changed bits, and drops the snapshot
 * wakelock reference taken by smsm_cb_snapshot().
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			/* notify only callbacks whose mask overlaps the
			 * bits that changed since the last snapshot */
			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		/* release the reference taken when this snapshot was queued */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						" wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
					__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
3027
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003028
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003029/**
3030 * Registers callback for SMSM state notifications when the specified
3031 * bits change.
3032 *
 * @smsm_entry  Processor entry to register the callback against
 * @mask        Bits to monitor; the callback fires when any of them change
 * @notify      Notification function to register
3036 * @data Opaque data passed in to callback
3037 *
3038 * @returns Status code
3039 * <0 error code
3040 * 0 inserted new entry
3041 * 1 updated mask of existing entry
3042 */
int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_info *state;
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_found = 0;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	/* smsm_lock serializes callback-list mutation against lookup */
	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		ret = -ENODEV;
		goto cleanup;
	}

	state = &smsm_states[smsm_entry];
	/*
	 * If a callback with the same (notify, data) pair already exists,
	 * OR the new bits into its mask instead of adding a duplicate.
	 * new_mask accumulates the union of all registered masks so the
	 * hardware interrupt mask below reflects every listener.
	 */
	list_for_each_entry(cb_info,
			&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
				(cb_info->data == data)) {
			cb_info->mask |= mask;
			cb_found = cb_info;
			ret = 1;
		}
		new_mask |= cb_info->mask;
	}

	if (!cb_found) {
		/* no existing entry matched; insert a new callback node */
		cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
			GFP_ATOMIC);
		if (!cb_info) {
			ret = -ENOMEM;
			goto cleanup;
		}

		cb_info->mask = mask;
		cb_info->notify = notify;
		cb_info->data = data;
		INIT_LIST_HEAD(&cb_info->cb_list);
		list_add_tail(&cb_info->cb_list,
			&state->callbacks);
		new_mask |= mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* honor the per-entry force-clear/force-set overrides */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		/* ensure the mask write reaches shared memory before unlock */
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

cleanup:
	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_register);
3113
3114
3115/**
3116 * Deregisters for SMSM state notifications for the specified bits.
3117 *
3118 * @smsm_entry Processor entry to deregister
3119 * @mask Bits to deregister (if result is 0, callback is removed)
3120 * @notify Notification function to deregister
3121 * @data Opaque data passed in to callback
3122 *
3123 * @returns Status code
3124 * <0 error code
3125 * 0 not found
3126 * 1 updated mask
3127 * 2 removed callback
3128 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	state = &smsm_states[smsm_entry];
	/*
	 * _safe iteration: the matching node may be deleted mid-walk.
	 * new_mask accumulates the union of the masks that remain so the
	 * hardware interrupt mask below only covers surviving listeners.
	 */
	list_for_each_entry_safe(cb_info, cb_tmp,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
			(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				/* freed node must not feed new_mask below */
				continue;
			}
		}
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		/* honor the per-entry force-clear/force-set overrides */
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
				SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		/* ensure the mask write reaches shared memory before unlock */
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3187
Eric Holmberg6275b602012-11-19 13:05:04 -07003188/**
3189 * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
3190 *
3191 * @returns: pointer to SMEM remote spinlock
3192 */
remote_spinlock_t *smem_get_remote_spinlock(void)
{
	/* exposes the file-scope SMEM remote spinlock for unit tests only */
	return &remote_spinlock;
}
EXPORT_SYMBOL(smem_get_remote_spinlock);
3198
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003199int smd_module_init_notifier_register(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003200{
3201 int ret;
3202 if (!nb)
3203 return -EINVAL;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003204 mutex_lock(&smd_module_init_notifier_lock);
3205 ret = raw_notifier_chain_register(&smd_module_init_notifier_list, nb);
3206 if (smd_module_inited)
3207 nb->notifier_call(nb, 0, NULL);
3208 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003209 return ret;
3210}
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003211EXPORT_SYMBOL(smd_module_init_notifier_register);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003212
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003213int smd_module_init_notifier_unregister(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003214{
3215 int ret;
3216 if (!nb)
3217 return -EINVAL;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003218 mutex_lock(&smd_module_init_notifier_lock);
3219 ret = raw_notifier_chain_unregister(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003220 nb);
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003221 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003222 return ret;
3223}
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003224EXPORT_SYMBOL(smd_module_init_notifier_unregister);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003225
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003226static void smd_module_init_notify(uint32_t state, void *data)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003227{
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003228 mutex_lock(&smd_module_init_notifier_lock);
3229 smd_module_inited = 1;
3230 raw_notifier_call_chain(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003231 state, data);
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003232 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003233}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003234
3235int smd_core_init(void)
3236{
3237 int r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003238 unsigned long flags = IRQF_TRIGGER_RISING;
3239 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003240
Brian Swetland37521a32009-07-01 18:30:47 -07003241 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003242 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003243 if (r < 0)
3244 return r;
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303245 interrupt_stats[SMD_MODEM].smd_interrupt_id = INT_A9_M2A_0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003246 r = enable_irq_wake(INT_A9_M2A_0);
3247 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003248 pr_err("smd_core_init: "
3249 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003250
Eric Holmberg98c6c642012-02-24 11:29:35 -07003251 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003252 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003253 if (r < 0) {
3254 free_irq(INT_A9_M2A_0, 0);
3255 return r;
3256 }
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303257 interrupt_stats[SMD_MODEM].smsm_interrupt_id = INT_A9_M2A_5;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003258 r = enable_irq_wake(INT_A9_M2A_5);
3259 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003260 pr_err("smd_core_init: "
3261 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003262
Brian Swetland37521a32009-07-01 18:30:47 -07003263#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003264#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3265 flags |= IRQF_SHARED;
3266#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003267 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003268 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003269 if (r < 0) {
3270 free_irq(INT_A9_M2A_0, 0);
3271 free_irq(INT_A9_M2A_5, 0);
3272 return r;
3273 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003274
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303275 interrupt_stats[SMD_Q6].smd_interrupt_id = INT_ADSP_A11;
Eric Holmberg98c6c642012-02-24 11:29:35 -07003276 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3277 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003278 if (r < 0) {
3279 free_irq(INT_A9_M2A_0, 0);
3280 free_irq(INT_A9_M2A_5, 0);
3281 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3282 return r;
3283 }
3284
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303285 interrupt_stats[SMD_Q6].smsm_interrupt_id = INT_ADSP_A11_SMSM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003286 r = enable_irq_wake(INT_ADSP_A11);
3287 if (r < 0)
3288 pr_err("smd_core_init: "
3289 "enable_irq_wake failed for INT_ADSP_A11\n");
3290
3291#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3292 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3293 if (r < 0)
3294 pr_err("smd_core_init: enable_irq_wake "
3295 "failed for INT_ADSP_A11_SMSM\n");
3296#endif
3297 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003298#endif
3299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003300#if defined(CONFIG_DSPS)
3301 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3302 flags, "smd_dev", smd_dsps_irq_handler);
3303 if (r < 0) {
3304 free_irq(INT_A9_M2A_0, 0);
3305 free_irq(INT_A9_M2A_5, 0);
3306 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003307 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003308 return r;
3309 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003310
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303311 interrupt_stats[SMD_DSPS].smd_interrupt_id = INT_DSPS_A11;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003312 r = enable_irq_wake(INT_DSPS_A11);
3313 if (r < 0)
3314 pr_err("smd_core_init: "
3315 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003316#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003317
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003318#if defined(CONFIG_WCNSS)
3319 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3320 flags, "smd_dev", smd_wcnss_irq_handler);
3321 if (r < 0) {
3322 free_irq(INT_A9_M2A_0, 0);
3323 free_irq(INT_A9_M2A_5, 0);
3324 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003325 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003326 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3327 return r;
3328 }
3329
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303330 interrupt_stats[SMD_WCNSS].smd_interrupt_id = INT_WCNSS_A11;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003331 r = enable_irq_wake(INT_WCNSS_A11);
3332 if (r < 0)
3333 pr_err("smd_core_init: "
3334 "enable_irq_wake failed for INT_WCNSS_A11\n");
3335
Eric Holmberg98c6c642012-02-24 11:29:35 -07003336 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3337 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003338 if (r < 0) {
3339 free_irq(INT_A9_M2A_0, 0);
3340 free_irq(INT_A9_M2A_5, 0);
3341 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003342 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003343 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3344 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3345 return r;
3346 }
3347
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303348 interrupt_stats[SMD_WCNSS].smsm_interrupt_id = INT_WCNSS_A11_SMSM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003349 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3350 if (r < 0)
3351 pr_err("smd_core_init: "
3352 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3353#endif
3354
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003355#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003356 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3357 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003358 if (r < 0) {
3359 free_irq(INT_A9_M2A_0, 0);
3360 free_irq(INT_A9_M2A_5, 0);
3361 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003362 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003363 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3364 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003365 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003366 return r;
3367 }
3368
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303369 interrupt_stats[SMD_DSPS].smsm_interrupt_id = INT_DSPS_A11_SMSM;
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003370 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3371 if (r < 0)
3372 pr_err("smd_core_init: "
3373 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3374#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003375 SMD_INFO("smd_core_init() done\n");
3376
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003377 return 0;
3378}
3379
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303380static int intr_init(struct interrupt_config_item *private_irq,
3381 struct smd_irq_config *platform_irq,
3382 struct platform_device *pdev
3383 )
3384{
3385 int irq_id;
3386 int ret;
3387 int ret_wake;
3388
3389 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3390 private_irq->out_offset = platform_irq->out_offset;
3391 private_irq->out_base = platform_irq->out_base;
3392
3393 irq_id = platform_get_irq_byname(
3394 pdev,
3395 platform_irq->irq_name
3396 );
3397 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3398 platform_irq->irq_name, irq_id);
3399 ret = request_irq(irq_id,
3400 private_irq->irq_handler,
3401 platform_irq->flags,
3402 platform_irq->device_name,
3403 (void *)platform_irq->dev_id
3404 );
3405 if (ret < 0) {
3406 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003407 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303408 } else {
3409 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003410 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303411 ret_wake = enable_irq_wake(irq_id);
3412 if (ret_wake < 0) {
3413 pr_err("smd: enable_irq_wake failed on %s",
3414 platform_irq->irq_name);
3415 }
3416 }
3417
3418 return ret;
3419}
3420
Jeff Hugobdc734d2012-03-26 16:05:39 -06003421int sort_cmp_func(const void *a, const void *b)
3422{
3423 struct smem_area *left = (struct smem_area *)(a);
3424 struct smem_area *right = (struct smem_area *)(b);
3425
3426 return left->phys_addr - right->phys_addr;
3427}
3428
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303429int smd_core_platform_init(struct platform_device *pdev)
3430{
3431 int i;
3432 int ret;
3433 uint32_t num_ss;
3434 struct smd_platform *smd_platform_data;
3435 struct smd_subsystem_config *smd_ss_config_list;
3436 struct smd_subsystem_config *cfg;
3437 int err_ret = 0;
Jeff Hugobdc734d2012-03-26 16:05:39 -06003438 struct smd_smem_regions *smd_smem_areas;
3439 int smem_idx = 0;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303440
3441 smd_platform_data = pdev->dev.platform_data;
3442 num_ss = smd_platform_data->num_ss_configs;
3443 smd_ss_config_list = smd_platform_data->smd_ss_configs;
3444
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06003445 if (smd_platform_data->smd_ssr_config)
3446 disable_smsm_reset_handshake = smd_platform_data->
3447 smd_ssr_config->disable_smsm_reset_handshake;
3448
Jeff Hugobdc734d2012-03-26 16:05:39 -06003449 smd_smem_areas = smd_platform_data->smd_smem_areas;
3450 if (smd_smem_areas) {
3451 num_smem_areas = smd_platform_data->num_smem_areas;
3452 smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
3453 GFP_KERNEL);
3454 if (!smem_areas) {
3455 pr_err("%s: smem_areas kmalloc failed\n", __func__);
3456 err_ret = -ENOMEM;
3457 goto smem_areas_alloc_fail;
3458 }
3459
3460 for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
3461 smem_areas[smem_idx].phys_addr =
3462 smd_smem_areas[smem_idx].phys_addr;
3463 smem_areas[smem_idx].size =
3464 smd_smem_areas[smem_idx].size;
3465 smem_areas[smem_idx].virt_addr = ioremap_nocache(
3466 (unsigned long)(smem_areas[smem_idx].phys_addr),
3467 smem_areas[smem_idx].size);
3468 if (!smem_areas[smem_idx].virt_addr) {
Stepan Moskovchenkod6ee8262013-02-06 11:26:05 -08003469 pr_err("%s: ioremap_nocache() of addr: %pa size: %pa\n",
3470 __func__,
3471 &smem_areas[smem_idx].phys_addr,
3472 &smem_areas[smem_idx].size);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003473 err_ret = -ENOMEM;
3474 ++smem_idx;
3475 goto smem_failed;
3476 }
3477 }
3478 sort(smem_areas, num_smem_areas,
3479 sizeof(struct smem_area),
3480 sort_cmp_func, NULL);
3481 }
3482
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303483 for (i = 0; i < num_ss; i++) {
3484 cfg = &smd_ss_config_list[i];
3485
3486 ret = intr_init(
3487 &private_intr_config[cfg->irq_config_id].smd,
3488 &cfg->smd_int,
3489 pdev
3490 );
3491
3492 if (ret < 0) {
3493 err_ret = ret;
3494 pr_err("smd: register irq failed on %s\n",
3495 cfg->smd_int.irq_name);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003496 goto intr_failed;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303497 }
3498
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303499 interrupt_stats[cfg->irq_config_id].smd_interrupt_id
3500 = cfg->smd_int.irq_id;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003501 /* only init smsm structs if this edge supports smsm */
3502 if (cfg->smsm_int.irq_id)
3503 ret = intr_init(
3504 &private_intr_config[cfg->irq_config_id].smsm,
3505 &cfg->smsm_int,
3506 pdev
3507 );
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303508
3509 if (ret < 0) {
3510 err_ret = ret;
3511 pr_err("smd: register irq failed on %s\n",
3512 cfg->smsm_int.irq_name);
Jeff Hugobdc734d2012-03-26 16:05:39 -06003513 goto intr_failed;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303514 }
Eric Holmberg17992c12012-02-29 12:54:44 -07003515
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303516 if (cfg->smsm_int.irq_id)
3517 interrupt_stats[cfg->irq_config_id].smsm_interrupt_id
3518 = cfg->smsm_int.irq_id;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06003519 if (cfg->subsys_name)
3520 strlcpy(edge_to_pids[cfg->edge].subsys_name,
Eric Holmberg17992c12012-02-29 12:54:44 -07003521 cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303522 }
3523
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303524
3525 SMD_INFO("smd_core_platform_init() done\n");
3526 return 0;
3527
Jeff Hugobdc734d2012-03-26 16:05:39 -06003528intr_failed:
3529 pr_err("smd: deregistering IRQs\n");
3530 for (i = 0; i < num_ss; ++i) {
3531 cfg = &smd_ss_config_list[i];
3532
3533 if (cfg->smd_int.irq_id >= 0)
3534 free_irq(cfg->smd_int.irq_id,
3535 (void *)cfg->smd_int.dev_id
3536 );
3537 if (cfg->smsm_int.irq_id >= 0)
3538 free_irq(cfg->smsm_int.irq_id,
3539 (void *)cfg->smsm_int.dev_id
3540 );
3541 }
3542smem_failed:
3543 for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
3544 iounmap(smem_areas[smem_idx].virt_addr);
3545 kfree(smem_areas);
3546smem_areas_alloc_fail:
3547 return err_ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303548}
3549
Jeff Hugo412356e2012-09-27 17:14:23 -06003550static int __devinit parse_smd_devicetree(struct device_node *node,
3551 void *irq_out_base)
3552{
3553 uint32_t edge;
3554 char *key;
3555 int ret;
3556 uint32_t irq_offset;
3557 uint32_t irq_bitmask;
3558 uint32_t irq_line;
3559 unsigned long irq_flags = IRQF_TRIGGER_RISING;
3560 const char *pilstr;
3561 struct interrupt_config_item *private_irq;
3562
3563 key = "qcom,smd-edge";
3564 ret = of_property_read_u32(node, key, &edge);
3565 if (ret)
3566 goto missing_key;
3567 SMD_DBG("%s: %s = %d", __func__, key, edge);
3568
3569 key = "qcom,smd-irq-offset";
3570 ret = of_property_read_u32(node, key, &irq_offset);
3571 if (ret)
3572 goto missing_key;
3573 SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
3574
3575 key = "qcom,smd-irq-bitmask";
3576 ret = of_property_read_u32(node, key, &irq_bitmask);
3577 if (ret)
3578 goto missing_key;
3579 SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
3580
3581 key = "interrupts";
3582 irq_line = irq_of_parse_and_map(node, 0);
3583 if (!irq_line)
3584 goto missing_key;
3585 SMD_DBG("%s: %s = %d", __func__, key, irq_line);
3586
3587 key = "qcom,pil-string";
3588 pilstr = of_get_property(node, key, NULL);
3589 if (pilstr)
3590 SMD_DBG("%s: %s = %s", __func__, key, pilstr);
3591
3592 key = "qcom,irq-no-suspend";
3593 ret = of_property_read_bool(node, key);
3594 if (ret)
3595 irq_flags |= IRQF_NO_SUSPEND;
3596
3597 private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smd;
3598 private_irq->out_bit_pos = irq_bitmask;
3599 private_irq->out_offset = irq_offset;
3600 private_irq->out_base = irq_out_base;
3601 private_irq->irq_id = irq_line;
3602
3603 ret = request_irq(irq_line,
3604 private_irq->irq_handler,
3605 irq_flags,
3606 "smd_dev",
3607 NULL);
3608 if (ret < 0) {
3609 pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
3610 return ret;
3611 } else {
3612 ret = enable_irq_wake(irq_line);
3613 if (ret < 0)
3614 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
3615 irq_line);
3616 }
3617
3618 if (pilstr)
3619 strlcpy(edge_to_pids[edge].subsys_name, pilstr,
3620 SMD_MAX_CH_NAME_LEN);
3621
3622 return 0;
3623
3624missing_key:
3625 pr_err("%s: missing key: %s", __func__, key);
3626 return -ENODEV;
3627}
3628
/*
 * Parse one "qcom,smsm" devicetree child node: read the edge, outgoing
 * interrupt register offset/bitmask and IRQ line, then register the
 * SMSM interrupt handler for that edge.
 *
 * @node          devicetree node describing the SMSM edge
 * @irq_out_base  already-ioremapped base of the outgoing IRQ register bank
 *
 * Returns 0 on success, -ENODEV if a required property is missing, or a
 * negative errno from request_irq().  `key` always names the property
 * currently being read so the missing_key label can report it.
 */
static int __devinit parse_smsm_devicetree(struct device_node *node,
						void *irq_out_base)
{
	uint32_t edge;
	char *key;
	int ret;
	uint32_t irq_offset;
	uint32_t irq_bitmask;
	uint32_t irq_line;
	struct interrupt_config_item *private_irq;

	key = "qcom,smsm-edge";
	ret = of_property_read_u32(node, key, &edge);
	if (ret)
		goto missing_key;
	SMD_DBG("%s: %s = %d", __func__, key, edge);

	key = "qcom,smsm-irq-offset";
	ret = of_property_read_u32(node, key, &irq_offset);
	if (ret)
		goto missing_key;
	SMD_DBG("%s: %s = %x", __func__, key, irq_offset);

	key = "qcom,smsm-irq-bitmask";
	ret = of_property_read_u32(node, key, &irq_bitmask);
	if (ret)
		goto missing_key;
	SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);

	key = "interrupts";
	irq_line = irq_of_parse_and_map(node, 0);
	if (!irq_line)
		goto missing_key;
	SMD_DBG("%s: %s = %d", __func__, key, irq_line);

	/* fill in the per-edge outgoing interrupt configuration */
	private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smsm;
	private_irq->out_bit_pos = irq_bitmask;
	private_irq->out_offset = irq_offset;
	private_irq->out_base = irq_out_base;
	private_irq->irq_id = irq_line;

	ret = request_irq(irq_line,
			private_irq->irq_handler,
			IRQF_TRIGGER_RISING,
			"smsm_dev",
			NULL);
	if (ret < 0) {
		pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
		return ret;
	} else {
		/* wake failure is non-fatal; only logged */
		ret = enable_irq_wake(irq_line);
		if (ret < 0)
			pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
					irq_line);
	}

	return 0;

missing_key:
	pr_err("%s: missing key: %s", __func__, key);
	return -ENODEV;
}
3691
3692static void __devinit unparse_smd_devicetree(struct device_node *node)
3693{
3694 uint32_t irq_line;
3695
3696 irq_line = irq_of_parse_and_map(node, 0);
3697
3698 free_irq(irq_line, NULL);
3699}
3700
3701static void __devinit unparse_smsm_devicetree(struct device_node *node)
3702{
3703 uint32_t irq_line;
3704
3705 irq_line = irq_of_parse_and_map(node, 0);
3706
3707 free_irq(irq_line, NULL);
3708}
3709
/*
 * Devicetree-driven SMD core init.
 *
 * Steps:
 *  1. ioremap the outgoing interrupt register bank ("irq-reg-base").
 *  2. Count the auxiliary SMEM regions ("aux-mem1"..."aux-mem999").
 *  3. Allocate SSR ramdump segments: slot [0] is the main "smem"
 *     region, slots [1..n] are the auxiliary regions.
 *  4. ioremap and sort the auxiliary SMEM regions.
 *  5. Parse each "qcom,smd" / "qcom,smsm" child node, registering IRQs;
 *     on failure, already-parsed children are rolled back.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): irq_out_base is never iounmap()ed on the later failure
 * paths, and the "smem" lookup failure returns without resetting
 * num_smem_areas -- confirm whether these leaks matter for probe retry.
 */
static int __devinit smd_core_devicetree_init(struct platform_device *pdev)
{
	char *key;
	struct resource *r;
	void *irq_out_base;
	phys_addr_t aux_mem_base;
	resource_size_t aux_mem_size;
	int temp_string_size = 11; /* max 3 digit count */
	char temp_string[temp_string_size];
	int count;
	struct device_node *node;
	int ret;
	const char *compatible;
	struct ramdump_segment *ramdump_segments_tmp;
	int subnode_num = 0;
	resource_size_t irq_out_size;

	disable_smsm_reset_handshake = 1;

	key = "irq-reg-base";
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!r) {
		pr_err("%s: missing '%s'\n", __func__, key);
		return -ENODEV;
	}
	irq_out_size = resource_size(r);
	irq_out_base = ioremap_nocache(r->start, irq_out_size);
	if (!irq_out_base) {
		pr_err("%s: ioremap_nocache() of irq_out_base addr:%pr size:%pr\n",
				__func__, &r->start, &irq_out_size);
		return -ENOMEM;
	}
	SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);

	/* first pass: just count the aux-mem regions */
	count = 1;
	while (1) {
		scnprintf(temp_string, temp_string_size, "aux-mem%d", count);
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							temp_string);
		if (!r)
			break;

		++num_smem_areas;
		++count;
		if (count > 999) {
			pr_err("%s: max num aux mem regions reached\n",
								__func__);
			break;
		}
	}

	/* initialize SSR ramdump regions */
	key = "smem";
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!r) {
		pr_err("%s: missing '%s'\n", __func__, key);
		return -ENODEV;
	}
	ramdump_segments_tmp = kmalloc_array(num_smem_areas + 1,
			sizeof(struct ramdump_segment), GFP_KERNEL);

	if (!ramdump_segments_tmp) {
		pr_err("%s: ramdump segment kmalloc failed\n", __func__);
		ret = -ENOMEM;
		goto free_smem_areas;
	}
	ramdump_segments_tmp[0].address = r->start;
	ramdump_segments_tmp[0].size = resource_size(r);

	if (num_smem_areas) {

		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
					GFP_KERNEL);

		if (!smem_areas) {
			pr_err("%s: smem areas kmalloc failed\n", __func__);
			ret = -ENOMEM;
			goto free_smem_areas;
		}
		/* second pass: record, ioremap and ramdump-register each one */
		count = 1;
		while (1) {
			scnprintf(temp_string, temp_string_size, "aux-mem%d",
					count);
			r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							temp_string);
			if (!r)
				break;
			aux_mem_base = r->start;
			aux_mem_size = resource_size(r);

			/*
			 * Add to ram-dumps segments.
			 * ramdump_segments_tmp[0] is the main SMEM region,
			 * so auxiliary segments are indexed by count
			 * instead of count - 1.
			 */
			ramdump_segments_tmp[count].address = aux_mem_base;
			ramdump_segments_tmp[count].size = aux_mem_size;

			SMD_DBG("%s: %s = %pa %pa", __func__, temp_string,
					&aux_mem_base, &aux_mem_size);
			smem_areas[count - 1].phys_addr = aux_mem_base;
			smem_areas[count - 1].size = aux_mem_size;
			smem_areas[count - 1].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[count-1].phys_addr),
				smem_areas[count - 1].size);
			if (!smem_areas[count - 1].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr:%pa size: %pa\n",
					__func__,
					&smem_areas[count - 1].phys_addr,
					&smem_areas[count - 1].size);
				ret = -ENOMEM;
				goto free_smem_areas;
			}

			++count;
			if (count > 999) {
				pr_err("%s: max num aux mem regions reached\n",
					__func__);
				break;
			}
		}
		/* sorted order is relied on for address lookups */
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	/* register IRQ handlers for every SMD/SMSM child node */
	for_each_child_of_node(pdev->dev.of_node, node) {
		compatible = of_get_property(node, "compatible", NULL);
		if (!strcmp(compatible, "qcom,smd")) {
			ret = parse_smd_devicetree(node, irq_out_base);
			if (ret)
				goto rollback_subnodes;
		} else if (!strcmp(compatible, "qcom,smsm")) {
			ret = parse_smsm_devicetree(node, irq_out_base);
			if (ret)
				goto rollback_subnodes;
		} else {
			pr_err("%s: invalid child node named: %s\n", __func__,
							compatible);
			ret = -ENODEV;
			goto rollback_subnodes;
		}
		++subnode_num;
	}

	/* publish the ramdump segment table only on full success */
	smem_ramdump_segments = ramdump_segments_tmp;
	return 0;

rollback_subnodes:
	/* free the IRQs of the subnode_num children parsed successfully */
	count = 0;
	for_each_child_of_node(pdev->dev.of_node, node) {
		if (count >= subnode_num)
			break;
		++count;
		compatible = of_get_property(node, "compatible", NULL);
		if (!strcmp(compatible, "qcom,smd"))
			unparse_smd_devicetree(node);
		else
			unparse_smsm_devicetree(node);
	}
free_smem_areas:
	num_smem_areas = 0;
	kfree(ramdump_segments_tmp);
	kfree(smem_areas);
	smem_areas = NULL;
	return ret;
}
3878
Gregory Bean4416e9e2010-07-28 10:22:12 -07003879static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003880{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303881 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003882
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303883 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003884 INIT_WORK(&probe_work, smd_channel_probe_worker);
3885
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003886 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3887 if (IS_ERR(channel_close_wq)) {
3888 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3889 return -ENOMEM;
3890 }
3891
3892 if (smsm_init()) {
3893 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003894 return -1;
3895 }
3896
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303897 if (pdev) {
3898 if (pdev->dev.of_node) {
Jeff Hugo412356e2012-09-27 17:14:23 -06003899 ret = smd_core_devicetree_init(pdev);
3900 if (ret) {
3901 pr_err("%s: device tree init failed\n",
3902 __func__);
3903 return ret;
3904 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003905 smd_dev = &pdev->dev;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303906 } else if (pdev->dev.platform_data) {
3907 ret = smd_core_platform_init(pdev);
3908 if (ret) {
3909 pr_err(
3910 "SMD: smd_core_platform_init() failed\n");
3911 return -ENODEV;
3912 }
3913 } else {
3914 ret = smd_core_init();
3915 if (ret) {
3916 pr_err("smd_core_init() failed\n");
3917 return -ENODEV;
3918 }
3919 }
3920 } else {
3921 pr_err("SMD: PDEV not found\n");
3922 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003923 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003924
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003925 smd_initialized = 1;
3926
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003927 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003928 smsm_irq_handler(0, 0);
3929 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003930
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003931 return 0;
3932}
3933
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

/*
 * Subsystem-restart (SSR) notifier registrations: one entry per remote
 * subsystem whose SMD/SMSM state must be cleaned up when it restarts.
 * NOTE(review): multiple subsystem names intentionally map to the same
 * processor ID here ("gss" -> SMD_MODEM, "adsp" -> SMD_Q6) -- only one
 * of each pair exists on a given target.
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
};
3946
3947static int restart_notifier_cb(struct notifier_block *this,
3948 unsigned long code,
3949 void *data)
3950{
Jeff Hugo73f356f2012-12-14 17:56:19 -07003951 /*
3952 * Some SMD or SMSM clients assume SMD/SMSM SSR handling will be
3953 * done in the AFTER_SHUTDOWN level. If this ever changes, extra
3954 * care should be taken to verify no clients are broken.
3955 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003956 if (code == SUBSYS_AFTER_SHUTDOWN) {
3957 struct restart_notifier_block *notifier;
3958
3959 notifier = container_of(this,
3960 struct restart_notifier_block, nb);
3961 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3962 __func__, notifier->processor,
3963 notifier->name);
3964
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003965 remote_spin_release(&remote_spinlock, notifier->processor);
3966 remote_spin_release_all(notifier->processor);
3967
3968 if (smem_ramdump_dev) {
3969 int ret;
3970
3971 SMD_INFO("%s: saving ramdump\n", __func__);
3972 /*
3973 * XPU protection does not currently allow the
3974 * auxiliary memory regions to be dumped. If this
3975 * changes, then num_smem_areas + 1 should be passed
3976 * into do_elf_ramdump() to dump all regions.
3977 */
3978 ret = do_elf_ramdump(smem_ramdump_dev,
3979 smem_ramdump_segments, 1);
3980 if (ret < 0)
3981 pr_err("%s: unable to dump smem %d\n", __func__,
3982 ret);
3983 }
3984
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003985 smd_channel_reset(notifier->processor);
3986 }
3987
3988 return NOTIFY_DONE;
3989}
3990
3991static __init int modem_restart_late_init(void)
3992{
3993 int i;
3994 void *handle;
3995 struct restart_notifier_block *nb;
3996
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003997 smem_ramdump_dev = create_ramdump_device("smem-smd", smd_dev);
3998 if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
3999 pr_err("%s: Unable to create smem ramdump device.\n",
4000 __func__);
4001 smem_ramdump_dev = NULL;
4002 }
4003
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004004 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
4005 nb = &restart_notifiers[i];
4006 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
4007 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
4008 __func__, nb->name, handle);
4009 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06004010
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004011 return 0;
4012}
4013late_initcall(modem_restart_late_init);
4014
/*
 * Device-tree match table.  The SMD driver binds against the
 * "qcom,smem" node; "qcom,smd"/"qcom,smsm" entries are parsed as child
 * nodes of it (see the device-tree init path in msm_smd_probe()).
 */
static struct of_device_id msm_smem_match_table[] = {
	{ .compatible = "qcom,smem" },
	{},
};

/* Platform driver; registered from msm_smd_init(). */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
		.of_match_table = msm_smem_match_table,
	},
};
4028
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06004029int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07004030{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06004031 static bool registered;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06004032 int rc;
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06004033
4034 if (registered)
4035 return 0;
4036
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05304037 smd_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smd");
4038 if (!smd_log_ctx) {
4039 pr_err("%s: unable to create logging context\n", __func__);
4040 msm_smd_debug_mask = 0;
4041 }
4042
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06004043 registered = true;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06004044 rc = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
4045 if (rc) {
4046 pr_err("%s: remote spinlock init failed %d\n", __func__, rc);
4047 return rc;
4048 }
4049 spinlocks_initialized = 1;
4050
4051 rc = platform_driver_register(&msm_smd_driver);
4052 if (rc) {
4053 pr_err("%s: msm_smd_driver register failed %d\n",
4054 __func__, rc);
4055 return rc;
4056 }
4057
4058 smd_module_init_notify(0, NULL);
4059
4060 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07004061}
4062
4063module_init(msm_smd_init);
4064
4065MODULE_DESCRIPTION("MSM Shared Memory Core");
4066MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
4067MODULE_LICENSE("GPL");