blob: 07ac9304d02b2198d8864816856992bda2486446 [file] [log] [blame]
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001/* arch/arm/mach-msm/smd.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Eric Holmberg6275b602012-11-19 13:05:04 -07004 * Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
Brian Swetland2eb44eb2008-09-29 16:00:48 -07005 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/platform_device.h>
19#include <linux/module.h>
20#include <linux/fs.h>
21#include <linux/cdev.h>
22#include <linux/device.h>
23#include <linux/wait.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/slab.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070028#include <linux/delay.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/io.h>
30#include <linux/termios.h>
31#include <linux/ctype.h>
32#include <linux/remote_spinlock.h>
33#include <linux/uaccess.h>
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070034#include <linux/kfifo.h>
Eric Holmberg59a9f9412012-03-19 10:04:22 -060035#include <linux/wakelock.h>
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -060036#include <linux/notifier.h>
Jeff Hugobdc734d2012-03-26 16:05:39 -060037#include <linux/sort.h>
Eric Holmberg144c2de2012-10-04 13:37:28 -060038#include <linux/suspend.h>
Jeff Hugo412356e2012-09-27 17:14:23 -060039#include <linux/of.h>
40#include <linux/of_irq.h>
Seemanta Dutta4e2d49c2013-04-05 16:28:11 -070041
Brian Swetland2eb44eb2008-09-29 16:00:48 -070042#include <mach/msm_smd.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070043#include <mach/msm_iomap.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070044#include <mach/system.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045#include <mach/subsystem_notif.h>
Angshuman Sarkaread67bd2011-09-21 20:13:12 +053046#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070047#include <mach/proc_comm.h>
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +053048#include <mach/msm_ipc_logging.h>
Seemanta Dutta4e2d49c2013-04-05 16:28:11 -070049#include <mach/ramdump.h>
50
Ram Somani8b9589f2012-04-03 12:07:18 +053051#include <asm/cacheflush.h>
Brian Swetland2eb44eb2008-09-29 16:00:48 -070052
53#include "smd_private.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070054#include "modem_notifier.h"
Brian Swetland2eb44eb2008-09-29 16:00:48 -070055
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070056#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM8X60) \
Jeff Hugo56b933a2011-09-28 14:42:05 -060057 || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_FSM9XXX) \
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060058 || defined(CONFIG_ARCH_MSM9615) || defined(CONFIG_ARCH_APQ8064)
Brian Swetland37521a32009-07-01 18:30:47 -070059#define CONFIG_QDSP6 1
60#endif
61
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060062#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) \
63 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064#define CONFIG_DSPS 1
65#endif
66
Jeff Hugo0c0f5e92011-09-28 13:55:45 -060067#if defined(CONFIG_ARCH_MSM8960) \
68 || defined(CONFIG_ARCH_APQ8064)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070069#define CONFIG_WCNSS 1
Jeff Hugo6a8057c2011-08-16 13:47:12 -060070#define CONFIG_DSPS_SMSM 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -070072
73#define MODULE_NAME "msm_smd"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070074#define SMEM_VERSION 0x000B
75#define SMD_VERSION 0x00020000
Eric Holmbergc7e8daf2011-12-28 11:49:21 -070076#define SMSM_SNAPSHOT_CNT 64
Eric Holmbergda31d042012-03-28 14:01:02 -060077#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4)
Eric Holmberge5266d32013-02-25 18:29:27 -070078#define RSPIN_INIT_WAIT_MS 1000
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070079
80uint32_t SMSM_NUM_ENTRIES = 8;
81uint32_t SMSM_NUM_HOSTS = 3;
Brian Swetland2eb44eb2008-09-29 16:00:48 -070082
Eric Holmberge8a39322012-04-03 15:14:02 -060083/* Legacy SMSM interrupt notifications */
84#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
85 | SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)
Brian Swetland2eb44eb2008-09-29 16:00:48 -070086
/* Bit flags for msm_smd_debug_mask (settable via the debug_mask module param). */
enum {
	MSM_SMD_DEBUG = 1U << 0,	/* enables SMD_DBG() output */
	MSM_SMSM_DEBUG = 1U << 1,	/* enables SMSM_DBG() output */
	MSM_SMD_INFO = 1U << 2,		/* enables SMD_INFO() output */
	MSM_SMSM_INFO = 1U << 3,	/* enables SMSM_INFO() output */
	MSM_SMx_POWER_INFO = 1U << 4,	/* enables SMx_POWER_INFO() output */
};
94
/*
 * Cached pointers into the shared-memory SMSM region.  All three are
 * read with __raw_readl()/written with __raw_writel() via the
 * SMSM_*_ADDR() macros below.
 */
struct smsm_shared_info {
	uint32_t *state;	/* per-entry SMSM state words */
	uint32_t *intr_mask;	/* per-entry, per-host interrupt masks */
	uint32_t *intr_mux;	/* legacy mux counters (see notify_other_smsm) */
};
100
101static struct smsm_shared_info smsm_info;
Eric Holmberg59a9f9412012-03-19 10:04:22 -0600102static struct kfifo smsm_snapshot_fifo;
103static struct wake_lock smsm_snapshot_wakelock;
104static int smsm_snapshot_count;
105static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700106
/*
 * Layout of the shared-memory item describing SMSM table dimensions.
 * NOTE(review): field meanings inferred from names; the reserved words
 * appear unused in this file -- confirm against the SMEM spec.
 */
struct smsm_size_info_type {
	uint32_t num_hosts;
	uint32_t num_entries;
	uint32_t reserved0;
	uint32_t reserved1;
};
113
/* One registered SMSM state-change callback. */
struct smsm_state_cb_info {
	struct list_head cb_list;	/* linkage on smsm_state_info.callbacks */
	uint32_t mask;			/* state bits this client cares about */
	void *data;			/* opaque client cookie passed to notify */
	/* invoked with the previous and current SMSM state words */
	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
};
120
/* Per-SMSM-entry bookkeeping for local callback clients. */
struct smsm_state_info {
	struct list_head callbacks;	/* list of smsm_state_cb_info */
	uint32_t last_value;		/* last state word delivered to clients */
	uint32_t intr_mask_set;		/* bits to OR into the shared intr mask */
	uint32_t intr_mask_clear;	/* bits to clear from the shared intr mask */
};
127
/* Incoming handler and outgoing trigger-register config for one link. */
struct interrupt_config_item {
	/* must be initialized */
	irqreturn_t (*irq_handler)(int req, void *data);
	/* outgoing interrupt config (set from platform data) */
	uint32_t out_bit_pos;		/* bit value written to trigger register */
	void __iomem *out_base;		/* NULL => fall back to MSM_TRIG_* macro */
	uint32_t out_offset;		/* register offset from out_base */
	int irq_id;			/* incoming IRQ number */
};
137
/* Interrupt configuration pair (SMD and SMSM) for one remote subsystem. */
struct interrupt_config {
	struct interrupt_config_item smd;
	struct interrupt_config_item smsm;
};
142
143static irqreturn_t smd_modem_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700144static irqreturn_t smsm_modem_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530145static irqreturn_t smd_dsp_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700146static irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530147static irqreturn_t smd_dsps_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700148static irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530149static irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
Eric Holmberg98c6c642012-02-24 11:29:35 -0700150static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
Jeff Hugo918b2dc2012-03-21 13:42:09 -0600151static irqreturn_t smd_rpm_irq_handler(int irq, void *data);
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530152static irqreturn_t smsm_irq_handler(int irq, void *data);
153
/*
 * Per-subsystem interrupt configuration.  Only the irq_handler members
 * are statically initialized; the outgoing out_* fields are filled in
 * later from platform data (see struct interrupt_config_item).
 */
static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
	[SMD_MODEM] = {
		.smd.irq_handler = smd_modem_irq_handler,
		.smsm.irq_handler = smsm_modem_irq_handler,
	},
	[SMD_Q6] = {
		.smd.irq_handler = smd_dsp_irq_handler,
		.smsm.irq_handler = smsm_dsp_irq_handler,
	},
	[SMD_DSPS] = {
		.smd.irq_handler = smd_dsps_irq_handler,
		.smsm.irq_handler = smsm_dsps_irq_handler,
	},
	[SMD_WCNSS] = {
		.smd.irq_handler = smd_wcnss_irq_handler,
		.smsm.irq_handler = smsm_wcnss_irq_handler,
	},
	[SMD_RPM] = {
		.smd.irq_handler = smd_rpm_irq_handler,
		.smsm.irq_handler = NULL, /* does not support smsm */
	},
};
Jeff Hugobdc734d2012-03-26 16:05:39 -0600176
/* One physical shared-memory region and its ioremapped virtual address. */
struct smem_area {
	phys_addr_t phys_addr;		/* physical base of the region */
	resource_size_t size;		/* region size in bytes */
	void __iomem *virt_addr;	/* kernel virtual mapping */
};
182static uint32_t num_smem_areas;
183static struct smem_area *smem_areas;
Eric Holmbergcfbc1d52013-03-13 18:30:19 -0600184static struct ramdump_segment *smem_ramdump_segments;
185static void *smem_ramdump_dev;
Stepan Moskovchenkod6ee8262013-02-06 11:26:05 -0800186static void *smem_range_check(phys_addr_t base, unsigned offset);
Eric Holmbergcfbc1d52013-03-13 18:30:19 -0600187static void *smd_dev;
Jeff Hugobdc734d2012-03-26 16:05:39 -0600188
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700189struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530190
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700191#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
192#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
193 entry * SMSM_NUM_HOSTS + host)
194#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
195
196/* Internal definitions which are not exported in some targets */
197enum {
198 SMSM_APPS_DEM_I = 3,
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700199};
200
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530201static int msm_smd_debug_mask = MSM_SMx_POWER_INFO;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700202module_param_named(debug_mask, msm_smd_debug_mask,
203 int, S_IRUGO | S_IWUSR | S_IWGRP);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530204static void *smd_log_ctx;
205#define NUM_LOG_PAGES 4
206
207#define IPC_LOG(level, x...) do { \
208 if (smd_log_ctx) \
209 ipc_log_string(smd_log_ctx, x); \
210 else \
211 printk(level x); \
212 } while (0)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700213
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700214#if defined(CONFIG_MSM_SMD_DEBUG)
215#define SMD_DBG(x...) do { \
216 if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530217 IPC_LOG(KERN_DEBUG, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218 } while (0)
219
220#define SMSM_DBG(x...) do { \
221 if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530222 IPC_LOG(KERN_DEBUG, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700223 } while (0)
224
225#define SMD_INFO(x...) do { \
226 if (msm_smd_debug_mask & MSM_SMD_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530227 IPC_LOG(KERN_INFO, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228 } while (0)
229
230#define SMSM_INFO(x...) do { \
231 if (msm_smd_debug_mask & MSM_SMSM_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530232 IPC_LOG(KERN_INFO, x); \
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700233 } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700234#define SMx_POWER_INFO(x...) do { \
235 if (msm_smd_debug_mask & MSM_SMx_POWER_INFO) \
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530236 IPC_LOG(KERN_INFO, x); \
Eric Holmberg98c6c642012-02-24 11:29:35 -0700237 } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700238#else
239#define SMD_DBG(x...) do { } while (0)
240#define SMSM_DBG(x...) do { } while (0)
241#define SMD_INFO(x...) do { } while (0)
242#define SMSM_INFO(x...) do { } while (0)
Eric Holmberg98c6c642012-02-24 11:29:35 -0700243#define SMx_POWER_INFO(x...) do { } while (0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700244#endif
245
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700246static unsigned last_heap_free = 0xffffffff;
247
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700248static inline void smd_write_intr(unsigned int val,
249 const void __iomem *addr);
250
251#if defined(CONFIG_ARCH_MSM7X30)
252#define MSM_TRIG_A2M_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530253 (smd_write_intr(1 << 0, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700254#define MSM_TRIG_A2Q6_SMD_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530255 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256#define MSM_TRIG_A2M_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530257 (smd_write_intr(1 << 5, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700258#define MSM_TRIG_A2Q6_SMSM_INT \
Taniya Das298de8c2012-02-16 11:45:31 +0530259 (smd_write_intr(1 << 8, MSM_APCS_GCC_BASE + 0x8))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600261#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262#define MSM_TRIG_A2WCNSS_SMD_INT
263#define MSM_TRIG_A2WCNSS_SMSM_INT
264#elif defined(CONFIG_ARCH_MSM8X60)
265#define MSM_TRIG_A2M_SMD_INT \
266 (smd_write_intr(1 << 3, MSM_GCC_BASE + 0x8))
267#define MSM_TRIG_A2Q6_SMD_INT \
268 (smd_write_intr(1 << 15, MSM_GCC_BASE + 0x8))
269#define MSM_TRIG_A2M_SMSM_INT \
270 (smd_write_intr(1 << 4, MSM_GCC_BASE + 0x8))
271#define MSM_TRIG_A2Q6_SMSM_INT \
272 (smd_write_intr(1 << 14, MSM_GCC_BASE + 0x8))
273#define MSM_TRIG_A2DSPS_SMD_INT \
274 (smd_write_intr(1, MSM_SIC_NON_SECURE_BASE + 0x4080))
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600275#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700276#define MSM_TRIG_A2WCNSS_SMD_INT
277#define MSM_TRIG_A2WCNSS_SMSM_INT
Jeff Hugo56b933a2011-09-28 14:42:05 -0600278#elif defined(CONFIG_ARCH_MSM9615)
279#define MSM_TRIG_A2M_SMD_INT \
280 (smd_write_intr(1 << 3, MSM_APCS_GCC_BASE + 0x8))
281#define MSM_TRIG_A2Q6_SMD_INT \
282 (smd_write_intr(1 << 15, MSM_APCS_GCC_BASE + 0x8))
283#define MSM_TRIG_A2M_SMSM_INT \
284 (smd_write_intr(1 << 4, MSM_APCS_GCC_BASE + 0x8))
285#define MSM_TRIG_A2Q6_SMSM_INT \
286 (smd_write_intr(1 << 14, MSM_APCS_GCC_BASE + 0x8))
287#define MSM_TRIG_A2DSPS_SMD_INT
288#define MSM_TRIG_A2DSPS_SMSM_INT
289#define MSM_TRIG_A2WCNSS_SMD_INT
290#define MSM_TRIG_A2WCNSS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700291#elif defined(CONFIG_ARCH_FSM9XXX)
292#define MSM_TRIG_A2Q6_SMD_INT \
293 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
294#define MSM_TRIG_A2Q6_SMSM_INT \
295 (smd_write_intr(1 << 10, MSM_GCC_BASE + 0x8))
296#define MSM_TRIG_A2M_SMD_INT \
297 (smd_write_intr(1 << 0, MSM_GCC_BASE + 0x8))
298#define MSM_TRIG_A2M_SMSM_INT \
299 (smd_write_intr(1 << 5, MSM_GCC_BASE + 0x8))
300#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600301#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700302#define MSM_TRIG_A2WCNSS_SMD_INT
303#define MSM_TRIG_A2WCNSS_SMSM_INT
Eric Holmberg73d45462012-02-28 10:41:31 -0700304#elif defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7x25)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700305#define MSM_TRIG_A2M_SMD_INT \
306 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700307#define MSM_TRIG_A2Q6_SMD_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700308#define MSM_TRIG_A2M_SMSM_INT \
309 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
Eric Holmberg73d45462012-02-28 10:41:31 -0700310#define MSM_TRIG_A2Q6_SMSM_INT
311#define MSM_TRIG_A2DSPS_SMD_INT
312#define MSM_TRIG_A2DSPS_SMSM_INT
313#define MSM_TRIG_A2WCNSS_SMD_INT
314#define MSM_TRIG_A2WCNSS_SMSM_INT
315#elif defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM7X27A)
316#define MSM_TRIG_A2M_SMD_INT \
317 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (0) * 4))
318#define MSM_TRIG_A2Q6_SMD_INT
319#define MSM_TRIG_A2M_SMSM_INT \
320 (smd_write_intr(1, MSM_CSR_BASE + 0x400 + (5) * 4))
321#define MSM_TRIG_A2Q6_SMSM_INT
322#define MSM_TRIG_A2DSPS_SMD_INT
323#define MSM_TRIG_A2DSPS_SMSM_INT
324#define MSM_TRIG_A2WCNSS_SMD_INT
325#define MSM_TRIG_A2WCNSS_SMSM_INT
326#else /* use platform device / device tree configuration */
327#define MSM_TRIG_A2M_SMD_INT
328#define MSM_TRIG_A2Q6_SMD_INT
329#define MSM_TRIG_A2M_SMSM_INT
330#define MSM_TRIG_A2Q6_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700331#define MSM_TRIG_A2DSPS_SMD_INT
Jeff Hugo6a8057c2011-08-16 13:47:12 -0600332#define MSM_TRIG_A2DSPS_SMSM_INT
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700333#define MSM_TRIG_A2WCNSS_SMD_INT
334#define MSM_TRIG_A2WCNSS_SMSM_INT
Brian Swetland37521a32009-07-01 18:30:47 -0700335#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700336
Jeff Hugoee40b152012-02-09 17:39:47 -0700337/*
338 * stub out legacy macros if they are not being used so that the legacy
339 * code compiles even though it is not used
340 *
341 * these definitions should not be used in active code and will cause
342 * an early failure
343 */
344#ifndef INT_A9_M2A_0
345#define INT_A9_M2A_0 -1
346#endif
347#ifndef INT_A9_M2A_5
348#define INT_A9_M2A_5 -1
349#endif
350#ifndef INT_ADSP_A11
351#define INT_ADSP_A11 -1
352#endif
353#ifndef INT_ADSP_A11_SMSM
354#define INT_ADSP_A11_SMSM -1
355#endif
356#ifndef INT_DSPS_A11
357#define INT_DSPS_A11 -1
358#endif
359#ifndef INT_DSPS_A11_SMSM
360#define INT_DSPS_A11_SMSM -1
361#endif
362#ifndef INT_WCNSS_A11
363#define INT_WCNSS_A11 -1
364#endif
365#ifndef INT_WCNSS_A11_SMSM
366#define INT_WCNSS_A11_SMSM -1
367#endif
368
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700369#define SMD_LOOPBACK_CID 100
370
Eric Holmbergf6d7d1a2011-09-23 18:31:04 -0600371#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
372static remote_spinlock_t remote_spinlock;
373
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700374static LIST_HEAD(smd_ch_list_loopback);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700375static void smd_fake_irq_handler(unsigned long arg);
Eric Holmbergda31d042012-03-28 14:01:02 -0600376static void smsm_cb_snapshot(uint32_t use_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700377
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -0600378static struct workqueue_struct *smsm_cb_wq;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700379static void notify_smsm_cb_clients_worker(struct work_struct *work);
380static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
Eric Holmbergc8002902011-09-16 13:55:57 -0600381static DEFINE_MUTEX(smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700382static struct smsm_state_info *smsm_states;
Angshuman Sarkar7ee0dca2011-08-22 21:37:34 +0530383static int spinlocks_initialized;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -0600384
385/**
386 * Variables to indicate smd module initialization.
387 * Dependents to register for smd module init notifier.
388 */
389static int smd_module_inited;
390static RAW_NOTIFIER_HEAD(smd_module_init_notifier_list);
391static DEFINE_MUTEX(smd_module_init_notifier_lock);
392static void smd_module_init_notify(uint32_t state, void *data);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530393static int smd_stream_write_avail(struct smd_channel *ch);
394static int smd_stream_read_avail(struct smd_channel *ch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700395
/*
 * smd_write_intr() - write a trigger value to an interrupt register
 * @val:  bit value selecting the target processor's interrupt
 * @addr: memory-mapped trigger register
 *
 * The wmb() orders all prior shared-memory writes before the register
 * write, so the remote processor sees consistent SMD/SMSM state when
 * the interrupt it receives causes it to look.  Do not reorder.
 */
static inline void smd_write_intr(unsigned int val,
		const void __iomem *addr)
{
	wmb();
	__raw_writel(val, addr);
}
402
/*
 * log_notify() - power-log an outgoing SMD notification
 * @subsystem: SMD edge being signalled (translated to a subsystem name)
 * @ch:        channel being signalled, or NULL for a channel-less kick
 *
 * Logs FIFO fill levels and half-channel head/tail indices so power
 * debugging can correlate wakeups with traffic.  Compiles to (almost)
 * nothing when MSM_SMx_POWER_INFO logging is disabled; the (void) cast
 * silences the unused-variable warning in that configuration.
 */
static inline void log_notify(uint32_t subsystem, smd_channel_t *ch)
{
	const char *subsys = smd_edge_to_subsystem(subsystem);

	(void) subsys;

	if (!ch)
		SMx_POWER_INFO("Apps->%s\n", subsys);
	else
		SMx_POWER_INFO(
			"Apps->%s ch%d '%s': tx%d/rx%d %dr/%dw : %dr/%dw\n",
			subsys, ch->n, ch->name,
			ch->fifo_size -
				(smd_stream_write_avail(ch) + 1),
			smd_stream_read_avail(ch),
			ch->half_ch->get_tail(ch->send),
			ch->half_ch->get_head(ch->send),
			ch->half_ch->get_tail(ch->recv),
			ch->half_ch->get_head(ch->recv)
			);
}
424
/*
 * notify_modem_smd() - raise the apps->modem SMD interrupt
 * @ch: channel that triggered the notification (NULL allowed; logged only)
 *
 * Uses the platform-data trigger register when configured; otherwise
 * falls back to the legacy hardcoded MSM_TRIG_A2M_SMD_INT macro (a
 * no-op on device-tree targets).  Each path bumps its own statistic.
 */
static inline void notify_modem_smd(smd_channel_t *ch)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_MODEM].smd;

	log_notify(SMD_APPS_MODEM, ch);
	if (intr->out_base) {
		++interrupt_stats[SMD_MODEM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_MODEM].smd_out_hardcode_count;
		MSM_TRIG_A2M_SMD_INT;
	}
}
440
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530441static inline void notify_dsp_smd(smd_channel_t *ch)
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700442{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530443 static const struct interrupt_config_item *intr
444 = &private_intr_config[SMD_Q6].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530445
446 log_notify(SMD_APPS_QDSP, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700447 if (intr->out_base) {
448 ++interrupt_stats[SMD_Q6].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530449 smd_write_intr(intr->out_bit_pos,
450 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700451 } else {
452 ++interrupt_stats[SMD_Q6].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530453 MSM_TRIG_A2Q6_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700454 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -0700455}
456
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530457static inline void notify_dsps_smd(smd_channel_t *ch)
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530458{
459 static const struct interrupt_config_item *intr
460 = &private_intr_config[SMD_DSPS].smd;
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530461
462 log_notify(SMD_APPS_DSPS, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700463 if (intr->out_base) {
464 ++interrupt_stats[SMD_DSPS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530465 smd_write_intr(intr->out_bit_pos,
466 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700467 } else {
468 ++interrupt_stats[SMD_DSPS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530469 MSM_TRIG_A2DSPS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700470 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530471}
472
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530473static inline void notify_wcnss_smd(struct smd_channel *ch)
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530474{
475 static const struct interrupt_config_item *intr
476 = &private_intr_config[SMD_WCNSS].smd;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530477
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +0530478 log_notify(SMD_APPS_WCNSS, ch);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700479 if (intr->out_base) {
480 ++interrupt_stats[SMD_WCNSS].smd_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530481 smd_write_intr(intr->out_bit_pos,
482 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700483 } else {
484 ++interrupt_stats[SMD_WCNSS].smd_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530485 MSM_TRIG_A2WCNSS_SMD_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700486 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530487}
488
/*
 * notify_rpm_smd() - raise the apps->RPM SMD interrupt
 * @ch: channel that triggered the notification (NULL allowed; logged only)
 *
 * Unlike the other notify_*_smd() helpers there is no legacy hardcoded
 * fallback: if no trigger register was configured this is a silent
 * no-op, and logging is deliberately done only when a real interrupt
 * is actually sent.
 */
static inline void notify_rpm_smd(smd_channel_t *ch)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_RPM].smd;

	if (intr->out_base) {
		log_notify(SMD_APPS_RPM, ch);
		++interrupt_stats[SMD_RPM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}
501
/*
 * notify_modem_smsm() - raise the apps->modem SMSM interrupt
 *
 * Uses the platform-data trigger register when configured; otherwise
 * falls back to the legacy MSM_TRIG_A2M_SMSM_INT macro (a no-op on
 * device-tree targets).  Each path bumps its own statistic.
 */
static inline void notify_modem_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_MODEM].smsm;
	if (intr->out_base) {
		++interrupt_stats[SMD_MODEM].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	} else {
		++interrupt_stats[SMD_MODEM].smsm_out_hardcode_count;
		MSM_TRIG_A2M_SMSM_INT;
	}
}
515
516static inline void notify_dsp_smsm(void)
517{
518 static const struct interrupt_config_item *intr
519 = &private_intr_config[SMD_Q6].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700520 if (intr->out_base) {
521 ++interrupt_stats[SMD_Q6].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530522 smd_write_intr(intr->out_bit_pos,
523 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700524 } else {
525 ++interrupt_stats[SMD_Q6].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530526 MSM_TRIG_A2Q6_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700527 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530528}
529
530static inline void notify_dsps_smsm(void)
531{
532 static const struct interrupt_config_item *intr
533 = &private_intr_config[SMD_DSPS].smsm;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700534 if (intr->out_base) {
535 ++interrupt_stats[SMD_DSPS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530536 smd_write_intr(intr->out_bit_pos,
537 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700538 } else {
539 ++interrupt_stats[SMD_DSPS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530540 MSM_TRIG_A2DSPS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700541 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530542}
543
544static inline void notify_wcnss_smsm(void)
545{
546 static const struct interrupt_config_item *intr
547 = &private_intr_config[SMD_WCNSS].smsm;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530548
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700549 if (intr->out_base) {
550 ++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530551 smd_write_intr(intr->out_bit_pos,
552 intr->out_base + intr->out_offset);
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700553 } else {
554 ++interrupt_stats[SMD_WCNSS].smsm_out_hardcode_count;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530555 MSM_TRIG_A2WCNSS_SMSM_INT;
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700556 }
Angshuman Sarkarbad32df2012-02-01 19:52:52 +0530557}
558
/*
 * notify_other_smsm() - fan out an SMSM state change to interested hosts
 * @smsm_entry:  SMSM entry whose state changed
 * @notify_mask: state bits that changed
 *
 * For each remote host, the shared per-host interrupt mask decides
 * whether that host wants an interrupt for these bits.  The modem is
 * special-cased: if no mask array exists (older protocol), it is
 * always notified.  Local callbacks are snapshotted without a
 * wakelock (see the comment before the final check).
 */
static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocol don't use smsm_intr_mask,
	 * but still communicates with modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		/* QSD8x50 signals the Q6 via an incrementing mux counter
		 * in shared memory in addition to the interrupt itself. */
		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets. Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
	     & notify_mask)) {
		smsm_cb_snapshot(0);
	}
}
607
Eric Holmberg144c2de2012-10-04 13:37:28 -0600608static int smsm_pm_notifier(struct notifier_block *nb,
609 unsigned long event, void *unused)
610{
611 switch (event) {
612 case PM_SUSPEND_PREPARE:
613 smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
614 break;
615
616 case PM_POST_SUSPEND:
617 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
618 break;
619 }
620 return NOTIFY_DONE;
621}
622
/* PM notifier block registering smsm_pm_notifier at default priority. */
static struct notifier_block smsm_pm_nb = {
	.notifier_call = smsm_pm_notifier,
	.priority = 0,
};
627
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700628void smd_diag(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700629{
630 char *x;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700631 int size;
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700632
633 x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
634 if (x != 0) {
635 x[SZ_DIAG_ERR_MSG - 1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700636 SMD_INFO("smem: DIAG '%s'\n", x);
637 }
638
639 x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
640 if (x != 0) {
641 x[size - 1] = 0;
642 pr_err("smem: CRASH LOG\n'%s'\n", x);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700643 }
644}
645
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700646
/*
 * handle_modem_crash() - terminal handler for a modem/AMSS crash
 *
 * Dumps the shared-memory diagnostics and then spins forever on
 * purpose: the modem or the watchdog is expected to reset the whole
 * system.  This function never returns.
 */
static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}
661
/*
 * Poll the modem's SMSM state for the RESET bit.  If it is set the
 * modem is treated as crashed and handle_modem_crash() is invoked,
 * which never returns.  Returns 0 when the modem is healthy or SMSM
 * is not yet initialized.
 */
int smsm_check_for_modem_crash(void)
{
	/* if the modem's not ready yet, we have to hope for the best */
	if (!smsm_info.state)
		return 0;

	if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
		handle_modem_crash();
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(smsm_check_for_modem_crash);
Brian Swetland2eb44eb2008-09-29 16:00:48 -0700675
/* the spinlock is used to synchronize between the
 * irq handler and code that mutates the channel
 * list or fiddles with channel state
 */
static DEFINE_SPINLOCK(smd_lock);
/* non-static: shared with other SMD/SMEM compilation units.
 * NOTE(review): appears to guard shared-memory heap accesses -- confirm
 * against users outside this file. */
DEFINE_SPINLOCK(smem_lock);

/* the mutex is used during open() and close()
 * operations to avoid races while creating or
 * destroying smd_channel structures
 */
static DEFINE_MUTEX(smd_creation_mutex);

/* non-zero once SMD initialization has completed */
static int smd_initialized;
690
/* v1 shared-memory channel layout: each half-channel is immediately
 * followed by its fixed-size FIFO buffer */
struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

/* v2 shared-memory channel layout: half-channel control state only,
 * with no inline FIFO data */
struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

/* v2 layout variant for edges requiring word-aligned accesses */
struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};
707
/* Pair of processor IDs on an SMD edge, plus an optional subsystem
 * name (empty when the edge has no named subsystem; see
 * smd_edge_to_subsystem()). */
struct edge_to_pid {
	uint32_t local_pid;
	uint32_t remote_pid;
	char subsys_name[SMD_MAX_CH_NAME_LEN];
};

/**
 * Maps edge type to local and remote processor ID's.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "adsp"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
};
738
/* registration record tying a processor ID and name to a restart
 * notifier callback */
struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};
744
/* NOTE(review): presumably suppresses the SMSM reset handshake when
 * non-zero -- confirm where this is set */
static int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

/* channel lifecycle lists, iterated/modified under smd_lock */
static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);
/* per-edge lists of active channels serviced by the irq handlers */
static LIST_HEAD(smd_ch_list_modem);
static LIST_HEAD(smd_ch_list_dsp);
static LIST_HEAD(smd_ch_list_dsps);
static LIST_HEAD(smd_ch_list_wcnss);
static LIST_HEAD(smd_ch_list_rpm);

/* one flag per allocation-table slot, set once the slot is claimed */
static unsigned char smd_ch_allocated[64];
static struct work_struct probe_work;

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm);

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);
769
/*
 * Scan the shared-memory channel allocation table and create a local
 * smd_channel for every newly populated entry whose edge involves the
 * APPS processor.  Claimed slots are recorded in smd_ch_allocated[]
 * so each is only processed once; smd_probe_lock serializes probes
 * racing in from multiple cores.
 */
static void smd_channel_probe_worker(struct work_struct *work)
{
	struct smd_alloc_elm *shared;
	unsigned n;
	uint32_t type;

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);

	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	mutex_lock(&smd_probe_lock);
	for (n = 0; n < 64; n++) {
		if (smd_ch_allocated[n])
			continue;

		/* channel should be allocated only if APPS
		   processor is involved */
		type = SMD_CHANNEL_TYPE(shared[n].type);
		if (type >= ARRAY_SIZE(edge_to_pids) ||
				edge_to_pids[type].local_pid != SMD_APPS)
			continue;
		/* entries with zero ref_count or an empty name are unused */
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		if (!smd_alloc_channel(&shared[n]))
			smd_ch_allocated[n] = 1;
		else
			SMD_INFO("Probe skipping ch %d, not allocated\n", n);
	}
	mutex_unlock(&smd_probe_lock);
}
806
807/**
808 * Lookup processor ID and determine if it belongs to the proved edge
809 * type.
810 *
811 * @shared2: Pointer to v2 shared channel structure
812 * @type: Edge type
813 * @pid: Processor ID of processor on edge
814 * @local_ch: Channel that belongs to processor @pid
815 * @remote_ch: Other side of edge contained @pid
Jeff Hugo00be6282012-09-07 11:24:32 -0600816 * @is_word_access_ch: Bool, is this a word aligned access channel
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700817 *
818 * Returns 0 for not on edge, 1 for found on edge
819 */
Jeff Hugo00be6282012-09-07 11:24:32 -0600820static int pid_is_on_edge(void *shared2,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700821 uint32_t type, uint32_t pid,
Jeff Hugo00be6282012-09-07 11:24:32 -0600822 void **local_ch,
823 void **remote_ch,
824 int is_word_access_ch
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700825 )
826{
827 int ret = 0;
828 struct edge_to_pid *edge;
Jeff Hugo00be6282012-09-07 11:24:32 -0600829 void *ch0;
830 void *ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700831
832 *local_ch = 0;
833 *remote_ch = 0;
834
835 if (!shared2 || (type >= ARRAY_SIZE(edge_to_pids)))
836 return 0;
837
Jeff Hugo00be6282012-09-07 11:24:32 -0600838 if (is_word_access_ch) {
839 ch0 = &((struct smd_shared_v2_word_access *)(shared2))->ch0;
840 ch1 = &((struct smd_shared_v2_word_access *)(shared2))->ch1;
841 } else {
842 ch0 = &((struct smd_shared_v2 *)(shared2))->ch0;
843 ch1 = &((struct smd_shared_v2 *)(shared2))->ch1;
844 }
845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846 edge = &edge_to_pids[type];
847 if (edge->local_pid != edge->remote_pid) {
848 if (pid == edge->local_pid) {
Jeff Hugo00be6282012-09-07 11:24:32 -0600849 *local_ch = ch0;
850 *remote_ch = ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851 ret = 1;
852 } else if (pid == edge->remote_pid) {
Jeff Hugo00be6282012-09-07 11:24:32 -0600853 *local_ch = ch1;
854 *remote_ch = ch0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700855 ret = 1;
856 }
857 }
858
859 return ret;
860}
861
Eric Holmberg17992c12012-02-29 12:54:44 -0700862/*
863 * Returns a pointer to the subsystem name or NULL if no
864 * subsystem name is available.
865 *
866 * @type - Edge definition
867 */
868const char *smd_edge_to_subsystem(uint32_t type)
869{
870 const char *subsys = NULL;
871
872 if (type < ARRAY_SIZE(edge_to_pids)) {
873 subsys = edge_to_pids[type].subsys_name;
874 if (subsys[0] == 0x0)
875 subsys = NULL;
876 }
877 return subsys;
878}
879EXPORT_SYMBOL(smd_edge_to_subsystem);
880
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700881/*
882 * Returns a pointer to the subsystem name given the
883 * remote processor ID.
Arun Kumar Neelakantama35c7a72012-10-18 15:45:15 +0530884 * subsystem is not necessarily PIL-loadable
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700885 *
886 * @pid Remote processor ID
887 * @returns Pointer to subsystem name or NULL if not found
888 */
889const char *smd_pid_to_subsystem(uint32_t pid)
890{
891 const char *subsys = NULL;
892 int i;
893
894 for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
Arun Kumar Neelakantama35c7a72012-10-18 15:45:15 +0530895 if (pid == edge_to_pids[i].remote_pid) {
896 if (edge_to_pids[i].subsys_name[0] != 0x0) {
897 subsys = edge_to_pids[i].subsys_name;
898 break;
899 } else if (pid == SMD_RPM) {
900 subsys = "rpm";
901 break;
902 }
Eric Holmberg7ad623a2012-03-01 14:41:10 -0700903 }
904 }
905
906 return subsys;
907}
908EXPORT_SYMBOL(smd_pid_to_subsystem);
Eric Holmberg17992c12012-02-29 12:54:44 -0700909
/*
 * Force one half-channel into @new_state after a remote-processor
 * restart: clear the modem-control flags and raise fSTATE so the
 * change is noticed.  Channels already CLOSED are left untouched.
 * The two branches are identical except for the half-channel layout.
 */
static void smd_reset_edge(void *void_ch, unsigned new_state,
				int is_word_access_ch)
{
	if (is_word_access_ch) {
		struct smd_half_channel_word_access *ch =
			(struct smd_half_channel_word_access *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	} else {
		struct smd_half_channel *ch =
			(struct smd_half_channel *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700935
/*
 * Walk the channel allocation table and, on every edge that processor
 * @pid participates in, force @pid's half-channel into @new_state.
 * When the modem restarts, the separate ModemFW edges are reset too.
 * Called with smd_lock held (see smd_channel_reset()).
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared,
		unsigned new_state, unsigned pid)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *local_ch;
	void *remote_ch;
	int is_word_access;

	for (n = 0; n < SMD_CHANNELS; n++) {
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		if (is_word_access)
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2_word_access));
		else
			shared2 = smem_alloc(SMEM_SMD_BASE_ID + n,
				sizeof(struct smd_shared_v2));
		if (!shared2)
			continue;

		if (pid_is_on_edge(shared2, type, pid, &local_ch, &remote_ch,
							is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);

		/*
		 * ModemFW is in the same subsystem as ModemSW, but has
		 * separate SMD edges that need to be reset.
		 */
		if (pid == SMSM_MODEM &&
				pid_is_on_edge(shared2, type, SMD_MODEM_Q6_FW,
					&local_ch, &remote_ch, is_word_access))
			smd_reset_edge(local_ch, new_state, is_word_access);
	}
}
977
978
/*
 * Reset all SMD/SMSM state associated with restarting processor
 * @restart_pid: clear its SMSM state word, re-arm the SMSM init
 * handshake (modem only), then drive every half-channel it owns
 * through CLOSING to CLOSED, waking local clients and interrupting
 * the remote processors after each phase.
 */
void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared;
	unsigned long flags;

	SMx_POWER_INFO("%s: starting reset\n", __func__);

	shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSING, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();	/* publish the state changes before raising interrupts */
	smd_fake_irq_handler(0);
	notify_modem_smd(NULL);
	notify_dsp_smd(NULL);
	notify_dsps_smd(NULL);
	notify_wcnss_smd(NULL);
	notify_rpm_smd(NULL);

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared, SMD_SS_CLOSED, restart_pid);
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	/* notify SMD processors */
	mb();	/* publish the state changes before raising interrupts */
	smd_fake_irq_handler(0);
	notify_modem_smd(NULL);
	notify_dsp_smd(NULL);
	notify_dsps_smd(NULL);
	notify_wcnss_smd(NULL);
	notify_rpm_smd(NULL);

	SMx_POWER_INFO("%s: finished reset\n", __func__);
}
1045
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001046/* how many bytes are available for reading */
1047static int smd_stream_read_avail(struct smd_channel *ch)
1048{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001049 return (ch->half_ch->get_head(ch->recv) -
1050 ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001051}
1052
1053/* how many bytes we are free to write */
1054static int smd_stream_write_avail(struct smd_channel *ch)
1055{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001056 return ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
1057 ch->half_ch->get_tail(ch->send)) & ch->fifo_mask);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001058}
1059
1060static int smd_packet_read_avail(struct smd_channel *ch)
1061{
1062 if (ch->current_packet) {
1063 int n = smd_stream_read_avail(ch);
1064 if (n > ch->current_packet)
1065 n = ch->current_packet;
1066 return n;
1067 } else {
1068 return 0;
1069 }
1070}
1071
1072static int smd_packet_write_avail(struct smd_channel *ch)
1073{
1074 int n = smd_stream_write_avail(ch);
1075 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
1076}
1077
1078static int ch_is_open(struct smd_channel *ch)
1079{
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001080 return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
1081 ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
1082 && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001083}
1084
/* provide a pointer and length to readable data in the fifo */
static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->recv);
	unsigned tail = ch->half_ch->get_tail(ch->recv);
	*ptr = (void *) (ch->recv_data + tail);

	/* only the contiguous run is returned; if the data wraps, the
	 * caller sees tail..end now and the remainder on the next call */
	if (tail <= head)
		return head - tail;
	else
		return ch->fifo_size - tail;
}
1097
/* reads the remote side's fBLOCKREADINTR flag (presumably: remote asks
 * us to suppress read-side interrupts -- confirm against remote impl)
 */
static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}
1102
/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
	wmb();	/* order the tail update before raising fTAIL for the remote */
	ch->half_ch->set_fTAIL(ch->send, 1);
}
1112
/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 *
 * @user_buf: non-zero when @_data is a userspace pointer (copy_to_user)
 * Returns the number of bytes consumed from the fifo.  Note: a partial
 * copy_to_user failure is only logged -- the fifo still advances by the
 * full chunk size.
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

	while (len > 0) {
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}
1153
/* no-op counterpart of update_packet_state() for stream channels */
static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}
1158
/*
 * When between packets, consume packet headers from the stream until a
 * non-zero-length one is found (zero-length packets are discarded) and
 * record the payload length in ch->current_packet.
 */
static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

		/* first header word carries the payload length */
		ch->current_packet = hdr[0];
	}
}
1178
/* provide a pointer and length to next free space in the fifo */
static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->send);
	unsigned tail = ch->half_ch->get_tail(ch->send);
	*ptr = (void *) (ch->send_data + head);

	/* one byte is always left unused so head == tail means empty */
	if (head < tail) {
		return tail - head - 1;
	} else {
		if (tail == 0)
			return ch->fifo_size - head - 1;
		else
			return ch->fifo_size - head;
	}
}
1195
/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
	wmb();	/* order data and head update before raising fHEAD */
	ch->half_ch->set_fHEAD(ch->send, 1);
}
1207
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001208static void ch_set_state(struct smd_channel *ch, unsigned n)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001209{
1210 if (n == SMD_SS_OPENED) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001211 ch->half_ch->set_fDSR(ch->send, 1);
1212 ch->half_ch->set_fCTS(ch->send, 1);
1213 ch->half_ch->set_fCD(ch->send, 1);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001214 } else {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001215 ch->half_ch->set_fDSR(ch->send, 0);
1216 ch->half_ch->set_fCTS(ch->send, 0);
1217 ch->half_ch->set_fCD(ch->send, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001218 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001219 ch->half_ch->set_state(ch->send, n);
1220 ch->half_ch->set_fSTATE(ch->send, 1);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301221 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001222}
1223
/*
 * If the shared-memory heap has grown since the last check, new
 * channels may have been allocated -- schedule the probe worker.
 */
static void do_smd_probe(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	if (shared->heap_info.free_offset != last_heap_free) {
		last_heap_free = shared->heap_info.free_offset;
		schedule_work(&probe_work);
	}
}
1232
/*
 * Advance the local half of the channel state machine in response to a
 * remote half-channel transition (@last -> @next): complete opens,
 * deliver OPEN/CLOSE events to the client, and queue fully-closed
 * channels for finalization.  Called with smd_lock held.
 */
static void smd_state_change(struct smd_channel *ch,
		unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		/* remote is (re)opening: reset our fifo indices and accept */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		/* remote closed first: discard packet state, tell client */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		/* both sides closed: hand off to the close workqueue */
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}
1278
/*
 * Service channels that are mid-close: acknowledge the remote fSTATE
 * flag and run the state machine for any channel whose remote state
 * has moved since we last looked.
 */
static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}
1296
/*
 * Interrupt bottom half for one edge: for every open channel on @list,
 * acknowledge the remote event flags (bit 1 = head moved, bit 2 = tail
 * moved, bit 4 = state changed), run the state machine, and deliver
 * DATA/STATUS events to the client.  @notify is the routine used to
 * interrupt the remote processor on this edge.
 */
static void handle_smd_irq(struct list_head *list,
		void (*notify)(smd_channel_t *ch))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
		if (ch_is_open(ch)) {
			if (ch->half_ch->get_fHEAD(ch->recv)) {
				ch->half_ch->set_fHEAD(ch->recv, 0);
				ch_flags |= 1;
			}
			if (ch->half_ch->get_fTAIL(ch->recv)) {
				ch->half_ch->set_fTAIL(ch->recv, 0);
				ch_flags |= 2;
			}
			if (ch->half_ch->get_fSTATE(ch->recv)) {
				ch->half_ch->set_fSTATE(ch->recv, 0);
				ch_flags |= 4;
			}
		}
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state) {
			SMx_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
					ch->n, ch->name, ch->last_state, tmp);
			smd_state_change(ch, ch->last_state, tmp);
			state_change = 1;
		}
		if (ch_flags & 0x3) {
			ch->update_state(ch);
			SMx_POWER_INFO(
				"SMD ch%d '%s' Data event 0x%x tx%d/rx%d %dr/%dw : %dr/%dw\n",
				ch->n, ch->name,
				ch_flags,
				ch->fifo_size -
					(smd_stream_write_avail(ch) + 1),
				smd_stream_read_avail(ch),
				ch->half_ch->get_tail(ch->send),
				ch->half_ch->get_head(ch->send),
				ch->half_ch->get_tail(ch->recv),
				ch->half_ch->get_head(ch->recv)
				);
			ch->notify(ch->priv, SMD_EVENT_DATA);
		}
		/* avoid a duplicate event if the state machine already ran */
		if (ch_flags & 0x4 && !state_change) {
			SMx_POWER_INFO("SMD ch%d '%s' State update\n",
					ch->n, ch->name);
			ch->notify(ch->priv, SMD_EVENT_STATUS);
		}
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	do_smd_probe();
}
1356
/* trace an incoming SMD interrupt from the given edge's subsystem */
static inline void log_irq(uint32_t subsystem)
{
	const char *subsys = smd_edge_to_subsystem(subsystem);

	/* keep subsys "used" when SMx_POWER_INFO() compiles to nothing */
	(void) subsys;

	SMx_POWER_INFO("SMD Int %s->Apps\n", subsys);
}
1365
/* interrupt from the modem: service all modem-edge channels */
static irqreturn_t smd_modem_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_MODEM);
	++interrupt_stats[SMD_MODEM].smd_in_count;
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1374
/* interrupt from the QDSP (ADSP/Q6): service all QDSP-edge channels */
static irqreturn_t smd_dsp_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_QDSP);
	++interrupt_stats[SMD_Q6].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001383
/* interrupt from the DSPS (sensors processor): service its channels */
static irqreturn_t smd_dsps_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_DSPS);
	++interrupt_stats[SMD_DSPS].smd_in_count;
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001392
/* interrupt from WCNSS (wireless connectivity): service its channels */
static irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_WCNSS);
	++interrupt_stats[SMD_WCNSS].smd_in_count;
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
1401
/* interrupt from the RPM (resource power manager): service its channels */
static irqreturn_t smd_rpm_irq_handler(int irq, void *data)
{
	log_irq(SMD_APPS_RPM);
	++interrupt_stats[SMD_RPM].smd_in_count;
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
	return IRQ_HANDLED;
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001410
/*
 * Tasklet body that services every edge as if its interrupt had fired;
 * used to catch up on events accumulated while interrupts were not
 * delivered (e.g. after sleep or a subsystem restart).
 */
static void smd_fake_irq_handler(unsigned long arg)
{
	handle_smd_irq(&smd_ch_list_modem, notify_modem_smd);
	handle_smd_irq(&smd_ch_list_dsp, notify_dsp_smd);
	handle_smd_irq(&smd_ch_list_dsps, notify_dsps_smd);
	handle_smd_irq(&smd_ch_list_wcnss, notify_wcnss_smd);
	handle_smd_irq(&smd_ch_list_rpm, notify_rpm_smd);
	handle_smd_irq_closing_list();
}
1420
1421static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1422
Brian Swetland37521a32009-07-01 18:30:47 -07001423static inline int smd_need_int(struct smd_channel *ch)
1424{
1425 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001426 if (ch->half_ch->get_fHEAD(ch->recv) ||
1427 ch->half_ch->get_fTAIL(ch->recv) ||
1428 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001429 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001430 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001431 return 1;
1432 }
1433 return 0;
1434}
1435
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001436void smd_sleep_exit(void)
1437{
1438 unsigned long flags;
1439 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001440 int need_int = 0;
1441
1442 spin_lock_irqsave(&smd_lock, flags);
Brian Swetland37521a32009-07-01 18:30:47 -07001443 list_for_each_entry(ch, &smd_ch_list_modem, ch_list) {
1444 if (smd_need_int(ch)) {
1445 need_int = 1;
1446 break;
1447 }
1448 }
1449 list_for_each_entry(ch, &smd_ch_list_dsp, ch_list) {
1450 if (smd_need_int(ch)) {
1451 need_int = 1;
1452 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001453 }
1454 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001455 list_for_each_entry(ch, &smd_ch_list_dsps, ch_list) {
1456 if (smd_need_int(ch)) {
1457 need_int = 1;
1458 break;
1459 }
1460 }
1461 list_for_each_entry(ch, &smd_ch_list_wcnss, ch_list) {
1462 if (smd_need_int(ch)) {
1463 need_int = 1;
1464 break;
1465 }
1466 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001467 spin_unlock_irqrestore(&smd_lock, flags);
1468 do_smd_probe();
Brian Swetland37521a32009-07-01 18:30:47 -07001469
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001470 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001471 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001472 tasklet_schedule(&smd_fake_irq_tasklet);
1473 }
1474}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001475EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001476
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001477static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001478{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001479 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1480 return 0;
1481 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001482 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001483
1484 /* for cases where xfer type is 0 */
1485 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001486 return 0;
1487
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001488 /* for cases where xfer type is 0 */
1489 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1490 return 0;
1491
1492 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001493 return 1;
1494 else
1495 return 0;
1496}
1497
/*
 * Write up to @len bytes from @_data into the stream channel's transmit
 * FIFO, committing as much as currently fits.
 * @user_buf: nonzero when @_data is a userspace pointer (copied with
 *            copy_from_user); zero for a kernel buffer.
 * Returns the number of bytes actually queued (possibly less than
 * @len, or 0 if the FIFO is full), or -EINVAL for a negative length.
 * The remote processor is interrupted only if at least one byte was
 * queued.
 */
static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
					int user_buf)
{
	void *ptr;
	const unsigned char *buf = _data;
	unsigned xfer;
	int orig_len = len;
	int r = 0;

	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
	if (len < 0)
		return -EINVAL;
	else if (len == 0)
		return 0;

	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
		if (!ch_is_open(ch)) {
			/* channel closed mid-write: report nothing written */
			len = orig_len;
			break;
		}
		if (xfer > len)
			xfer = len;
		if (user_buf) {
			r = copy_from_user(ptr, buf, xfer);
			if (r > 0) {
				/* NOTE(review): a short copy_from_user is
				 * only logged; the region is still committed
				 * by ch_write_done below -- confirm intended */
				pr_err("%s: "
					"copy_from_user could not copy %i "
					"bytes.\n",
					__func__,
					r);
			}
		} else
			memcpy(ptr, buf, xfer);
		ch_write_done(ch, xfer);
		len -= xfer;
		buf += xfer;
		if (len == 0)
			break;
	}

	/* kick the remote side only if something was queued */
	if (orig_len - len)
		ch->notify_other_cpu(ch);

	return orig_len - len;
}
1543
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001544static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1545 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001546{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001547 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001548 unsigned hdr[5];
1549
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001550 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001551 if (len < 0)
1552 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001553 else if (len == 0)
1554 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001555
1556 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1557 return -ENOMEM;
1558
1559 hdr[0] = len;
1560 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1561
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001562
1563 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1564 if (ret < 0 || ret != sizeof(hdr)) {
1565 SMD_DBG("%s failed to write pkt header: "
1566 "%d returned\n", __func__, ret);
1567 return -1;
1568 }
1569
1570
1571 ret = smd_stream_write(ch, _data, len, user_buf);
1572 if (ret < 0 || ret != len) {
1573 SMD_DBG("%s failed to write pkt data: "
1574 "%d returned\n", __func__, ret);
1575 return ret;
1576 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001577
1578 return len;
1579}
1580
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001581static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001582{
1583 int r;
1584
1585 if (len < 0)
1586 return -EINVAL;
1587
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001589 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001590 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301591 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001592
1593 return r;
1594}
1595
/*
 * Read up to @len bytes of the current packet's payload.  Reads never
 * cross a packet boundary: @len is clamped to the remaining bytes of
 * the packet in progress.
 * @user_buf: nonzero when @data is a userspace buffer.
 * Returns the number of bytes consumed, or -EINVAL for a negative
 * length.
 */
static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
{
	unsigned long flags;
	int r;

	if (len < 0)
		return -EINVAL;

	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu(ch);

	/* packet bookkeeping is shared with the IRQ path: update it
	 * under smd_lock */
	spin_lock_irqsave(&smd_lock, flags);
	ch->current_packet -= r;
	update_packet_state(ch);
	spin_unlock_irqrestore(&smd_lock, flags);

	return r;
}
1619
/*
 * Packet read variant for use from a client notify callback.
 * Identical to smd_packet_read() except the packet bookkeeping is
 * updated without taking smd_lock -- presumably because the callback
 * path already runs with smd_lock held (cf. the smd_tiocmset_from_cb
 * comment); TODO confirm every caller satisfies this.
 */
static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
					int user_buf)
{
	int r;

	if (len < 0)
		return -EINVAL;

	if (len > ch->current_packet)
		len = ch->current_packet;

	r = ch_read(ch, data, len, user_buf);
	if (r > 0)
		if (!read_intr_blocked(ch))
			ch->notify_other_cpu(ch);

	ch->current_packet -= r;
	update_packet_state(ch);

	return r;
}
1641
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301642#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
/*
 * Bind channel @ch to its version-2 shared-memory resources: allocate
 * (or look up) the SMEM entry holding the two half-channel control
 * structures -- selecting the word-access layout when the edge requires
 * it -- install the matching half-channel accessor table, then locate
 * the FIFO SMEM entry and split it evenly into send and receive halves.
 * Returns 0 on success, -EINVAL if an SMEM entry is missing or the
 * FIFO size is not a power of two.
 */
static int smd_alloc_v2(struct smd_channel *ch)
{
	void *buffer;
	unsigned buffer_sz;

	if (is_word_access_ch(ch->type)) {
		struct smd_shared_v2_word_access *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	} else {
		struct smd_shared_v2 *shared2;
		shared2 = smem_alloc(SMEM_SMD_BASE_ID + ch->n,
							sizeof(*shared2));
		if (!shared2) {
			SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
			return -EINVAL;
		}
		ch->send = &shared2->ch0;
		ch->recv = &shared2->ch1;
	}
	ch->half_ch = get_half_ch_funcs(ch->type);

	buffer = smem_get_entry(SMEM_SMD_FIFO_BASE_ID + ch->n, &buffer_sz);
	if (!buffer) {
		SMD_INFO("smem_get_entry failed\n");
		return -EINVAL;
	}

	/* buffer must be a power-of-two size */
	if (buffer_sz & (buffer_sz - 1)) {
		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
		return -EINVAL;
	}
	/* first half transmits, second half receives */
	buffer_sz /= 2;
	ch->send_data = buffer;
	ch->recv_data = buffer + buffer_sz;
	ch->fifo_size = buffer_sz;

	return 0;
}
1689
/* Version-1 shared-memory channels do not exist on PKG3/PKG4 targets. */
static int smd_alloc_v1(struct smd_channel *ch)
{
	return -EINVAL;
}
1694
1695#else /* define v1 for older targets */
/* Version-2 shared memory is unavailable on legacy (pre-PKG3) targets. */
static int smd_alloc_v2(struct smd_channel *ch)
{
	return -EINVAL;
}
1700
/*
 * Bind channel @ch to a legacy version-1 shared-memory descriptor,
 * which carries both half-channel control structures and fixed-size
 * data FIFOs in a single SMEM entry.
 * Returns 0 on success, -EINVAL if the SMEM entry is absent.
 */
static int smd_alloc_v1(struct smd_channel *ch)
{
	struct smd_shared_v1 *shared1;
	shared1 = smem_alloc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1));
	if (!shared1) {
		pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
		return -EINVAL;
	}
	ch->send = &shared1->ch0;
	ch->recv = &shared1->ch1;
	ch->send_data = shared1->data0;
	ch->recv_data = shared1->data1;
	ch->fifo_size = SMD_BUF_SIZE;
	ch->half_ch = get_half_ch_funcs(ch->type);
	return 0;
}
1717
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301718#endif
1719
/*
 * Instantiate the channel described by allocation-table entry
 * @alloc_elm: allocate the bookkeeping struct, attach its shared-memory
 * control/FIFO regions (v2 layout first, then legacy v1), select the
 * per-edge remote-notify hook and the packet/stream operation table,
 * park it on the closed list, and register it as a platform device so
 * a client driver can bind and open it.  A modem "LOOPBACK" channel
 * additionally registers the loopback tty platform device.
 * Returns 0 on success, -1 on allocation or shared-memory failure.
 */
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
{
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("smd_alloc_channel() out of memory\n");
		return -1;
	}
	ch->n = alloc_elm->cid;
	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);

	if (smd_alloc_v2(ch) && smd_alloc_v1(ch)) {
		kfree(ch);
		return -1;
	}

	/* fifo_size is a power of two (enforced by the alloc routines),
	 * so a mask can replace modulo arithmetic */
	ch->fifo_mask = ch->fifo_size - 1;

	/* probe_worker guarentees ch->type will be a valid type */
	if (ch->type == SMD_APPS_MODEM)
		ch->notify_other_cpu = notify_modem_smd;
	else if (ch->type == SMD_APPS_QDSP)
		ch->notify_other_cpu = notify_dsp_smd;
	else if (ch->type == SMD_APPS_DSPS)
		ch->notify_other_cpu = notify_dsps_smd;
	else if (ch->type == SMD_APPS_WCNSS)
		ch->notify_other_cpu = notify_wcnss_smd;
	else if (ch->type == SMD_APPS_RPM)
		ch->notify_other_cpu = notify_rpm_smd;

	/* install the packet or stream operation table */
	if (smd_is_packet(alloc_elm)) {
		ch->read = smd_packet_read;
		ch->write = smd_packet_write;
		ch->read_avail = smd_packet_read_avail;
		ch->write_avail = smd_packet_write_avail;
		ch->update_state = update_packet_state;
		ch->read_from_cb = smd_packet_read_from_cb;
		ch->is_pkt_ch = 1;
	} else {
		ch->read = smd_stream_read;
		ch->write = smd_stream_write;
		ch->read_avail = smd_stream_read_avail;
		ch->write_avail = smd_stream_write_avail;
		ch->update_state = update_stream_state;
		ch->read_from_cb = smd_stream_read;
	}

	memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
		 ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
		/* create a platform driver to be used by smd_tty driver
		 * so that it can access the loopback port
		 */
		loopback_tty_pdev.id = ch->type;
		platform_device_register(&loopback_tty_pdev);
	}
	return 0;
}
1791
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301792static inline void notify_loopback_smd(smd_channel_t *ch_notif)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001793{
1794 unsigned long flags;
1795 struct smd_channel *ch;
1796
1797 spin_lock_irqsave(&smd_lock, flags);
1798 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1799 ch->notify(ch->priv, SMD_EVENT_DATA);
1800 }
1801 spin_unlock_irqrestore(&smd_lock, flags);
1802}
1803
/*
 * Create the synthetic local-loopback channel: both half-channels and
 * both FIFO pointers reference the same static control block and data
 * buffer, so bytes written become immediately readable back.  The
 * channel is parked on the closed list and registered as a platform
 * device like any other channel.
 * Returns 0 on success, -1 on allocation failure.
 */
static int smd_alloc_loopback_channel(void)
{
	static struct smd_half_channel smd_loopback_ctl;
	static char smd_loopback_data[SMD_BUF_SIZE];
	struct smd_channel *ch;

	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
	if (ch == 0) {
		pr_err("%s: out of memory\n", __func__);
		return -1;
	}
	ch->n = SMD_LOOPBACK_CID;

	/* send and receive share one control block and one buffer */
	ch->send = &smd_loopback_ctl;
	ch->recv = &smd_loopback_ctl;
	ch->send_data = smd_loopback_data;
	ch->recv_data = smd_loopback_data;
	ch->fifo_size = SMD_BUF_SIZE;

	ch->fifo_mask = ch->fifo_size - 1;
	ch->type = SMD_LOOPBACK_TYPE;
	ch->notify_other_cpu = notify_loopback_smd;

	/* loopback is always a stream channel */
	ch->read = smd_stream_read;
	ch->write = smd_stream_write;
	ch->read_avail = smd_stream_read_avail;
	ch->write_avail = smd_stream_write_avail;
	ch->update_state = update_stream_state;
	ch->read_from_cb = smd_stream_read;

	/* NOTE(review): the literal 20 is presumably SMD_MAX_CH_NAME_LEN
	 * (cf. smd_alloc_channel) -- confirm */
	memset(ch->name, 0, 20);
	memcpy(ch->name, "local_loopback", 14);

	ch->pdev.name = ch->name;
	ch->pdev.id = ch->type;

	SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);

	mutex_lock(&smd_creation_mutex);
	list_add(&ch->ch_list, &smd_ch_closed_list);
	mutex_unlock(&smd_creation_mutex);

	platform_device_register(&ch->pdev);
	return 0;
}
1849
/* Default no-op notify callback, used when a client supplies none or
 * after a channel is closed. */
static void do_nothing_notify(void *priv, unsigned flags)
{
}
1853
/*
 * Workqueue handler that finishes closing channels parked on
 * smd_ch_to_close_list: each is moved to the closed list, told it may
 * be reopened (SMD_EVENT_REOPEN_READY), and has its notify callback
 * reset to the no-op.  smd_creation_mutex is taken before smd_lock,
 * matching the ordering used by the open/close paths.
 */
static void finalize_channel_close_fn(struct work_struct *work)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;

	mutex_lock(&smd_creation_mutex);
	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
		list_del(&ch->ch_list);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
		ch->notify = do_nothing_notify;
	}
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_creation_mutex);
}
1871
1872struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001873{
1874 struct smd_channel *ch;
1875
1876 mutex_lock(&smd_creation_mutex);
1877 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001878 if (!strcmp(name, ch->name) &&
1879 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001880 list_del(&ch->ch_list);
1881 mutex_unlock(&smd_creation_mutex);
1882 return ch;
1883 }
1884 }
1885 mutex_unlock(&smd_creation_mutex);
1886
1887 return NULL;
1888}
1889
/*
 * Open SMD channel @name on edge @edge.
 * @priv:   opaque pointer handed back in @notify callbacks
 * @notify: event callback; NULL selects a no-op
 * On success *@_ch is set to the channel handle and 0 is returned.
 * Returns -ENODEV if SMD is uninitialized or the channel does not
 * exist, or -EAGAIN if the channel exists but is still being closed.
 */
int smd_named_open_on_edge(const char *name, uint32_t edge,
			   smd_channel_t **_ch,
			   void *priv, void (*notify)(void *, unsigned))
{
	struct smd_channel *ch;
	unsigned long flags;

	if (smd_initialized == 0) {
		SMD_INFO("smd_open() before smd_init()\n");
		return -ENODEV;
	}

	SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);

	ch = smd_get_channel(name, edge);
	if (!ch) {
		/* check closing list for port */
		spin_lock_irqsave(&smd_lock, flags);
		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
			/* NOTE(review): literal 20 is presumably
			 * SMD_MAX_CH_NAME_LEN -- confirm */
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}

		/* check closing workqueue list for port */
		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
			if (!strncmp(name, ch->name, 20) &&
				(edge == ch->type)) {
				/* channel exists, but is being closed */
				spin_unlock_irqrestore(&smd_lock, flags);
				return -EAGAIN;
			}
		}
		spin_unlock_irqrestore(&smd_lock, flags);

		/* one final check to handle closing->closed race condition */
		ch = smd_get_channel(name, edge);
		if (!ch)
			return -ENODEV;
	}

	if (notify == 0)
		notify = do_nothing_notify;

	ch->notify = notify;
	ch->current_packet = 0;
	ch->last_state = SMD_SS_CLOSED;
	ch->priv = priv;

	/* loopback has no remote side: mark it opened immediately and
	 * assert the handshake lines ourselves */
	if (edge == SMD_LOOPBACK_TYPE) {
		ch->last_state = SMD_SS_OPENED;
		ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	}

	*_ch = ch;

	SMD_DBG("smd_open: opening '%s'\n", ch->name);

	/* file the channel on its per-edge active list */
	spin_lock_irqsave(&smd_lock, flags);
	if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_MODEM)
		list_add(&ch->ch_list, &smd_ch_list_modem);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_QDSP)
		list_add(&ch->ch_list, &smd_ch_list_dsp);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_DSPS)
		list_add(&ch->ch_list, &smd_ch_list_dsps);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_WCNSS)
		list_add(&ch->ch_list, &smd_ch_list_wcnss);
	else if (SMD_CHANNEL_TYPE(ch->type) == SMD_APPS_RPM)
		list_add(&ch->ch_list, &smd_ch_list_rpm);
	else
		list_add(&ch->ch_list, &smd_ch_list_loopback);

	SMD_DBG("%s: opening ch %d\n", __func__, ch->n);

	if (edge != SMD_LOOPBACK_TYPE)
		smd_state_change(ch, ch->last_state, SMD_SS_OPENING);

	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_named_open_on_edge);
1978
1979
/*
 * Open channel @name on the default (modem) edge.  See
 * smd_named_open_on_edge() for the full contract.
 */
int smd_open(const char *name, smd_channel_t **_ch,
	     void *priv, void (*notify)(void *, unsigned))
{
	return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
				      notify);
}
EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001987
/*
 * Close channel @ch.  The local half is marked closed (loopback clears
 * its handshake flags directly).  If the remote side is still open the
 * channel is parked on the closing list so the state machine can finish
 * the handshake asynchronously; otherwise it is returned to the closed
 * list immediately and its notify callback is reset.
 * Returns 0 on success, -1 for a NULL channel.
 */
int smd_close(smd_channel_t *ch)
{
	unsigned long flags;

	if (ch == 0)
		return -1;

	SMD_INFO("smd_close(%s)\n", ch->name);

	spin_lock_irqsave(&smd_lock, flags);
	list_del(&ch->ch_list);
	if (ch->n == SMD_LOOPBACK_CID) {
		/* no remote side: drop the handshake lines ourselves */
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
		ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
	} else
		ch_set_state(ch, SMD_SS_CLOSED);

	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
		/* remote still open: defer to the closing state machine */
		list_add(&ch->ch_list, &smd_ch_closing_list);
		spin_unlock_irqrestore(&smd_lock, flags);
	} else {
		spin_unlock_irqrestore(&smd_lock, flags);
		ch->notify = do_nothing_notify;
		mutex_lock(&smd_creation_mutex);
		list_add(&ch->ch_list, &smd_ch_closed_list);
		mutex_unlock(&smd_creation_mutex);
	}

	return 0;
}
EXPORT_SYMBOL(smd_close);
2021
/*
 * Begin a segmented packet write of @len bytes: validate state and
 * queue the 5-word packet header.  Payload is then supplied via
 * smd_write_segment() and the transaction completed with
 * smd_write_end().
 * Returns 0 on success; -ENODEV (no channel), -EACCES (stream
 * channel), -EINVAL (len < 1), -EBUSY (packet already in progress),
 * -EAGAIN (no FIFO room for the header), -EPERM (header write failed).
 */
int smd_write_start(smd_channel_t *ch, int len)
{
	int ret;
	unsigned hdr[5];

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (!ch->is_pkt_ch) {
		pr_err("%s: non-packet channel specified\n", __func__);
		return -EACCES;
	}
	if (len < 1) {
		pr_err("%s: invalid length: %d\n", __func__, len);
		return -EINVAL;
	}

	if (ch->pending_pkt_sz) {
		pr_err("%s: packet of size: %d in progress\n", __func__,
			ch->pending_pkt_sz);
		return -EBUSY;
	}
	/* pending_pkt_sz tracks payload bytes still owed by the caller */
	ch->pending_pkt_sz = len;

	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
		ch->pending_pkt_sz = 0;
		SMD_DBG("%s: no space to write packet header\n", __func__);
		return -EAGAIN;
	}

	hdr[0] = len;
	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;


	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
	if (ret < 0 || ret != sizeof(hdr)) {
		ch->pending_pkt_sz = 0;
		pr_err("%s: packet header failed to write\n", __func__);
		return -EPERM;
	}
	return 0;
}
EXPORT_SYMBOL(smd_write_start);
2066
2067int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2068{
2069 int bytes_written;
2070
2071 if (!ch) {
2072 pr_err("%s: Invalid channel specified\n", __func__);
2073 return -ENODEV;
2074 }
2075 if (len < 1) {
2076 pr_err("%s: invalid length: %d\n", __func__, len);
2077 return -EINVAL;
2078 }
2079
2080 if (!ch->pending_pkt_sz) {
2081 pr_err("%s: no transaction in progress\n", __func__);
2082 return -ENOEXEC;
2083 }
2084 if (ch->pending_pkt_sz - len < 0) {
2085 pr_err("%s: segment of size: %d will make packet go over "
2086 "length\n", __func__, len);
2087 return -EINVAL;
2088 }
2089
2090 bytes_written = smd_stream_write(ch, data, len, user_buf);
2091
2092 ch->pending_pkt_sz -= bytes_written;
2093
2094 return bytes_written;
2095}
2096EXPORT_SYMBOL(smd_write_segment);
2097
/*
 * Finish a segmented packet write.  Returns 0 if the packet declared
 * via smd_write_start() has been fully supplied, -E2BIG if payload
 * bytes are still outstanding, -ENODEV for a NULL channel.
 */
int smd_write_end(smd_channel_t *ch)
{

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}
	if (ch->pending_pkt_sz) {
		pr_err("%s: current packet not completely written\n", __func__);
		return -E2BIG;
	}

	return 0;
}
EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002113
2114int smd_read(smd_channel_t *ch, void *data, int len)
2115{
Jack Pham1b236d12012-03-19 15:27:18 -07002116 if (!ch) {
2117 pr_err("%s: Invalid channel specified\n", __func__);
2118 return -ENODEV;
2119 }
2120
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002121 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002122}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002123EXPORT_SYMBOL(smd_read);
2124
2125int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2126{
Jack Pham1b236d12012-03-19 15:27:18 -07002127 if (!ch) {
2128 pr_err("%s: Invalid channel specified\n", __func__);
2129 return -ENODEV;
2130 }
2131
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002132 return ch->read(ch, data, len, 1);
2133}
2134EXPORT_SYMBOL(smd_read_user_buffer);
2135
2136int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2137{
Jack Pham1b236d12012-03-19 15:27:18 -07002138 if (!ch) {
2139 pr_err("%s: Invalid channel specified\n", __func__);
2140 return -ENODEV;
2141 }
2142
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002143 return ch->read_from_cb(ch, data, len, 0);
2144}
2145EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002146
2147int smd_write(smd_channel_t *ch, const void *data, int len)
2148{
Jack Pham1b236d12012-03-19 15:27:18 -07002149 if (!ch) {
2150 pr_err("%s: Invalid channel specified\n", __func__);
2151 return -ENODEV;
2152 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002153
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002154 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002155}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002156EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002157
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002158int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002159{
Jack Pham1b236d12012-03-19 15:27:18 -07002160 if (!ch) {
2161 pr_err("%s: Invalid channel specified\n", __func__);
2162 return -ENODEV;
2163 }
2164
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002166}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002167EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002168
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002169int smd_read_avail(smd_channel_t *ch)
2170{
Jack Pham1b236d12012-03-19 15:27:18 -07002171 if (!ch) {
2172 pr_err("%s: Invalid channel specified\n", __func__);
2173 return -ENODEV;
2174 }
2175
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002176 return ch->read_avail(ch);
2177}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002178EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002179
2180int smd_write_avail(smd_channel_t *ch)
2181{
Jack Pham1b236d12012-03-19 15:27:18 -07002182 if (!ch) {
2183 pr_err("%s: Invalid channel specified\n", __func__);
2184 return -ENODEV;
2185 }
2186
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002187 return ch->write_avail(ch);
2188}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002189EXPORT_SYMBOL(smd_write_avail);
2190
2191void smd_enable_read_intr(smd_channel_t *ch)
2192{
2193 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002194 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002195}
2196EXPORT_SYMBOL(smd_enable_read_intr);
2197
2198void smd_disable_read_intr(smd_channel_t *ch)
2199{
2200 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002201 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002202}
2203EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002204
/**
 * Enable/disable receive interrupts for the remote processor used by a
 * particular channel.
 * @ch: open channel handle to use for the edge
 * @mask: 1 = mask interrupts; 0 = unmask interrupts
 * @returns: 0 for success; < 0 for failure
 *
 * Note that this enables/disables all interrupts from the remote subsystem for
 * all channels. As such, it should be used with care and only for specific
 * use cases such as power-collapse sequencing.
 */
int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
{
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;
	struct interrupt_config_item *int_cfg;

	if (!ch)
		return -EINVAL;

	/* reject edges with no per-edge configuration entry */
	if (ch->type >= ARRAY_SIZE(edge_to_pids))
		return -ENODEV;

	/* look up the SMD interrupt wired from this edge's remote pid */
	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;

	if (int_cfg->irq_id < 0)
		return -ENODEV;

	irq_chip = irq_get_chip(int_cfg->irq_id);
	if (!irq_chip)
		return -ENODEV;

	irq_data = irq_get_irq_data(int_cfg->irq_id);
	if (!irq_data)
		return -ENODEV;

	if (mask) {
		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_mask(irq_data);
	} else {
		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
				edge_to_pids[ch->type].subsys_name);
		irq_chip->irq_unmask(irq_data);
	}

	return 0;
}
EXPORT_SYMBOL(smd_mask_receive_interrupt);
2254
/* Unimplemented: blocking until @bytes are readable is not supported;
 * always fails with -1. */
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2259
/* Unimplemented: blocking until @bytes are writable is not supported;
 * always fails with -1. */
int smd_wait_until_writable(smd_channel_t *ch, int bytes)
{
	return -1;
}
2264
2265int smd_cur_packet_size(smd_channel_t *ch)
2266{
Jack Pham1b236d12012-03-19 15:27:18 -07002267 if (!ch) {
2268 pr_err("%s: Invalid channel specified\n", __func__);
2269 return -ENODEV;
2270 }
2271
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002272 return ch->current_packet;
2273}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002274EXPORT_SYMBOL(smd_cur_packet_size);
2275
2276int smd_tiocmget(smd_channel_t *ch)
2277{
Jack Pham1b236d12012-03-19 15:27:18 -07002278 if (!ch) {
2279 pr_err("%s: Invalid channel specified\n", __func__);
2280 return -ENODEV;
2281 }
2282
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002283 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2284 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2285 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2286 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2287 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2288 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002289}
2290EXPORT_SYMBOL(smd_tiocmget);
2291
/* this api will be called while holding smd_lock */
/*
 * Apply TIOCM set/clear requests to the local (send) half of the channel
 * and signal the remote processor.  DTR maps onto the fDSR flag and RTS
 * onto fCTS as seen by the remote side.
 *
 * @ch: open channel handle (must not be NULL)
 * @set: TIOCM_* bits to assert
 * @clear: TIOCM_* bits to deassert
 * @returns: 0 on success, -ENODEV for a NULL channel
 */
int
smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	if (set & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 1);

	if (set & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 1);

	if (clear & TIOCM_DTR)
		ch->half_ch->set_fDSR(ch->send, 0);

	if (clear & TIOCM_RTS)
		ch->half_ch->set_fCTS(ch->send, 0);

	/* mark a state change so the remote side re-reads the flags */
	ch->half_ch->set_fSTATE(ch->send, 1);
	/* ensure flag updates are visible before raising the interrupt */
	barrier();
	ch->notify_other_cpu(ch);

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset_from_cb);
2320
/*
 * Locked wrapper around smd_tiocmset_from_cb(): acquires smd_lock, which
 * the _from_cb variant requires the caller to hold.
 *
 * @ch: open channel handle
 * @set: TIOCM_* bits to assert
 * @clear: TIOCM_* bits to deassert
 * @returns: 0 on success, -ENODEV for a NULL channel
 */
int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
{
	unsigned long flags;

	if (!ch) {
		pr_err("%s: Invalid channel specified\n", __func__);
		return -ENODEV;
	}

	spin_lock_irqsave(&smd_lock, flags);
	smd_tiocmset_from_cb(ch, set, clear);
	spin_unlock_irqrestore(&smd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002337
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002338int smd_is_pkt_avail(smd_channel_t *ch)
2339{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002340 unsigned long flags;
2341
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002342 if (!ch || !ch->is_pkt_ch)
2343 return -EINVAL;
2344
2345 if (ch->current_packet)
2346 return 1;
2347
Jeff Hugoa8549f12012-08-13 20:36:18 -06002348 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002349 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002350 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002351
2352 return ch->current_packet ? 1 : 0;
2353}
2354EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002355
2356
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002357/* -------------------------------------------------------------------------- */
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002358
/*
 * Shared Memory Range Check
 *
 * Takes a physical address and an offset and checks if the resulting physical
 * address would fit into one of the aux smem regions. If so, returns the
 * corresponding virtual address. Otherwise returns NULL. Expects the array
 * of smem regions to be in ascending physical address order.
 *
 * @base: physical base address to check
 * @offset: offset from the base to get the final address
 */
static void *smem_range_check(phys_addr_t base, unsigned offset)
{
	int i;
	phys_addr_t phys_addr;
	resource_size_t size;

	for (i = 0; i < num_smem_areas; ++i) {
		phys_addr = smem_areas[i].phys_addr;
		size = smem_areas[i].size;
		/* areas are sorted ascending, so a base below this area's
		 * start cannot match any later area either */
		if (base < phys_addr)
			return NULL;
		if (base > phys_addr + size)
			continue;
		/* hit only if the final address base+offset also lies
		 * strictly inside this area */
		if (base >= phys_addr && base + offset < phys_addr + size)
			return smem_areas[i].virt_addr + offset;
	}

	return NULL;
}
2389
/* smem_alloc returns the pointer to smem item if it is already allocated.
 * Otherwise, it returns NULL.  Despite the name it never allocates; it is
 * a lookup via smem_find() with the same size-match requirement.
 */
void *smem_alloc(unsigned id, unsigned size)
{
	return smem_find(id, size);
}
EXPORT_SYMBOL(smem_alloc);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002398
/* smem_alloc2 returns the pointer to smem item. If it is not allocated,
 * it allocates it and then returns the pointer to it.
 *
 * The heap table of contents (toc) is shared with remote processors, so
 * all reads/writes happen under the remote spinlock, and write ordering
 * is enforced with wmb() so remote observers never see a partially
 * initialized entry.
 */
void *smem_alloc2(unsigned id, unsigned size_in)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	unsigned long flags;
	void *ret = NULL;

	if (!shared->heap_info.initialized) {
		pr_err("%s: smem heap info not initialized\n", __func__);
		return NULL;
	}

	if (id >= SMEM_NUM_ITEMS)
		return NULL;

	/* all smem items are 8-byte aligned/sized */
	size_in = ALIGN(size_in, 8);
	remote_spin_lock_irqsave(&remote_spinlock, flags);
	if (toc[id].allocated) {
		/* already present: only hand it out if the size matches */
		SMD_DBG("%s: %u already allocated\n", __func__, id);
		if (size_in != toc[id].size)
			pr_err("%s: wrong size %u (expected %u)\n",
				__func__, toc[id].size, size_in);
		else
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
	} else if (id > SMEM_FIXED_ITEM_LAST) {
		/* dynamic items only; fixed items cannot be allocated here */
		SMD_DBG("%s: allocating %u\n", __func__, id);
		if (shared->heap_info.heap_remaining >= size_in) {
			toc[id].offset = shared->heap_info.free_offset;
			toc[id].size = size_in;
			/* publish offset/size before marking allocated */
			wmb();
			toc[id].allocated = 1;

			shared->heap_info.free_offset += size_in;
			shared->heap_info.heap_remaining -= size_in;
			ret = (void *)(MSM_SHARED_RAM_BASE + toc[id].offset);
		} else
			pr_err("%s: not enough memory %u (required %u)\n",
				__func__, shared->heap_info.heap_remaining,
				size_in);
	}
	/* flush heap bookkeeping before dropping the remote lock */
	wmb();
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(smem_alloc2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002447
/*
 * Look up an smem item and report its size.
 *
 * @id: smem item identifier
 * @size: out-parameter; set to the item size, or 0 if not allocated
 * @returns: virtual address of the item, or 0/NULL if the id is invalid,
 *           the item is unallocated, or an aux-region range check fails
 *
 * Takes the remote spinlock only once spinlocks_initialized is set, so
 * this is callable during early init before the lock exists.
 */
void *smem_get_entry(unsigned id, unsigned *size)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	struct smem_heap_entry *toc = shared->heap_toc;
	int use_spinlocks = spinlocks_initialized;
	void *ret = 0;
	unsigned long flags = 0;

	if (id >= SMEM_NUM_ITEMS)
		return ret;

	if (use_spinlocks)
		remote_spin_lock_irqsave(&remote_spinlock, flags);
	/* toc is in device memory and cannot be speculatively accessed */
	if (toc[id].allocated) {
		*size = toc[id].size;
		barrier();
		/* a non-zero base in 'reserved' marks an aux smem region */
		if (!(toc[id].reserved & BASE_ADDR_MASK))
			ret = (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
		else
			ret = smem_range_check(
				toc[id].reserved & BASE_ADDR_MASK,
				toc[id].offset);
	} else {
		*size = 0;
	}
	if (use_spinlocks)
		remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(smem_get_entry);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002480
/*
 * Look up an smem item, insisting that the caller's expected size matches
 * the allocated size (after 8-byte alignment).
 *
 * @id: smem item identifier
 * @size_in: expected item size in bytes
 * @returns: virtual address of the item, or 0 if the item is missing or
 *           its size does not match
 */
void *smem_find(unsigned id, unsigned size_in)
{
	unsigned size;
	void *ptr;

	ptr = smem_get_entry(id, &size);
	if (!ptr)
		return 0;

	size_in = ALIGN(size_in, 8);
	if (size_in != size) {
		/* use %u: id, size_in and size are all unsigned (%d was a
		 * format/type mismatch) */
		pr_err("smem_find(%u, %u): wrong size %u\n",
			id, size_in, size);
		return 0;
	}

	return ptr;
}
EXPORT_SYMBOL(smem_find);
2500
2501static int smsm_cb_init(void)
2502{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002503 struct smsm_state_info *state_info;
2504 int n;
2505 int ret = 0;
2506
2507 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2508 GFP_KERNEL);
2509
2510 if (!smsm_states) {
2511 pr_err("%s: SMSM init failed\n", __func__);
2512 return -ENOMEM;
2513 }
2514
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002515 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2516 if (!smsm_cb_wq) {
2517 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2518 kfree(smsm_states);
2519 return -EFAULT;
2520 }
2521
Eric Holmbergc8002902011-09-16 13:55:57 -06002522 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002523 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2524 state_info = &smsm_states[n];
2525 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002526 state_info->intr_mask_set = 0x0;
2527 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002528 INIT_LIST_HEAD(&state_info->callbacks);
2529 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002530 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002531
2532 return ret;
2533}
2534
/*
 * One-time SMSM bring-up: verifies the remote spinlock is usable, sizes
 * the entry/host tables from shared memory, allocates the snapshot FIFO
 * and wakelock, maps/initializes the shared state and interrupt-mask
 * arrays, and registers the PM notifier.
 *
 * @returns: 0 on success, negative errno on fifo/callback-init failure
 */
static int smsm_init(void)
{
	struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
	int i;
	struct smsm_size_info_type *smsm_size_info;
	unsigned long flags;
	unsigned long j_start;

	/* Verify that remote spinlock is not deadlocked */
	j_start = jiffies;
	while (!remote_spin_trylock_irqsave(&remote_spinlock, flags)) {
		if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
			panic("%s: Remote processor %d will not release spinlock\n",
				__func__, remote_spin_owner(&remote_spinlock));
		}
	}
	remote_spin_unlock_irqrestore(&remote_spinlock, flags);

	/* entry/host counts may be overridden by a table in shared memory */
	smsm_size_info = smem_alloc(SMEM_SMSM_SIZE_INFO,
				sizeof(struct smsm_size_info_type));
	if (smsm_size_info) {
		SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
		SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
	}

	/* FIFO holds up to SMSM_SNAPSHOT_CNT full state snapshots */
	i = kfifo_alloc(&smsm_snapshot_fifo,
			sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
			GFP_KERNEL);
	if (i) {
		pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
		return i;
	}
	wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
			"smsm_snapshot");

	if (!smsm_info.state) {
		smsm_info.state = smem_alloc2(ID_SHARED_STATE,
					      SMSM_NUM_ENTRIES *
					      sizeof(uint32_t));

		if (smsm_info.state) {
			__raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			/* modem protocol >= 0xB also uses the DEM entry */
			if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
				__raw_writel(0, \
					SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
		}
	}

	if (!smsm_info.intr_mask) {
		smsm_info.intr_mask = smem_alloc2(SMEM_SMSM_CPU_INTR_MASK,
						  SMSM_NUM_ENTRIES *
						  SMSM_NUM_HOSTS *
						  sizeof(uint32_t));

		if (smsm_info.intr_mask) {
			/* start with all interrupts masked for APPS */
			for (i = 0; i < SMSM_NUM_ENTRIES; i++)
				__raw_writel(0x0,
					SMSM_INTR_MASK_ADDR(i, SMSM_APPS));

			/* Configure legacy modem bits */
			__raw_writel(LEGACY_MODEM_SMSM_MASK,
				SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
					SMSM_APPS));
		}
	}

	if (!smsm_info.intr_mux)
		smsm_info.intr_mux = smem_alloc2(SMEM_SMD_SMSM_INTR_MUX,
						 SMSM_NUM_INTR_MUX *
						 sizeof(uint32_t));

	i = smsm_cb_init();
	if (i)
		return i;

	/* make all shared-memory writes visible before going live */
	wmb();

	/* prime notifier state as if resuming, then register for real */
	smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL);
	i = register_pm_notifier(&smsm_pm_nb);
	if (i)
		pr_err("%s: power state notif error %d\n", __func__, i);

	return 0;
}
2619
2620void smsm_reset_modem(unsigned mode)
2621{
2622 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2623 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2624 } else if (mode == SMSM_MODEM_WAIT) {
2625 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2626 } else { /* reset_mode is SMSM_RESET or default */
2627 mode = SMSM_RESET;
2628 }
2629
2630 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2631}
2632EXPORT_SYMBOL(smsm_reset_modem);
2633
/*
 * Allow a waiting modem to continue by clearing SMSM_MODEM_WAIT from the
 * apps SMSM state.  No-op if the shared state area is not mapped yet.
 */
void smsm_reset_modem_cont(void)
{
	unsigned long flags;
	uint32_t state;

	if (!smsm_info.state)
		return;

	spin_lock_irqsave(&smem_lock, flags);
	state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE)) \
						& ~SMSM_MODEM_WAIT;
	__raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
	/* ensure the cleared bit is visible to the modem before unlocking */
	wmb();
	spin_unlock_irqrestore(&smem_lock, flags);
}
EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002650
/*
 * Capture a snapshot of all SMSM state entries into the snapshot FIFO and
 * queue the notification worker.  Optionally takes a reference-counted
 * wakelock so the snapshot is processed before suspend.
 *
 * @use_wakelock: non-zero to hold smsm_snapshot_wakelock until the worker
 *                consumes this snapshot
 *
 * NOTE(review): if kfifo_in fails partway through, the entries already
 * queued for this snapshot are not removed, which would misalign later
 * reads; in practice the up-front kfifo_avail() check makes that path
 * unreachable — confirm against the callers' locking.
 */
static void smsm_cb_snapshot(uint32_t use_wakelock)
{
	int n;
	uint32_t new_state;
	unsigned long flags;
	int ret;

	/* reserve room for a full snapshot up front */
	ret = kfifo_avail(&smsm_snapshot_fifo);
	if (ret < SMSM_SNAPSHOT_SIZE) {
		pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
		return;
	}

	/*
	 * To avoid a race condition with notify_smsm_cb_clients_worker, the
	 * following sequence must be followed:
	 * 1) increment snapshot count
	 * 2) insert data into FIFO
	 *
	 * Potentially in parallel, the worker:
	 * a) verifies >= 1 snapshots are in FIFO
	 * b) processes snapshot
	 * c) decrements reference count
	 *
	 * This order ensures that 1 will always occur before abc.
	 */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count == 0) {
			SMx_POWER_INFO("SMSM snapshot wake lock\n");
			wake_lock(&smsm_snapshot_wakelock);
		}
		++smsm_snapshot_count;
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}

	/* queue state entries */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		new_state = __raw_readl(SMSM_STATE_ADDR(n));

		ret = kfifo_in(&smsm_snapshot_fifo,
				&new_state, sizeof(new_state));
		if (ret != sizeof(new_state)) {
			pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
			goto restore_snapshot_count;
		}
	}

	/* queue wakelock usage flag */
	ret = kfifo_in(&smsm_snapshot_fifo,
			&use_wakelock, sizeof(use_wakelock));
	if (ret != sizeof(use_wakelock)) {
		pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
		goto restore_snapshot_count;
	}

	queue_work(smsm_cb_wq, &smsm_cb_work);
	return;

restore_snapshot_count:
	/* undo the reference taken above; drop the wakelock on last ref */
	if (use_wakelock) {
		spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
		if (smsm_snapshot_count) {
			--smsm_snapshot_count;
			if (smsm_snapshot_count == 0) {
				SMx_POWER_INFO("SMSM snapshot wake unlock\n");
				wake_unlock(&smsm_snapshot_wakelock);
			}
		} else {
			pr_err("%s: invalid snapshot count\n", __func__);
		}
		spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
	}
}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002725
/*
 * Common SMSM interrupt handler: snapshots remote state for the callback
 * worker and, for modem interrupts, runs the reset/init handshake state
 * machine on the apps SMSM entry.
 *
 * @irq: interrupt number (INT_ADSP_A11_SMSM gets snapshot-only handling)
 * @data: unused
 * @returns: IRQ_HANDLED always
 */
static irqreturn_t smsm_irq_handler(int irq, void *data)
{
	unsigned long flags;

	if (irq == INT_ADSP_A11_SMSM) {
		uint32_t mux_val;
		static uint32_t prev_smem_q6_apps_smsm;

		/* track the Q6->APPS intr mux value on 8x50 targets */
		if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
			if (mux_val != prev_smem_q6_apps_smsm)
				prev_smem_q6_apps_smsm = mux_val;
		}

		spin_lock_irqsave(&smem_lock, flags);
		smsm_cb_snapshot(1);
		spin_unlock_irqrestore(&smem_lock, flags);
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&smem_lock, flags);
	if (!smsm_info.state) {
		SMSM_INFO("<SM NO STATE>\n");
	} else {
		unsigned old_apps, apps;
		unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));

		old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));

		SMSM_DBG("<SM %08x %08x>\n", apps, modm);
		if (apps & SMSM_RESET) {
			/* If we get an interrupt and the apps SMSM_RESET
			   bit is already set, the modem is acking the
			   app's reset ack. */
			if (!disable_smsm_reset_handshake)
				apps &= ~SMSM_RESET;
			/* Issue a fake irq to handle any
			 * smd state changes during reset
			 */
			smd_fake_irq_handler(0);

			/* queue modem restart notify chain */
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_RESET) {
			/* modem initiated a reset: ack it (unless the
			 * handshake is disabled) and flush caches before
			 * the modem goes down */
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
			if (!disable_smsm_reset_handshake) {
				apps |= SMSM_RESET;
				flush_cache_all();
				outer_flush_all();
			}
			modem_queue_start_reset_notify();

		} else if (modm & SMSM_INIT) {
			/* mirror the modem's init progress; declare RUN once
			 * INIT, SMDINIT and RPCINIT are all set */
			if (!(apps & SMSM_INIT)) {
				apps |= SMSM_INIT;
				modem_queue_smsm_init_notify();
			}

			if (modm & SMSM_SMDINIT)
				apps |= SMSM_SMDINIT;
			if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
				(SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
				apps |= SMSM_RUN;
		} else if (modm & SMSM_SYSTEM_DOWNLOAD) {
			pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
			modem_queue_start_reset_notify();
		}

		if (old_apps != apps) {
			/* publish the new apps state and tell the modem */
			SMSM_DBG("<SM %08x NOTIFY>\n", apps);
			__raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
			do_smd_probe();
			notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
		}

		smsm_cb_snapshot(1);
	}
	spin_unlock_irqrestore(&smem_lock, flags);
	return IRQ_HANDLED;
}
2808
/* Modem->Apps SMSM interrupt: count it, then run the common handler. */
static irqreturn_t smsm_modem_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int Modem->Apps\n");
	++interrupt_stats[SMD_MODEM].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002815
/* LPASS->Apps SMSM interrupt: count it, then run the common handler. */
static irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int LPASS->Apps\n");
	++interrupt_stats[SMD_Q6].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2822
/* DSPS->Apps SMSM interrupt: count it, then run the common handler. */
static irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int DSPS->Apps\n");
	++interrupt_stats[SMD_DSPS].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2829
/* WCNSS->Apps SMSM interrupt: count it, then run the common handler. */
static irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
{
	SMx_POWER_INFO("SMSM Int WCNSS->Apps\n");
	++interrupt_stats[SMD_WCNSS].smsm_in_count;
	return smsm_irq_handler(irq, data);
}
2836
Eric Holmberge8a39322012-04-03 15:14:02 -06002837/*
2838 * Changes the global interrupt mask. The set and clear masks are re-applied
2839 * every time the global interrupt mask is updated for callback registration
2840 * and de-registration.
2841 *
2842 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2843 * mask and the set mask, the result will be that the interrupt is set.
2844 *
2845 * @smsm_entry SMSM entry to change
2846 * @clear_mask 1 = clear bit, 0 = no-op
2847 * @set_mask 1 = set bit, 0 = no-op
2848 *
2849 * @returns 0 for success, < 0 for error
2850 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002851int smsm_change_intr_mask(uint32_t smsm_entry,
2852 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002853{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002854 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002855 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002856
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002857 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2858 pr_err("smsm_change_state: Invalid entry %d\n",
2859 smsm_entry);
2860 return -EINVAL;
2861 }
2862
2863 if (!smsm_info.intr_mask) {
2864 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002865 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002866 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002867
2868 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002869 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2870 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002871
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002872 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2873 new_mask = (old_mask & ~clear_mask) | set_mask;
2874 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002875
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002876 wmb();
2877 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002878
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002879 return 0;
2880}
2881EXPORT_SYMBOL(smsm_change_intr_mask);
2882
2883int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2884{
2885 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2886 pr_err("smsm_change_state: Invalid entry %d\n",
2887 smsm_entry);
2888 return -EINVAL;
2889 }
2890
2891 if (!smsm_info.intr_mask) {
2892 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
2893 return -EIO;
2894 }
2895
2896 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2897 return 0;
2898}
2899EXPORT_SYMBOL(smsm_get_intr_mask);
2900
/*
 * Atomically clear then set bits in one SMSM state entry and notify the
 * interested remote processors of the bits that changed.
 *
 * @smsm_entry  SMSM entry to change
 * @clear_mask  bits to clear (applied first)
 * @set_mask    bits to set
 *
 * @returns 0 for success, -EINVAL for a bad entry, -EIO if the state
 *          array is not mapped yet
 */
int smsm_change_state(uint32_t smsm_entry,
		      uint32_t clear_mask, uint32_t set_mask)
{
	unsigned long flags;
	uint32_t old_state, new_state;

	if (smsm_entry >= SMSM_NUM_ENTRIES) {
		pr_err("smsm_change_state: Invalid entry %d",
			smsm_entry);
		return -EINVAL;
	}

	if (!smsm_info.state) {
		pr_err("smsm_change_state <SM NO STATE>\n");
		return -EIO;
	}
	spin_lock_irqsave(&smem_lock, flags);

	old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
	new_state = (old_state & ~clear_mask) | set_mask;
	__raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
	SMSM_DBG("smsm_change_state %x\n", new_state);
	/* XOR yields exactly the changed bits for remote notification */
	notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));

	spin_unlock_irqrestore(&smem_lock, flags);

	return 0;
}
EXPORT_SYMBOL(smsm_change_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002930
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002931uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002932{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002933 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002934
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002935 /* needs interface change to return error code */
2936 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2937 pr_err("smsm_change_state: Invalid entry %d",
2938 smsm_entry);
2939 return 0;
2940 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002941
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002942 if (!smsm_info.state) {
2943 pr_err("smsm_get_state <SM NO STATE>\n");
2944 } else {
2945 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2946 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002947
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002948 return rv;
2949}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002950EXPORT_SYMBOL(smsm_get_state);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002951
/**
 * Performs SMSM callback client notification.
 *
 * Drains complete snapshots from smsm_snapshot_fifo; for each entry whose
 * value changed since the last processed snapshot, invokes every
 * registered callback whose mask overlaps the changed bits.  Each
 * snapshot ends with a wakelock-usage flag; when set, one reference on
 * smsm_snapshot_wakelock is released per snapshot consumed.
 */
void notify_smsm_cb_clients_worker(struct work_struct *work)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_info *state_info;
	int n;
	uint32_t new_state;
	uint32_t state_changes;
	uint32_t use_wakelock;
	int ret;
	unsigned long flags;

	if (!smd_initialized)
		return;

	/* only process whole snapshots (SMSM_SNAPSHOT_SIZE bytes each) */
	while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
		mutex_lock(&smsm_lock);
		for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
			state_info = &smsm_states[n];

			ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
					sizeof(new_state));
			if (ret != sizeof(new_state)) {
				pr_err("%s: snapshot underflow %d\n",
					__func__, ret);
				mutex_unlock(&smsm_lock);
				return;
			}

			state_changes = state_info->last_value ^ new_state;
			if (state_changes) {
				SMx_POWER_INFO("SMSM Change %d: %08x->%08x\n",
						n, state_info->last_value,
						new_state);
				list_for_each_entry(cb_info,
					&state_info->callbacks, cb_list) {

					/* notify only clients interested in
					 * at least one changed bit */
					if (cb_info->mask & state_changes)
						cb_info->notify(cb_info->data,
							state_info->last_value,
							new_state);
				}
				state_info->last_value = new_state;
			}
		}

		/* read wakelock flag */
		ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
				sizeof(use_wakelock));
		if (ret != sizeof(use_wakelock)) {
			pr_err("%s: snapshot underflow %d\n",
				__func__, ret);
			mutex_unlock(&smsm_lock);
			return;
		}
		mutex_unlock(&smsm_lock);

		/* drop the reference taken by smsm_cb_snapshot(); unlock
		 * the wakelock once the last pending snapshot is consumed */
		if (use_wakelock) {
			spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
			if (smsm_snapshot_count) {
				--smsm_snapshot_count;
				if (smsm_snapshot_count == 0) {
					SMx_POWER_INFO("SMSM snapshot"
						   " wake unlock\n");
					wake_unlock(&smsm_snapshot_wakelock);
				}
			} else {
				pr_err("%s: invalid snapshot count\n",
					__func__);
			}
			spin_unlock_irqrestore(&smsm_snapshot_count_lock,
					flags);
		}
	}
}
3029
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003030
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003031/**
3032 * Registers callback for SMSM state notifications when the specified
3033 * bits change.
3034 *
 * @smsm_entry  Processor entry to register against
 * @mask        Bits whose changes should trigger the callback (ORed into
 *              an existing entry's mask if one matches notify/data)
 * @notify      Notification function to register
3038 * @data Opaque data passed in to callback
3039 *
3040 * @returns Status code
3041 * <0 error code
3042 * 0 inserted new entry
3043 * 1 updated mask of existing entry
3044 */
3045int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
3046 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003047{
Eric Holmberge8a39322012-04-03 15:14:02 -06003048 struct smsm_state_info *state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003049 struct smsm_state_cb_info *cb_info;
3050 struct smsm_state_cb_info *cb_found = 0;
Eric Holmberge8a39322012-04-03 15:14:02 -06003051 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003052 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003053
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003054 if (smsm_entry >= SMSM_NUM_ENTRIES)
3055 return -EINVAL;
3056
Eric Holmbergc8002902011-09-16 13:55:57 -06003057 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003058
3059 if (!smsm_states) {
3060 /* smsm not yet initialized */
3061 ret = -ENODEV;
3062 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003063 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003064
Eric Holmberge8a39322012-04-03 15:14:02 -06003065 state = &smsm_states[smsm_entry];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003066 list_for_each_entry(cb_info,
Eric Holmberge8a39322012-04-03 15:14:02 -06003067 &state->callbacks, cb_list) {
3068 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003069 (cb_info->data == data)) {
3070 cb_info->mask |= mask;
3071 cb_found = cb_info;
3072 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003073 }
Eric Holmberge8a39322012-04-03 15:14:02 -06003074 new_mask |= cb_info->mask;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003075 }
3076
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003077 if (!cb_found) {
3078 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
3079 GFP_ATOMIC);
3080 if (!cb_info) {
3081 ret = -ENOMEM;
3082 goto cleanup;
3083 }
3084
3085 cb_info->mask = mask;
3086 cb_info->notify = notify;
3087 cb_info->data = data;
3088 INIT_LIST_HEAD(&cb_info->cb_list);
3089 list_add_tail(&cb_info->cb_list,
Eric Holmberge8a39322012-04-03 15:14:02 -06003090 &state->callbacks);
3091 new_mask |= mask;
3092 }
3093
3094 /* update interrupt notification mask */
3095 if (smsm_entry == SMSM_MODEM_STATE)
3096 new_mask |= LEGACY_MODEM_SMSM_MASK;
3097
3098 if (smsm_info.intr_mask) {
3099 unsigned long flags;
3100
3101 spin_lock_irqsave(&smem_lock, flags);
3102 new_mask = (new_mask & ~state->intr_mask_clear)
3103 | state->intr_mask_set;
3104 __raw_writel(new_mask,
3105 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
3106 wmb();
3107 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003108 }
3109
3110cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06003111 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003112 return ret;
3113}
3114EXPORT_SYMBOL(smsm_state_cb_register);
3115
3116
/**
 * Deregisters for SMSM state notifications for the specified bits.
 *
 * @smsm_entry  Processor entry to deregister
 * @mask  Bits to deregister (if result is 0, callback is removed)
 * @notify  Notification function to deregister
 * @data  Opaque data passed in to callback
 *
 * @returns Status code
 *  <0 error code
 *  0  not found
 *  1  updated mask
 *  2  removed callback
 */
int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
		void (*notify)(void *, uint32_t, uint32_t), void *data)
{
	struct smsm_state_cb_info *cb_info;
	struct smsm_state_cb_info *cb_tmp;
	struct smsm_state_info *state;
	uint32_t new_mask = 0;
	int ret = 0;

	if (smsm_entry >= SMSM_NUM_ENTRIES)
		return -EINVAL;

	mutex_lock(&smsm_lock);

	if (!smsm_states) {
		/* smsm not yet initialized */
		mutex_unlock(&smsm_lock);
		return -ENODEV;
	}

	state = &smsm_states[smsm_entry];
	/* _safe iterator: matching entries may be deleted while walking */
	list_for_each_entry_safe(cb_info, cb_tmp,
		&state->callbacks, cb_list) {
		if (!ret && (cb_info->notify == notify) &&
			(cb_info->data == data)) {
			cb_info->mask &= ~mask;
			ret = 1;
			if (!cb_info->mask) {
				/* no mask bits set, remove callback */
				list_del(&cb_info->cb_list);
				kfree(cb_info);
				ret = 2;
				/* skip new_mask accumulation for freed node */
				continue;
			}
		}
		/* union of bits still wanted by surviving callbacks */
		new_mask |= cb_info->mask;
	}

	/* update interrupt notification mask */
	if (smsm_entry == SMSM_MODEM_STATE)
		new_mask |= LEGACY_MODEM_SMSM_MASK;

	if (smsm_info.intr_mask) {
		unsigned long flags;

		spin_lock_irqsave(&smem_lock, flags);
		new_mask = (new_mask & ~state->intr_mask_clear)
				| state->intr_mask_set;
		__raw_writel(new_mask,
			SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
		wmb();
		spin_unlock_irqrestore(&smem_lock, flags);
	}

	mutex_unlock(&smsm_lock);
	return ret;
}
EXPORT_SYMBOL(smsm_state_cb_deregister);
3189
Eric Holmberg6275b602012-11-19 13:05:04 -07003190/**
3191 * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
3192 *
3193 * @returns: pointer to SMEM remote spinlock
3194 */
3195remote_spinlock_t *smem_get_remote_spinlock(void)
3196{
3197 return &remote_spinlock;
3198}
3199EXPORT_SYMBOL(smem_get_remote_spinlock);
3200
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003201int smd_module_init_notifier_register(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003202{
3203 int ret;
3204 if (!nb)
3205 return -EINVAL;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003206 mutex_lock(&smd_module_init_notifier_lock);
3207 ret = raw_notifier_chain_register(&smd_module_init_notifier_list, nb);
3208 if (smd_module_inited)
3209 nb->notifier_call(nb, 0, NULL);
3210 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003211 return ret;
3212}
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003213EXPORT_SYMBOL(smd_module_init_notifier_register);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003214
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003215int smd_module_init_notifier_unregister(struct notifier_block *nb)
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003216{
3217 int ret;
3218 if (!nb)
3219 return -EINVAL;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003220 mutex_lock(&smd_module_init_notifier_lock);
3221 ret = raw_notifier_chain_unregister(&smd_module_init_notifier_list,
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003222 nb);
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003223 mutex_unlock(&smd_module_init_notifier_lock);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003224 return ret;
3225}
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003226EXPORT_SYMBOL(smd_module_init_notifier_unregister);
Karthikeyan Ramasubramanian7069c482012-03-22 09:21:20 -06003227
/*
 * Mark SMD module initialization complete and fire the notifier chain.
 * The inited flag is set under the same lock used by register, so a
 * registration racing with this call sees a consistent state.
 */
static void smd_module_init_notify(uint32_t state, void *data)
{
	mutex_lock(&smd_module_init_notifier_lock);
	smd_module_inited = 1;
	raw_notifier_call_chain(&smd_module_init_notifier_list,
					state, data);
	mutex_unlock(&smd_module_init_notifier_lock);
}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003236
3237int smd_core_init(void)
3238{
3239 int r;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003240 unsigned long flags = IRQF_TRIGGER_RISING;
3241 SMD_INFO("smd_core_init()\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003242
Brian Swetland37521a32009-07-01 18:30:47 -07003243 r = request_irq(INT_A9_M2A_0, smd_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003244 flags, "smd_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003245 if (r < 0)
3246 return r;
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303247 interrupt_stats[SMD_MODEM].smd_interrupt_id = INT_A9_M2A_0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003248 r = enable_irq_wake(INT_A9_M2A_0);
3249 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003250 pr_err("smd_core_init: "
3251 "enable_irq_wake failed for INT_A9_M2A_0\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003252
Eric Holmberg98c6c642012-02-24 11:29:35 -07003253 r = request_irq(INT_A9_M2A_5, smsm_modem_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003254 flags, "smsm_dev", 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003255 if (r < 0) {
3256 free_irq(INT_A9_M2A_0, 0);
3257 return r;
3258 }
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303259 interrupt_stats[SMD_MODEM].smsm_interrupt_id = INT_A9_M2A_5;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003260 r = enable_irq_wake(INT_A9_M2A_5);
3261 if (r < 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003262 pr_err("smd_core_init: "
3263 "enable_irq_wake failed for INT_A9_M2A_5\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003264
Brian Swetland37521a32009-07-01 18:30:47 -07003265#if defined(CONFIG_QDSP6)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003266#if (INT_ADSP_A11 == INT_ADSP_A11_SMSM)
3267 flags |= IRQF_SHARED;
3268#endif
Brian Swetland37521a32009-07-01 18:30:47 -07003269 r = request_irq(INT_ADSP_A11, smd_dsp_irq_handler,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003270 flags, "smd_dev", smd_dsp_irq_handler);
Brian Swetland37521a32009-07-01 18:30:47 -07003271 if (r < 0) {
3272 free_irq(INT_A9_M2A_0, 0);
3273 free_irq(INT_A9_M2A_5, 0);
3274 return r;
3275 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003276
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303277 interrupt_stats[SMD_Q6].smd_interrupt_id = INT_ADSP_A11;
Eric Holmberg98c6c642012-02-24 11:29:35 -07003278 r = request_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler,
3279 flags, "smsm_dev", smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003280 if (r < 0) {
3281 free_irq(INT_A9_M2A_0, 0);
3282 free_irq(INT_A9_M2A_5, 0);
3283 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
3284 return r;
3285 }
3286
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303287 interrupt_stats[SMD_Q6].smsm_interrupt_id = INT_ADSP_A11_SMSM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003288 r = enable_irq_wake(INT_ADSP_A11);
3289 if (r < 0)
3290 pr_err("smd_core_init: "
3291 "enable_irq_wake failed for INT_ADSP_A11\n");
3292
3293#if (INT_ADSP_A11 != INT_ADSP_A11_SMSM)
3294 r = enable_irq_wake(INT_ADSP_A11_SMSM);
3295 if (r < 0)
3296 pr_err("smd_core_init: enable_irq_wake "
3297 "failed for INT_ADSP_A11_SMSM\n");
3298#endif
3299 flags &= ~IRQF_SHARED;
Brian Swetland37521a32009-07-01 18:30:47 -07003300#endif
3301
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003302#if defined(CONFIG_DSPS)
3303 r = request_irq(INT_DSPS_A11, smd_dsps_irq_handler,
3304 flags, "smd_dev", smd_dsps_irq_handler);
3305 if (r < 0) {
3306 free_irq(INT_A9_M2A_0, 0);
3307 free_irq(INT_A9_M2A_5, 0);
3308 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003309 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003310 return r;
3311 }
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003312
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303313 interrupt_stats[SMD_DSPS].smd_interrupt_id = INT_DSPS_A11;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003314 r = enable_irq_wake(INT_DSPS_A11);
3315 if (r < 0)
3316 pr_err("smd_core_init: "
3317 "enable_irq_wake failed for INT_ADSP_A11\n");
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07003318#endif
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003319
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003320#if defined(CONFIG_WCNSS)
3321 r = request_irq(INT_WCNSS_A11, smd_wcnss_irq_handler,
3322 flags, "smd_dev", smd_wcnss_irq_handler);
3323 if (r < 0) {
3324 free_irq(INT_A9_M2A_0, 0);
3325 free_irq(INT_A9_M2A_5, 0);
3326 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003327 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003328 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3329 return r;
3330 }
3331
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303332 interrupt_stats[SMD_WCNSS].smd_interrupt_id = INT_WCNSS_A11;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003333 r = enable_irq_wake(INT_WCNSS_A11);
3334 if (r < 0)
3335 pr_err("smd_core_init: "
3336 "enable_irq_wake failed for INT_WCNSS_A11\n");
3337
Eric Holmberg98c6c642012-02-24 11:29:35 -07003338 r = request_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler,
3339 flags, "smsm_dev", smsm_wcnss_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003340 if (r < 0) {
3341 free_irq(INT_A9_M2A_0, 0);
3342 free_irq(INT_A9_M2A_5, 0);
3343 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003344 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003345 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3346 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
3347 return r;
3348 }
3349
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303350 interrupt_stats[SMD_WCNSS].smsm_interrupt_id = INT_WCNSS_A11_SMSM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003351 r = enable_irq_wake(INT_WCNSS_A11_SMSM);
3352 if (r < 0)
3353 pr_err("smd_core_init: "
3354 "enable_irq_wake failed for INT_WCNSS_A11_SMSM\n");
3355#endif
3356
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003357#if defined(CONFIG_DSPS_SMSM)
Eric Holmberg98c6c642012-02-24 11:29:35 -07003358 r = request_irq(INT_DSPS_A11_SMSM, smsm_dsps_irq_handler,
3359 flags, "smsm_dev", smsm_dsps_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003360 if (r < 0) {
3361 free_irq(INT_A9_M2A_0, 0);
3362 free_irq(INT_A9_M2A_5, 0);
3363 free_irq(INT_ADSP_A11, smd_dsp_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003364 free_irq(INT_ADSP_A11_SMSM, smsm_dsp_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003365 free_irq(INT_DSPS_A11, smd_dsps_irq_handler);
3366 free_irq(INT_WCNSS_A11, smd_wcnss_irq_handler);
Eric Holmberg98c6c642012-02-24 11:29:35 -07003367 free_irq(INT_WCNSS_A11_SMSM, smsm_wcnss_irq_handler);
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003368 return r;
3369 }
3370
Arun Kumar Neelakantamac7c02d2012-10-16 22:17:55 +05303371 interrupt_stats[SMD_DSPS].smsm_interrupt_id = INT_DSPS_A11_SMSM;
Jeff Hugo6a8057c2011-08-16 13:47:12 -06003372 r = enable_irq_wake(INT_DSPS_A11_SMSM);
3373 if (r < 0)
3374 pr_err("smd_core_init: "
3375 "enable_irq_wake failed for INT_DSPS_A11_SMSM\n");
3376#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003377 SMD_INFO("smd_core_init() done\n");
3378
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003379 return 0;
3380}
3381
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303382static int intr_init(struct interrupt_config_item *private_irq,
3383 struct smd_irq_config *platform_irq,
3384 struct platform_device *pdev
3385 )
3386{
3387 int irq_id;
3388 int ret;
3389 int ret_wake;
3390
3391 private_irq->out_bit_pos = platform_irq->out_bit_pos;
3392 private_irq->out_offset = platform_irq->out_offset;
3393 private_irq->out_base = platform_irq->out_base;
3394
3395 irq_id = platform_get_irq_byname(
3396 pdev,
3397 platform_irq->irq_name
3398 );
3399 SMD_DBG("smd: %s: register irq: %s id: %d\n", __func__,
3400 platform_irq->irq_name, irq_id);
3401 ret = request_irq(irq_id,
3402 private_irq->irq_handler,
3403 platform_irq->flags,
3404 platform_irq->device_name,
3405 (void *)platform_irq->dev_id
3406 );
3407 if (ret < 0) {
3408 platform_irq->irq_id = ret;
Eric Holmbergdeace152012-07-25 12:17:11 -06003409 private_irq->irq_id = ret;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303410 } else {
3411 platform_irq->irq_id = irq_id;
Eric Holmbergdeace152012-07-25 12:17:11 -06003412 private_irq->irq_id = irq_id;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303413 ret_wake = enable_irq_wake(irq_id);
3414 if (ret_wake < 0) {
3415 pr_err("smd: enable_irq_wake failed on %s",
3416 platform_irq->irq_name);
3417 }
3418 }
3419
3420 return ret;
3421}
3422
Jeff Hugobdc734d2012-03-26 16:05:39 -06003423int sort_cmp_func(const void *a, const void *b)
3424{
3425 struct smem_area *left = (struct smem_area *)(a);
3426 struct smem_area *right = (struct smem_area *)(b);
3427
3428 return left->phys_addr - right->phys_addr;
3429}
3430
/**
 * smd_core_platform_init() - SMD/SMSM init from board-file platform data
 * @pdev: platform device whose platform_data is a struct smd_platform
 *
 * Maps any auxiliary SMEM regions listed in the platform data, then
 * requests the per-subsystem SMD and SMSM interrupts.  On failure, all
 * IRQs registered so far are freed and all mapped regions are unmapped.
 *
 * @returns 0 on success, negative error code on failure
 */
int smd_core_platform_init(struct platform_device *pdev)
{
	int i;
	int ret;
	uint32_t num_ss;
	struct smd_platform *smd_platform_data;
	struct smd_subsystem_config *smd_ss_config_list;
	struct smd_subsystem_config *cfg;
	int err_ret = 0;
	struct smd_smem_regions *smd_smem_areas;
	int smem_idx = 0;

	smd_platform_data = pdev->dev.platform_data;
	num_ss = smd_platform_data->num_ss_configs;
	smd_ss_config_list = smd_platform_data->smd_ss_configs;

	if (smd_platform_data->smd_ssr_config)
		disable_smsm_reset_handshake = smd_platform_data->
			   smd_ssr_config->disable_smsm_reset_handshake;

	smd_smem_areas = smd_platform_data->smd_smem_areas;
	if (smd_smem_areas) {
		num_smem_areas = smd_platform_data->num_smem_areas;
		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
						GFP_KERNEL);
		if (!smem_areas) {
			pr_err("%s: smem_areas kmalloc failed\n", __func__);
			err_ret = -ENOMEM;
			goto smem_areas_alloc_fail;
		}

		/* map each auxiliary SMEM region into the kernel */
		for (smem_idx = 0; smem_idx < num_smem_areas; ++smem_idx) {
			smem_areas[smem_idx].phys_addr =
					smd_smem_areas[smem_idx].phys_addr;
			smem_areas[smem_idx].size =
					smd_smem_areas[smem_idx].size;
			smem_areas[smem_idx].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[smem_idx].phys_addr),
				smem_areas[smem_idx].size);
			if (!smem_areas[smem_idx].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr: %pa size: %pa\n",
					__func__,
					&smem_areas[smem_idx].phys_addr,
					&smem_areas[smem_idx].size);
				err_ret = -ENOMEM;
				/* NOTE(review): the increment makes the
				 * cleanup loop below also iounmap this
				 * failed (NULL) entry — presumably relying
				 * on iounmap(NULL) being tolerated; verify */
				++smem_idx;
				goto smem_failed;
			}
		}
		/* keep regions sorted by physical address for lookups */
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	for (i = 0; i < num_ss; i++) {
		cfg = &smd_ss_config_list[i];

		ret = intr_init(
			&private_intr_config[cfg->irq_config_id].smd,
			&cfg->smd_int,
			pdev
			);

		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smd_int.irq_name);
			goto intr_failed;
		}

		interrupt_stats[cfg->irq_config_id].smd_interrupt_id
						 = cfg->smd_int.irq_id;
		/* only init smsm structs if this edge supports smsm */
		if (cfg->smsm_int.irq_id)
			ret = intr_init(
				&private_intr_config[cfg->irq_config_id].smsm,
				&cfg->smsm_int,
				pdev
				);

		/* if smsm was skipped, ret still holds the (>=0) smd result */
		if (ret < 0) {
			err_ret = ret;
			pr_err("smd: register irq failed on %s\n",
				cfg->smsm_int.irq_name);
			goto intr_failed;
		}

		if (cfg->smsm_int.irq_id)
			interrupt_stats[cfg->irq_config_id].smsm_interrupt_id
						 = cfg->smsm_int.irq_id;
		if (cfg->subsys_name)
			strlcpy(edge_to_pids[cfg->edge].subsys_name,
				cfg->subsys_name, SMD_MAX_CH_NAME_LEN);
	}


	SMD_INFO("smd_core_platform_init() done\n");
	return 0;

intr_failed:
	/* frees every subsystem's IRQs; intr_init stored negative ids for
	 * failed/unreached entries so the >= 0 guard skips them */
	pr_err("smd: deregistering IRQs\n");
	for (i = 0; i < num_ss; ++i) {
		cfg = &smd_ss_config_list[i];

		if (cfg->smd_int.irq_id >= 0)
			free_irq(cfg->smd_int.irq_id,
				(void *)cfg->smd_int.dev_id
				);
		if (cfg->smsm_int.irq_id >= 0)
			free_irq(cfg->smsm_int.irq_id,
				(void *)cfg->smsm_int.dev_id
				);
	}
smem_failed:
	/* unmap regions mapped above; smem_idx marks one past the last */
	for (smem_idx = smem_idx - 1; smem_idx >= 0; --smem_idx)
		iounmap(smem_areas[smem_idx].virt_addr);
	kfree(smem_areas);
smem_areas_alloc_fail:
	return err_ret;
}
3551
/**
 * parse_smd_devicetree() - configure one SMD edge from a DT child node
 * @node: child node with compatible "qcom,smd"
 * @irq_out_base: mapped base of the outgoing-interrupt register block
 *
 * Reads the edge id, outgoing-interrupt register offset/bitmask and the
 * inbound IRQ line, requests the IRQ (optionally IRQF_NO_SUSPEND), and
 * records the optional PIL string for the edge.
 *
 * @returns 0 on success, -ENODEV if a required property is missing, or
 * the request_irq() error code
 */
static int __devinit parse_smd_devicetree(struct device_node *node,
					void *irq_out_base)
{
	uint32_t edge;
	char *key;
	int ret;
	uint32_t irq_offset;
	uint32_t irq_bitmask;
	uint32_t irq_line;
	unsigned long irq_flags = IRQF_TRIGGER_RISING;
	const char *pilstr;
	struct interrupt_config_item *private_irq;

	key = "qcom,smd-edge";
	ret = of_property_read_u32(node, key, &edge);
	if (ret)
		goto missing_key;
	SMD_DBG("%s: %s = %d", __func__, key, edge);

	key = "qcom,smd-irq-offset";
	ret = of_property_read_u32(node, key, &irq_offset);
	if (ret)
		goto missing_key;
	SMD_DBG("%s: %s = %x", __func__, key, irq_offset);

	key = "qcom,smd-irq-bitmask";
	ret = of_property_read_u32(node, key, &irq_bitmask);
	if (ret)
		goto missing_key;
	SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);

	key = "interrupts";
	irq_line = irq_of_parse_and_map(node, 0);
	if (!irq_line)
		goto missing_key;
	SMD_DBG("%s: %s = %d", __func__, key, irq_line);

	/* optional properties below */
	key = "qcom,pil-string";
	pilstr = of_get_property(node, key, NULL);
	if (pilstr)
		SMD_DBG("%s: %s = %s", __func__, key, pilstr);

	key = "qcom,irq-no-suspend";
	ret = of_property_read_bool(node, key);
	if (ret)
		irq_flags |= IRQF_NO_SUSPEND;

	private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smd;
	private_irq->out_bit_pos = irq_bitmask;
	private_irq->out_offset = irq_offset;
	private_irq->out_base = irq_out_base;
	private_irq->irq_id = irq_line;

	ret = request_irq(irq_line,
			private_irq->irq_handler,
			irq_flags,
			"smd_dev",
			NULL);
	if (ret < 0) {
		pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
		return ret;
	} else {
		/* wake-enable failure is logged but not fatal */
		ret = enable_irq_wake(irq_line);
		if (ret < 0)
			pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
					irq_line);
	}

	if (pilstr)
		strlcpy(edge_to_pids[edge].subsys_name, pilstr,
				SMD_MAX_CH_NAME_LEN);

	return 0;

missing_key:
	pr_err("%s: missing key: %s", __func__, key);
	return -ENODEV;
}
3630
/**
 * parse_smsm_devicetree() - configure one SMSM edge from a DT child node
 * @node: child node with compatible "qcom,smsm"
 * @irq_out_base: mapped base of the outgoing-interrupt register block
 *
 * Reads the edge id, outgoing-interrupt register offset/bitmask and the
 * inbound IRQ line, then requests and wake-enables the IRQ.
 *
 * @returns 0 on success, -ENODEV if a required property is missing, or
 * the request_irq() error code
 */
static int __devinit parse_smsm_devicetree(struct device_node *node,
					void *irq_out_base)
{
	uint32_t edge;
	char *key;
	int ret;
	uint32_t irq_offset;
	uint32_t irq_bitmask;
	uint32_t irq_line;
	struct interrupt_config_item *private_irq;

	key = "qcom,smsm-edge";
	ret = of_property_read_u32(node, key, &edge);
	if (ret)
		goto missing_key;
	SMD_DBG("%s: %s = %d", __func__, key, edge);

	key = "qcom,smsm-irq-offset";
	ret = of_property_read_u32(node, key, &irq_offset);
	if (ret)
		goto missing_key;
	SMD_DBG("%s: %s = %x", __func__, key, irq_offset);

	key = "qcom,smsm-irq-bitmask";
	ret = of_property_read_u32(node, key, &irq_bitmask);
	if (ret)
		goto missing_key;
	SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);

	key = "interrupts";
	irq_line = irq_of_parse_and_map(node, 0);
	if (!irq_line)
		goto missing_key;
	SMD_DBG("%s: %s = %d", __func__, key, irq_line);

	private_irq = &private_intr_config[edge_to_pids[edge].remote_pid].smsm;
	private_irq->out_bit_pos = irq_bitmask;
	private_irq->out_offset = irq_offset;
	private_irq->out_base = irq_out_base;
	private_irq->irq_id = irq_line;

	ret = request_irq(irq_line,
			private_irq->irq_handler,
			IRQF_TRIGGER_RISING,
			"smsm_dev",
			NULL);
	if (ret < 0) {
		pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
		return ret;
	} else {
		/* wake-enable failure is logged but not fatal */
		ret = enable_irq_wake(irq_line);
		if (ret < 0)
			pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
					irq_line);
	}

	return 0;

missing_key:
	pr_err("%s: missing key: %s", __func__, key);
	return -ENODEV;
}
3693
3694static void __devinit unparse_smd_devicetree(struct device_node *node)
3695{
3696 uint32_t irq_line;
3697
3698 irq_line = irq_of_parse_and_map(node, 0);
3699
3700 free_irq(irq_line, NULL);
3701}
3702
3703static void __devinit unparse_smsm_devicetree(struct device_node *node)
3704{
3705 uint32_t irq_line;
3706
3707 irq_line = irq_of_parse_and_map(node, 0);
3708
3709 free_irq(irq_line, NULL);
3710}
3711
/**
 * smd_core_devicetree_init() - SMD/SMSM init from device tree
 * @pdev: SMD platform device with a populated of_node
 *
 * Maps the outgoing-interrupt register block, counts and maps the
 * optional "aux-mem<N>" SMEM regions, builds the SSR ramdump segment
 * table (main "smem" region at index 0, aux regions after it), and
 * parses each "qcom,smd"/"qcom,smsm" child node to request its IRQ.
 * On child-node failure, IRQs requested so far are released.
 *
 * @returns 0 on success, negative error code on failure
 */
static int __devinit smd_core_devicetree_init(struct platform_device *pdev)
{
	char *key;
	struct resource *r;
	void *irq_out_base;
	phys_addr_t aux_mem_base;
	resource_size_t aux_mem_size;
	int temp_string_size = 11; /* max 3 digit count */
	char temp_string[temp_string_size];
	int count;
	struct device_node *node;
	int ret;
	const char *compatible;
	struct ramdump_segment *ramdump_segments_tmp;
	int subnode_num = 0;
	resource_size_t irq_out_size;

	disable_smsm_reset_handshake = 1;

	key = "irq-reg-base";
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!r) {
		pr_err("%s: missing '%s'\n", __func__, key);
		return -ENODEV;
	}
	irq_out_size = resource_size(r);
	irq_out_base = ioremap_nocache(r->start, irq_out_size);
	if (!irq_out_base) {
		pr_err("%s: ioremap_nocache() of irq_out_base addr:%pr size:%pr\n",
			__func__, &r->start, &irq_out_size);
		return -ENOMEM;
	}
	SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);

	/* first pass: count the aux-mem<N> regions */
	count = 1;
	while (1) {
		scnprintf(temp_string, temp_string_size, "aux-mem%d", count);
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							temp_string);
		if (!r)
			break;

		++num_smem_areas;
		++count;
		if (count > 999) {
			pr_err("%s: max num aux mem regions reached\n",
				__func__);
			break;
		}
	}

	/* initialize SSR ramdump regions */
	key = "smem";
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
	if (!r) {
		pr_err("%s: missing '%s'\n", __func__, key);
		/* NOTE(review): irq_out_base is never iounmapped on this
		 * or any later error path — probe-failure mapping leak;
		 * verify whether intentional */
		return -ENODEV;
	}
	ramdump_segments_tmp = kmalloc_array(num_smem_areas + 1,
			sizeof(struct ramdump_segment), GFP_KERNEL);

	if (!ramdump_segments_tmp) {
		pr_err("%s: ramdump segment kmalloc failed\n", __func__);
		ret = -ENOMEM;
		goto free_smem_areas;
	}
	/* segment 0 is always the main SMEM region */
	ramdump_segments_tmp[0].address = r->start;
	ramdump_segments_tmp[0].size = resource_size(r);

	if (num_smem_areas) {

		smem_areas = kmalloc(sizeof(struct smem_area) * num_smem_areas,
					GFP_KERNEL);

		if (!smem_areas) {
			pr_err("%s: smem areas kmalloc failed\n", __func__);
			ret = -ENOMEM;
			goto free_smem_areas;
		}
		/* second pass: map each aux-mem<N> region */
		count = 1;
		while (1) {
			scnprintf(temp_string, temp_string_size, "aux-mem%d",
					count);
			r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
								temp_string);
			if (!r)
				break;
			aux_mem_base = r->start;
			aux_mem_size = resource_size(r);

			/*
			 * Add to ram-dumps segments.
			 * ramdump_segments_tmp[0] is the main SMEM region,
			 * so auxiliary segments are indexed by count
			 * instead of count - 1.
			 */
			ramdump_segments_tmp[count].address = aux_mem_base;
			ramdump_segments_tmp[count].size = aux_mem_size;

			SMD_DBG("%s: %s = %pa %pa", __func__, temp_string,
					&aux_mem_base, &aux_mem_size);
			smem_areas[count - 1].phys_addr = aux_mem_base;
			smem_areas[count - 1].size = aux_mem_size;
			smem_areas[count - 1].virt_addr = ioremap_nocache(
				(unsigned long)(smem_areas[count-1].phys_addr),
				smem_areas[count - 1].size);
			if (!smem_areas[count - 1].virt_addr) {
				pr_err("%s: ioremap_nocache() of addr:%pa size: %pa\n",
					__func__,
					&smem_areas[count - 1].phys_addr,
					&smem_areas[count - 1].size);
				ret = -ENOMEM;
				/* NOTE(review): earlier successful ioremaps
				 * are not iounmapped on this path; verify */
				goto free_smem_areas;
			}

			++count;
			if (count > 999) {
				pr_err("%s: max num aux mem regions reached\n",
					__func__);
				break;
			}
		}
		/* keep regions sorted by physical address for lookups */
		sort(smem_areas, num_smem_areas,
				sizeof(struct smem_area),
				sort_cmp_func, NULL);
	}

	/* request IRQs for each smd/smsm child node */
	for_each_child_of_node(pdev->dev.of_node, node) {
		compatible = of_get_property(node, "compatible", NULL);
		if (!compatible) {
			pr_err("%s: invalid child node: compatible null\n",
				__func__);
			ret = -ENODEV;
			goto rollback_subnodes;
		}
		if (!strcmp(compatible, "qcom,smd")) {
			ret = parse_smd_devicetree(node, irq_out_base);
			if (ret)
				goto rollback_subnodes;
		} else if (!strcmp(compatible, "qcom,smsm")) {
			ret = parse_smsm_devicetree(node, irq_out_base);
			if (ret)
				goto rollback_subnodes;
		} else {
			pr_err("%s: invalid child node named: %s\n", __func__,
				compatible);
			ret = -ENODEV;
			goto rollback_subnodes;
		}
		++subnode_num;
	}

	/* publish the completed ramdump table only on full success */
	smem_ramdump_segments = ramdump_segments_tmp;
	return 0;

rollback_subnodes:
	/* free IRQs for the first subnode_num successfully-parsed children */
	count = 0;
	for_each_child_of_node(pdev->dev.of_node, node) {
		if (count >= subnode_num)
			break;
		++count;
		compatible = of_get_property(node, "compatible", NULL);
		if (!strcmp(compatible, "qcom,smd"))
			unparse_smd_devicetree(node);
		else
			unparse_smsm_devicetree(node);
	}
free_smem_areas:
	num_smem_areas = 0;
	kfree(ramdump_segments_tmp);
	kfree(smem_areas);
	smem_areas = NULL;
	return ret;
}
3886
Gregory Bean4416e9e2010-07-28 10:22:12 -07003887static int __devinit msm_smd_probe(struct platform_device *pdev)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003888{
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303889 int ret;
Daniel Walker0aec66d2010-03-18 12:31:08 -07003890
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303891 SMD_INFO("smd probe\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003892 INIT_WORK(&probe_work, smd_channel_probe_worker);
3893
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003894 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3895 if (IS_ERR(channel_close_wq)) {
3896 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3897 return -ENOMEM;
3898 }
3899
3900 if (smsm_init()) {
3901 pr_err("smsm_init() failed\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003902 return -1;
3903 }
3904
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303905 if (pdev) {
3906 if (pdev->dev.of_node) {
Jeff Hugo412356e2012-09-27 17:14:23 -06003907 ret = smd_core_devicetree_init(pdev);
3908 if (ret) {
3909 pr_err("%s: device tree init failed\n",
3910 __func__);
3911 return ret;
3912 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003913 smd_dev = &pdev->dev;
Angshuman Sarkarbad32df2012-02-01 19:52:52 +05303914 } else if (pdev->dev.platform_data) {
3915 ret = smd_core_platform_init(pdev);
3916 if (ret) {
3917 pr_err(
3918 "SMD: smd_core_platform_init() failed\n");
3919 return -ENODEV;
3920 }
3921 } else {
3922 ret = smd_core_init();
3923 if (ret) {
3924 pr_err("smd_core_init() failed\n");
3925 return -ENODEV;
3926 }
3927 }
3928 } else {
3929 pr_err("SMD: PDEV not found\n");
3930 return -ENODEV;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003931 }
Iliyan Malchev1207bab2009-11-15 18:16:43 -08003932
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003933 smd_initialized = 1;
3934
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003935 smd_alloc_loopback_channel();
Eric Holmbergc33d4ab2011-10-24 10:28:25 -06003936 smsm_irq_handler(0, 0);
3937 tasklet_schedule(&smd_fake_irq_tasklet);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003938
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003939 return 0;
3940}
3941
/* Forward declaration: the notifier table below stores a pointer to this
 * callback before its definition appears. */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

/*
 * Subsystem-restart (SSR) registrations: one entry per remote subsystem
 * name, mapping it to the SMD processor ID whose channels must be reset
 * when that subsystem restarts.  Note that several names share a
 * processor ID ("gss" -> SMD_MODEM, "adsp" -> SMD_Q6); multiple
 * subsystem names can map to the same SMD edge.
 */
static struct restart_notifier_block restart_notifiers[] = {
	{SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
	{SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
	{SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
	{SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
	{SMD_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
};
3954
/*
 * Subsystem-restart (SSR) notifier callback, registered once per entry
 * in restart_notifiers[].  All SMD/SMSM cleanup for a restarting remote
 * processor happens at the SUBSYS_AFTER_SHUTDOWN stage; other stages
 * are ignored.
 *
 * @this: the ->nb member embedded in a restart_notifier_block
 * @code: SSR stage code
 * @data: unused
 *
 * Always returns NOTIFY_DONE.
 */
static int restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	/*
	 * Some SMD or SMSM clients assume SMD/SMSM SSR handling will be
	 * done in the AFTER_SHUTDOWN level. If this ever changes, extra
	 * care should be taken to verify no clients are broken.
	 */
	if (code == SUBSYS_AFTER_SHUTDOWN) {
		struct restart_notifier_block *notifier;

		/* Recover the registration entry from the embedded nb */
		notifier = container_of(this,
				struct restart_notifier_block, nb);
		SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
				__func__, notifier->processor,
				notifier->name);

		/* Release any shared spinlocks the dead processor may
		 * still hold, so the local side cannot block on them. */
		remote_spin_release(&remote_spinlock, notifier->processor);
		remote_spin_release_all(notifier->processor);

		if (smem_ramdump_dev) {
			int ret;

			SMD_INFO("%s: saving ramdump\n", __func__);
			/*
			 * XPU protection does not currently allow the
			 * auxiliary memory regions to be dumped. If this
			 * changes, then num_smem_areas + 1 should be passed
			 * into do_elf_ramdump() to dump all regions.
			 */
			ret = do_elf_ramdump(smem_ramdump_dev,
					smem_ramdump_segments, 1);
			if (ret < 0)
				pr_err("%s: unable to dump smem %d\n", __func__,
					ret);
		}

		/* Reset channel state for every channel on this edge */
		smd_channel_reset(notifier->processor);
	}

	return NOTIFY_DONE;
}
3998
3999static __init int modem_restart_late_init(void)
4000{
4001 int i;
4002 void *handle;
4003 struct restart_notifier_block *nb;
4004
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06004005 smem_ramdump_dev = create_ramdump_device("smem-smd", smd_dev);
4006 if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
4007 pr_err("%s: Unable to create smem ramdump device.\n",
4008 __func__);
4009 smem_ramdump_dev = NULL;
4010 }
4011
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004012 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
4013 nb = &restart_notifiers[i];
4014 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
4015 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
4016 __func__, nb->name, handle);
4017 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06004018
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004019 return 0;
4020}
4021late_initcall(modem_restart_late_init);
4022
/*
 * Device tree match table: this driver binds to the "qcom,smem" node.
 * NOTE(review): the table name says "smem" while the driver is SMD --
 * presumably SMD and SMEM share a single DT node on these platforms;
 * confirm against the platform .dtsi before renaming.
 */
static struct of_device_id msm_smem_match_table[] = {
	{ .compatible = "qcom,smem" },
	{},	/* sentinel */
};

/* Platform driver definition; probe entry point is msm_smd_probe. */
static struct platform_driver msm_smd_driver = {
	.probe = msm_smd_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
		.of_match_table = msm_smem_match_table,
	},
};
4036
/*
 * One-time SMD core initialization: IPC logging context, SMEM remote
 * spinlock, and platform driver registration.  Subsequent calls are
 * no-ops.
 *
 * NOTE(review): 'registered' is set to true before remote_spin_lock_init
 * and platform_driver_register can fail, so a later call made after such
 * a failure returns 0 (success) without retrying -- confirm this is the
 * intended behavior.
 *
 * Returns 0 on success or a negative errno from spinlock init / driver
 * registration.
 */
int __init msm_smd_init(void)
{
	static bool registered;
	int rc;

	if (registered)
		return 0;

	/* Logging-context failure is non-fatal; just disable debug output */
	smd_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smd");
	if (!smd_log_ctx) {
		pr_err("%s: unable to create logging context\n", __func__);
		msm_smd_debug_mask = 0;
	}

	registered = true;
	rc = remote_spin_lock_init(&remote_spinlock, SMEM_SPINLOCK_SMEM_ALLOC);
	if (rc) {
		pr_err("%s: remote spinlock init failed %d\n", __func__, rc);
		return rc;
	}
	spinlocks_initialized = 1;

	rc = platform_driver_register(&msm_smd_driver);
	if (rc) {
		pr_err("%s: msm_smd_driver register failed %d\n",
			__func__, rc);
		return rc;
	}

	/* Notify listeners waiting for the SMD module to come up */
	smd_module_init_notify(0, NULL);

	return 0;
}

module_init(msm_smd_init);

MODULE_DESCRIPTION("MSM Shared Memory Core");
MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
MODULE_LICENSE("GPL");