/* arch/arm/mach-msm/smd.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
 * Author: Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/termios.h>
#include <linux/ctype.h>
#include <linux/remote_spinlock.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
#include <linux/wakelock.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <mach/msm_smd.h>
#include <mach/msm_iomap.h>
#include <mach/system.h>
#include <mach/subsystem_notif.h>
#include <mach/socinfo.h>
#include <mach/proc_comm.h>
#include <mach/msm_ipc_logging.h>
#include <mach/ramdump.h>
#include <mach/board.h>
#include <mach/msm_smem.h>

#include <asm/cacheflush.h>

#include "smd_private.h"
#include "modem_notifier.h"
#include "smem_private.h"

#define SMD_VERSION 0x00020000
#define SMSM_SNAPSHOT_CNT 64
#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4 + sizeof(uint64_t))
#define RSPIN_INIT_WAIT_MS 1000
#define SMD_FIFO_FULL_RESERVE 4

uint32_t SMSM_NUM_ENTRIES = 8;
uint32_t SMSM_NUM_HOSTS = 3;

/* Legacy SMSM interrupt notifications */
#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT \
			| SMSM_RUN | SMSM_SYSTEM_DOWNLOAD)

struct smsm_shared_info {
	uint32_t *state;
	uint32_t *intr_mask;
	uint32_t *intr_mux;
};

static struct smsm_shared_info smsm_info;
static struct kfifo smsm_snapshot_fifo;
static struct wake_lock smsm_snapshot_wakelock;
static int smsm_snapshot_count;
static DEFINE_SPINLOCK(smsm_snapshot_count_lock);

struct smsm_size_info_type {
	uint32_t num_hosts;
	uint32_t num_entries;
	uint32_t reserved0;
	uint32_t reserved1;
};

struct smsm_state_cb_info {
	struct list_head cb_list;
	uint32_t mask;
	void *data;
	void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
};

struct smsm_state_info {
	struct list_head callbacks;
	uint32_t last_value;
	uint32_t intr_mask_set;
	uint32_t intr_mask_clear;
};

static irqreturn_t smsm_irq_handler(int irq, void *data);

/*
 * Interrupt configuration consists of static configuration for the supported
 * processors that is done here along with interrupt configuration that is
 * added by the separate initialization modules (device tree, platform data, or
 * hard coded).
 */
static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
	[SMD_MODEM] = {
		.smd.irq_handler = smd_modem_irq_handler,
		.smsm.irq_handler = smsm_modem_irq_handler,
	},
	[SMD_Q6] = {
		.smd.irq_handler = smd_dsp_irq_handler,
		.smsm.irq_handler = smsm_dsp_irq_handler,
	},
	[SMD_DSPS] = {
		.smd.irq_handler = smd_dsps_irq_handler,
		.smsm.irq_handler = smsm_dsps_irq_handler,
	},
	[SMD_WCNSS] = {
		.smd.irq_handler = smd_wcnss_irq_handler,
		.smsm.irq_handler = smsm_wcnss_irq_handler,
	},
	[SMD_RPM] = {
		.smd.irq_handler = smd_rpm_irq_handler,
		.smsm.irq_handler = NULL, /* does not support smsm */
	},
};

struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];

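/*
 * The shared intr_mask area is a two dimensional array indexed by
 * [entry][host]: SMSM_NUM_ENTRIES rows of SMSM_NUM_HOSTS mask words, which
 * is why SMSM_INTR_MASK_ADDR() below scales the entry by SMSM_NUM_HOSTS.
 */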
#define SMSM_STATE_ADDR(entry)           (smsm_info.state + entry)
#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
					  entry * SMSM_NUM_HOSTS + host)
#define SMSM_INTR_MUX_ADDR(entry)        (smsm_info.intr_mux + entry)

/* Internal definitions which are not exported in some targets */
enum {
	SMSM_APPS_DEM_I = 3,
};

int msm_smd_debug_mask = MSM_SMD_POWER_INFO | MSM_SMD_INFO |
							MSM_SMSM_POWER_INFO;
module_param_named(debug_mask, msm_smd_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
void *smd_log_ctx;
void *smsm_log_ctx;
#define NUM_LOG_PAGES 4

#define IPC_LOG_SMD(level, x...) do { \
	if (smd_log_ctx) \
		ipc_log_string(smd_log_ctx, x); \
	else \
		printk(level x); \
	} while (0)

#define IPC_LOG_SMSM(level, x...) do { \
	if (smsm_log_ctx) \
		ipc_log_string(smsm_log_ctx, x); \
	else \
		printk(level x); \
	} while (0)

#if defined(CONFIG_MSM_SMD_DEBUG)
#define SMD_DBG(x...) do { \
	if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
		IPC_LOG_SMD(KERN_DEBUG, x); \
	} while (0)

#define SMSM_DBG(x...) do { \
	if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
		IPC_LOG_SMSM(KERN_DEBUG, x); \
	} while (0)

#define SMD_INFO(x...) do { \
	if (msm_smd_debug_mask & MSM_SMD_INFO) \
		IPC_LOG_SMD(KERN_INFO, x); \
	} while (0)

#define SMSM_INFO(x...) do { \
	if (msm_smd_debug_mask & MSM_SMSM_INFO) \
		IPC_LOG_SMSM(KERN_INFO, x); \
	} while (0)

#define SMD_POWER_INFO(x...) do { \
	if (msm_smd_debug_mask & MSM_SMD_POWER_INFO) \
		IPC_LOG_SMD(KERN_INFO, x); \
	} while (0)

#define SMSM_POWER_INFO(x...) do { \
	if (msm_smd_debug_mask & MSM_SMSM_POWER_INFO) \
		IPC_LOG_SMSM(KERN_INFO, x); \
	} while (0)
#else
#define SMD_DBG(x...) do { } while (0)
#define SMSM_DBG(x...) do { } while (0)
#define SMD_INFO(x...) do { } while (0)
#define SMSM_INFO(x...) do { } while (0)
#define SMD_POWER_INFO(x...) do { } while (0)
#define SMSM_POWER_INFO(x...) do { } while (0)
#endif

/**
 * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
 *
 * @type: type to check for overflow
 * @a: left value to use
 * @b: right value to use
 * @returns: true if a + b will result in overflow;  false otherwise
 */
#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
	(((type)~0 - (a)) < (b) ? true : false)
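/* For example, OVERFLOW_ADD_UNSIGNED(uint8_t, 200, 100) is true since
 * 255 - 200 < 100, i.e. the sum cannot be represented in a uint8_t.
 */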

static inline void smd_write_intr(unsigned int val,
				const void __iomem *addr);
#ifndef INT_ADSP_A11_SMSM
#define INT_ADSP_A11_SMSM -1
#endif

#define SMD_LOOPBACK_CID 100

static LIST_HEAD(smd_ch_list_loopback);
static void smd_fake_irq_handler(unsigned long arg);
static void smsm_cb_snapshot(uint32_t use_wakelock);

static struct workqueue_struct *smsm_cb_wq;
static void notify_smsm_cb_clients_worker(struct work_struct *work);
static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
static DEFINE_MUTEX(smsm_lock);
static struct smsm_state_info *smsm_states;

static int smd_stream_write_avail(struct smd_channel *ch);
static int smd_stream_read_avail(struct smd_channel *ch);

static bool pid_is_on_edge(uint32_t edge_num, unsigned pid);

static inline void smd_write_intr(unsigned int val,
				const void __iomem *addr)
{
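	/*
	 * Make sure any earlier shared memory updates are visible to the
	 * remote processor before the outgoing interrupt is raised.
	 */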
	wmb();
	__raw_writel(val, addr);
}

static inline void log_notify(uint32_t subsystem, smd_channel_t *ch)
{
	const char *subsys = smd_edge_to_subsystem(subsystem);

	(void) subsys;

	if (!ch)
		SMD_POWER_INFO("Apps->%s\n", subsys);
	else
		SMD_POWER_INFO(
			"Apps->%s ch%d '%s': tx%d/rx%d %dr/%dw : %dr/%dw\n",
			subsys, ch->n, ch->name,
			ch->fifo_size -
				(smd_stream_write_avail(ch) + 1),
			smd_stream_read_avail(ch),
			ch->half_ch->get_tail(ch->send),
			ch->half_ch->get_head(ch->send),
			ch->half_ch->get_tail(ch->recv),
			ch->half_ch->get_head(ch->recv)
			);
}

static inline void notify_modem_smd(smd_channel_t *ch)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_MODEM].smd;

	log_notify(SMD_APPS_MODEM, ch);
	if (intr->out_base) {
		++interrupt_stats[SMD_MODEM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}

static inline void notify_dsp_smd(smd_channel_t *ch)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_Q6].smd;

	log_notify(SMD_APPS_QDSP, ch);
	if (intr->out_base) {
		++interrupt_stats[SMD_Q6].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}

static inline void notify_dsps_smd(smd_channel_t *ch)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_DSPS].smd;

	log_notify(SMD_APPS_DSPS, ch);
	if (intr->out_base) {
		++interrupt_stats[SMD_DSPS].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}

static inline void notify_wcnss_smd(struct smd_channel *ch)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_WCNSS].smd;

	log_notify(SMD_APPS_WCNSS, ch);
	if (intr->out_base) {
		++interrupt_stats[SMD_WCNSS].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}

static inline void notify_rpm_smd(smd_channel_t *ch)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_RPM].smd;

	if (intr->out_base) {
		log_notify(SMD_APPS_RPM, ch);
		++interrupt_stats[SMD_RPM].smd_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}

static inline void notify_modem_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_MODEM].smsm;

	SMSM_POWER_INFO("SMSM Apps->%s", "MODEM");

	if (intr->out_base) {
		++interrupt_stats[SMD_MODEM].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}

static inline void notify_dsp_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_Q6].smsm;

	SMSM_POWER_INFO("SMSM Apps->%s", "ADSP");

	if (intr->out_base) {
		++interrupt_stats[SMD_Q6].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}

static inline void notify_dsps_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_DSPS].smsm;

	SMSM_POWER_INFO("SMSM Apps->%s", "DSPS");

	if (intr->out_base) {
		++interrupt_stats[SMD_DSPS].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}

static inline void notify_wcnss_smsm(void)
{
	static const struct interrupt_config_item *intr
		= &private_intr_config[SMD_WCNSS].smsm;

	SMSM_POWER_INFO("SMSM Apps->%s", "WCNSS");

	if (intr->out_base) {
		++interrupt_stats[SMD_WCNSS].smsm_out_config_count;
		smd_write_intr(intr->out_bit_pos,
				intr->out_base + intr->out_offset);
	}
}

static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
{
	/* older protocols don't use smsm_intr_mask,
	   but still communicate with the modem */
	if (!smsm_info.intr_mask ||
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
	     & notify_mask))
		notify_modem_smsm();

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
	     & notify_mask)) {
		uint32_t mux_val;

		if (cpu_is_qsd8x50() && smsm_info.intr_mux) {
			mux_val = __raw_readl(
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
			mux_val++;
			__raw_writel(mux_val,
					SMSM_INTR_MUX_ADDR(SMEM_APPS_Q6_SMSM));
		}
		notify_dsp_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
	     & notify_mask)) {
		notify_wcnss_smsm();
	}

	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
	     & notify_mask)) {
		notify_dsps_smsm();
	}

	/*
	 * Notify local SMSM callback clients without wakelock since this
	 * code is used by power management during power-down/-up sequencing
	 * on DEM-based targets.  Grabbing a wakelock in this case will
	 * abort the power-down sequencing.
	 */
	if (smsm_info.intr_mask &&
	    (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
	     & notify_mask)) {
		smsm_cb_snapshot(0);
	}
}

static int smsm_pm_notifier(struct notifier_block *nb,
				unsigned long event, void *unused)
{
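	/*
	 * Update the SMSM_PROC_AWAKE bit around suspend so that remote
	 * processors can tell whether the apps processor is awake.
	 */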
	switch (event) {
	case PM_SUSPEND_PREPARE:
		smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
		break;

	case PM_POST_SUSPEND:
		smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block smsm_pm_nb = {
	.notifier_call = smsm_pm_notifier,
	.priority = 0,
};

void smd_diag(void)
{
	char *x;
	int size;

	x = smem_find_to_proc(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG, 0,
							SMEM_ANY_HOST_FLAG);
	if (x != 0) {
		x[SZ_DIAG_ERR_MSG - 1] = 0;
		SMD_INFO("smem: DIAG '%s'\n", x);
	}

	x = smem_get_entry_to_proc(SMEM_ERR_CRASH_LOG, &size, 0,
							SMEM_ANY_HOST_FLAG);
	if (x != 0) {
		x[size - 1] = 0;
		pr_err("smem: CRASH LOG\n'%s'\n", x);
	}
}


static void handle_modem_crash(void)
{
	pr_err("MODEM/AMSS has CRASHED\n");
	smd_diag();

	/* hard reboot if possible FIXME
	if (msm_reset_hook)
		msm_reset_hook();
	*/

	/* in this case the modem or watchdog should reboot us */
	for (;;)
		;
}

int smsm_check_for_modem_crash(void)
{
	/* if the modem's not ready yet, we have to hope for the best */
	if (!smsm_info.state)
		return 0;

	if (__raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE)) & SMSM_RESET) {
		handle_modem_crash();
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(smsm_check_for_modem_crash);

/* the spinlock is used to synchronize between the
 * irq handler and code that mutates the channel
 * list or fiddles with channel state
 */
static DEFINE_SPINLOCK(smd_lock);
DEFINE_SPINLOCK(smem_lock);

/* the mutex is used during open() and close()
 * operations to avoid races while creating or
 * destroying smd_channel structures
 */
static DEFINE_MUTEX(smd_creation_mutex);

static int smd_initialized;

struct smd_shared_v1 {
	struct smd_half_channel ch0;
	unsigned char data0[SMD_BUF_SIZE];
	struct smd_half_channel ch1;
	unsigned char data1[SMD_BUF_SIZE];
};

struct smd_shared_v2 {
	struct smd_half_channel ch0;
	struct smd_half_channel ch1;
};

struct smd_shared_v2_word_access {
	struct smd_half_channel_word_access ch0;
	struct smd_half_channel_word_access ch1;
};

/**
 * Maps edge type to local and remote processor ID's.
 */
static struct edge_to_pid edge_to_pids[] = {
	[SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
	[SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "adsp"},
	[SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
	[SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
	[SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
	[SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
	[SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
	[SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
	[SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
	[SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
	[SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
	[SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
	[SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
	[SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
	[SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
	[SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
	[SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
	[SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
	[SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
	[SMD_TZ_RPM] = {SMD_TZ, SMD_RPM},
};

struct restart_notifier_block {
	unsigned processor;
	char *name;
	struct notifier_block nb;
};

int disable_smsm_reset_handshake;
static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};

static LIST_HEAD(smd_ch_closed_list);
static LIST_HEAD(smd_ch_closing_list);
static LIST_HEAD(smd_ch_to_close_list);

struct remote_proc_info {
	unsigned remote_pid;
	unsigned free_space;
	struct work_struct probe_work;
	struct list_head ch_list;
	/* 2 total supported tables of channels */
	unsigned char ch_allocated[SMEM_NUM_SMD_STREAM_CHANNELS * 2];
};

static struct remote_proc_info remote_info[NUM_SMD_SUBSYSTEMS];

static void finalize_channel_close_fn(struct work_struct *work);
static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
static struct workqueue_struct *channel_close_wq;

#define PRI_ALLOC_TBL 1
#define SEC_ALLOC_TBL 2
static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm, int table_id,
				struct remote_proc_info *r_info);

static bool smd_edge_inited(int edge)
{
	return edge_to_pids[edge].initialized;
}

/* on smp systems, the probe might get called from multiple cores,
   hence use a lock */
static DEFINE_MUTEX(smd_probe_lock);

/**
 * scan_alloc_table - Scans a specified SMD channel allocation table in SMEM for
 *			newly created channels that need to be made locally
 *			visible
 *
 * @shared: pointer to the table array in SMEM
 * @smd_ch_allocated: pointer to an array indicating already allocated channels
 * @table_id: identifier for this channel allocation table
 * @num_entries: number of entries in this allocation table
 * @r_info: pointer to the info structure of the remote proc we care about
 *
 * The smd_probe_lock must be locked by the calling function.  Shared and
 * smd_ch_allocated are assumed to be valid pointers.
 */
static void scan_alloc_table(struct smd_alloc_elm *shared,
				char *smd_ch_allocated,
				int table_id,
				unsigned num_entries,
				struct remote_proc_info *r_info)
{
	unsigned n;
	uint32_t type;

	for (n = 0; n < num_entries; n++) {
		if (smd_ch_allocated[n])
			continue;

		/*
		 * channel should be allocated only if APPS processor is
		 * involved
		 */
		type = SMD_CHANNEL_TYPE(shared[n].type);
		if (!pid_is_on_edge(type, SMD_APPS) ||
				!pid_is_on_edge(type, r_info->remote_pid))
			continue;
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		if (!smd_initialized && !smd_edge_inited(type)) {
			SMD_INFO(
				"Probe skipping proc %d, tbl %d, ch %d, edge not inited\n",
				r_info->remote_pid, table_id, n);
			continue;
		}

		if (!smd_alloc_channel(&shared[n], table_id, r_info))
			smd_ch_allocated[n] = 1;
		else
			SMD_INFO(
				"Probe skipping proc %d, tbl %d, ch %d, not allocated\n",
				r_info->remote_pid, table_id, n);
	}
}

/**
 * smd_channel_probe_worker() - Scan for newly created SMD channels and init
 *				local structures so the channels are visible to
 *				local clients
 *
 * @work: work_struct corresponding to an instance of this function running on
 *		a workqueue.
 */
static void smd_channel_probe_worker(struct work_struct *work)
{
	struct smd_alloc_elm *shared;
	struct remote_proc_info *r_info;
	unsigned tbl_size;

	r_info = container_of(work, struct remote_proc_info, probe_work);

	shared = smem_get_entry_to_proc(ID_CH_ALLOC_TBL, &tbl_size,
							r_info->remote_pid, 0);

	if (!shared) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}

	mutex_lock(&smd_probe_lock);

	scan_alloc_table(shared, r_info->ch_allocated, PRI_ALLOC_TBL,
			tbl_size / sizeof(*shared),
			r_info);

	shared = smem_get_entry_to_proc(SMEM_CHANNEL_ALLOC_TBL_2, &tbl_size,
							r_info->remote_pid, 0);
	if (shared)
		scan_alloc_table(shared,
			&(r_info->ch_allocated[SMEM_NUM_SMD_STREAM_CHANNELS]),
			SEC_ALLOC_TBL,
			tbl_size / sizeof(*shared),
			r_info);

	mutex_unlock(&smd_probe_lock);
}

/**
 * get_remote_ch() - gathers remote channel info
 *
 * @shared2:   Pointer to v2 shared channel structure
 * @type:	Edge type
 * @pid:	Processor ID of processor on edge
 * @remote_ch:  Channel that belongs to processor @pid
 * @is_word_access_ch: Bool, is this a word aligned access channel
 *
 * @returns:		0 on success, error code on failure
 */
static int get_remote_ch(void *shared2,
		uint32_t type, uint32_t pid,
		void **remote_ch,
		int is_word_access_ch
		)
{
	if (!remote_ch || !shared2 || !pid_is_on_edge(type, pid) ||
				!pid_is_on_edge(type, SMD_APPS))
		return -EINVAL;

	if (is_word_access_ch)
		*remote_ch =
			&((struct smd_shared_v2_word_access *)(shared2))->ch1;
	else
		*remote_ch = &((struct smd_shared_v2 *)(shared2))->ch1;

	return 0;
}

/**
 * smd_remote_ss_to_edge() - return edge type from remote ss type
 * @name:	remote subsystem name
 *
 * Returns the edge type connected between the local subsystem(APPS)
 * and remote subsystem @name.
 */
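/* For example, smd_remote_ss_to_edge("modem") maps to SMD_APPS_MODEM. */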
int smd_remote_ss_to_edge(const char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
		if (edge_to_pids[i].subsys_name[0] != 0x0) {
			if (!strncmp(edge_to_pids[i].subsys_name, name,
						strlen(name)))
				return i;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL(smd_remote_ss_to_edge);

/*
 * Returns a pointer to the subsystem name or NULL if no
 * subsystem name is available.
 *
 * @type - Edge definition
 */
const char *smd_edge_to_subsystem(uint32_t type)
{
	const char *subsys = NULL;

	if (type < ARRAY_SIZE(edge_to_pids)) {
		subsys = edge_to_pids[type].subsys_name;
		if (subsys[0] == 0x0)
			subsys = NULL;
	}
	return subsys;
}
EXPORT_SYMBOL(smd_edge_to_subsystem);

/*
 * Returns a pointer to the subsystem name given the
 * remote processor ID.
 * subsystem is not necessarily PIL-loadable
 *
 * @pid     Remote processor ID
 * @returns Pointer to subsystem name or NULL if not found
 */
const char *smd_pid_to_subsystem(uint32_t pid)
{
	const char *subsys = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
		if (pid == edge_to_pids[i].remote_pid) {
			if (edge_to_pids[i].subsys_name[0] != 0x0) {
				subsys = edge_to_pids[i].subsys_name;
				break;
			} else if (pid == SMD_RPM) {
				subsys = "rpm";
				break;
			}
		}
	}

	return subsys;
}
EXPORT_SYMBOL(smd_pid_to_subsystem);

static void smd_reset_edge(void *void_ch, unsigned new_state,
				int is_word_access_ch)
{
	if (is_word_access_ch) {
		struct smd_half_channel_word_access *ch =
			(struct smd_half_channel_word_access *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	} else {
		struct smd_half_channel *ch =
			(struct smd_half_channel *)(void_ch);
		if (ch->state != SMD_SS_CLOSED) {
			ch->state = new_state;
			ch->fDSR = 0;
			ch->fCTS = 0;
			ch->fCD = 0;
			ch->fSTATE = 1;
		}
	}
}

/**
 * smd_channel_reset_state() - find channels in an allocation table and set them
 *			       to the specified state
 *
 * @shared:	Pointer to the allocation table to scan
 * @table_id:	ID of the table
 * @new_state:	New state that channels should be set to
 * @pid:	Processor ID of the remote processor for the channels
 * @num_entries: Number of entries in the table
 *
 * Scan the indicated table for channels between Apps and @pid.  If a valid
 * channel is found, set the remote side of the channel to @new_state.
 */
static void smd_channel_reset_state(struct smd_alloc_elm *shared, int table_id,
		unsigned new_state, unsigned pid, unsigned num_entries)
{
	unsigned n;
	void *shared2;
	uint32_t type;
	void *remote_ch;
	int is_word_access;
	unsigned base_id;

	switch (table_id) {
	case PRI_ALLOC_TBL:
		base_id = SMEM_SMD_BASE_ID;
		break;
	case SEC_ALLOC_TBL:
		base_id = SMEM_SMD_BASE_ID_2;
		break;
	default:
		SMD_INFO("%s: invalid table_id:%d\n", __func__, table_id);
		return;
	}

	for (n = 0; n < num_entries; n++) {
		if (!shared[n].ref_count)
			continue;
		if (!shared[n].name[0])
			continue;

		type = SMD_CHANNEL_TYPE(shared[n].type);
		is_word_access = is_word_access_ch(type);
		if (is_word_access)
			shared2 = smem_find_to_proc(base_id + n,
				sizeof(struct smd_shared_v2_word_access), pid,
				0);
		else
			shared2 = smem_find_to_proc(base_id + n,
				sizeof(struct smd_shared_v2), pid, 0);
		if (!shared2)
			continue;

		if (!get_remote_ch(shared2, type, pid,
				&remote_ch, is_word_access))
			smd_reset_edge(remote_ch, new_state, is_word_access);
	}
}

/**
 * pid_is_on_edge() - checks to see if the processor with id pid is on the
 * edge specified by edge_num
 *
 * @edge_num: the number of the edge which is being tested
 * @pid: the id of the processor being tested
 *
 * @returns: true if on edge, false otherwise
 */
static bool pid_is_on_edge(uint32_t edge_num, unsigned pid)
{
	struct edge_to_pid edge;

	if (edge_num >= ARRAY_SIZE(edge_to_pids))
		return 0;

	edge = edge_to_pids[edge_num];
	return (edge.local_pid == pid || edge.remote_pid == pid);
}

void smd_channel_reset(uint32_t restart_pid)
{
	struct smd_alloc_elm *shared_pri;
	struct smd_alloc_elm *shared_sec;
	unsigned long flags;
	unsigned pri_size;
	unsigned sec_size;

	SMD_POWER_INFO("%s: starting reset\n", __func__);

	shared_pri = smem_get_entry_to_proc(ID_CH_ALLOC_TBL, &pri_size,
							restart_pid, 0);
	if (!shared_pri) {
		pr_err("%s: allocation table not initialized\n", __func__);
		return;
	}
	shared_sec = smem_get_entry_to_proc(SMEM_CHANNEL_ALLOC_TBL_2, &sec_size,
							restart_pid, 0);

	/* reset SMSM entry */
	if (smsm_info.state) {
		writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));

		/* restart SMSM init handshake */
		if (restart_pid == SMSM_MODEM) {
			smsm_change_state(SMSM_APPS_STATE,
				SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
				0);
		}

		/* notify SMSM processors */
		smsm_irq_handler(0, 0);
		notify_modem_smsm();
		notify_dsp_smsm();
		notify_dsps_smsm();
		notify_wcnss_smsm();
	}

	/* change all remote states to CLOSING */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared_pri, PRI_ALLOC_TBL, SMD_SS_CLOSING,
				restart_pid, pri_size / sizeof(*shared_pri));
	if (shared_sec)
		smd_channel_reset_state(shared_sec, SEC_ALLOC_TBL,
					SMD_SS_CLOSING, restart_pid,
					sec_size / sizeof(*shared_sec));
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	mb();
	smd_fake_irq_handler(0);

	/* change all remote states to CLOSED */
	mutex_lock(&smd_probe_lock);
	spin_lock_irqsave(&smd_lock, flags);
	smd_channel_reset_state(shared_pri, PRI_ALLOC_TBL, SMD_SS_CLOSED,
				restart_pid, pri_size / sizeof(*shared_pri));
	if (shared_sec)
		smd_channel_reset_state(shared_sec, SEC_ALLOC_TBL,
					SMD_SS_CLOSED, restart_pid,
					sec_size / sizeof(*shared_sec));
	spin_unlock_irqrestore(&smd_lock, flags);
	mutex_unlock(&smd_probe_lock);

	mb();
	smd_fake_irq_handler(0);

	SMD_POWER_INFO("%s: finished reset\n", __func__);
}

/* how many bytes are available for reading */
static int smd_stream_read_avail(struct smd_channel *ch)
{
	return (ch->half_ch->get_head(ch->recv) -
			ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
}

/* how many bytes we are free to write */
static int smd_stream_write_avail(struct smd_channel *ch)
{
	int bytes_avail;

	bytes_avail = ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
			ch->half_ch->get_tail(ch->send)) & ch->fifo_mask) + 1;
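	/*
	 * With fifo_mask equal to fifo_size - 1, the expression above is
	 * fifo_size minus the number of bytes currently queued.  A few bytes
	 * are then held in reserve so the write index can never advance all
	 * the way onto the read index.
	 */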

	if (bytes_avail < SMD_FIFO_FULL_RESERVE)
		bytes_avail = 0;
	else
		bytes_avail -= SMD_FIFO_FULL_RESERVE;
	return bytes_avail;
}

static int smd_packet_read_avail(struct smd_channel *ch)
{
	if (ch->current_packet) {
		int n = smd_stream_read_avail(ch);
		if (n > ch->current_packet)
			n = ch->current_packet;
		return n;
	} else {
		return 0;
	}
}

static int smd_packet_write_avail(struct smd_channel *ch)
{
	int n = smd_stream_write_avail(ch);
	return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
}

static int ch_is_open(struct smd_channel *ch)
{
	return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
		ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
		&& (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
}

/* provide a pointer and length to readable data in the fifo */
static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->recv);
	unsigned tail = ch->half_ch->get_tail(ch->recv);
	*ptr = (void *) (ch->recv_data + tail);

	if (tail <= head)
		return head - tail;
	else
		return ch->fifo_size - tail;
}

static int read_intr_blocked(struct smd_channel *ch)
{
	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
}

/* advance the fifo read pointer after data from ch_read_buffer is consumed */
static void ch_read_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_read_avail(ch));
	ch->half_ch->set_tail(ch->recv,
		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
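	/* publish the new tail index before raising the fTAIL event flag */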
	wmb();
	ch->half_ch->set_fTAIL(ch->send, 1);
}

/* basic read interface to ch_read_{buffer,done} used
 * by smd_*_read() and update_packet_state()
 * will read-and-discard if the _data pointer is null
 */
static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
{
	void *ptr;
	unsigned n;
	unsigned char *data = _data;
	int orig_len = len;
	int r = 0;

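	/*
	 * Drain the FIFO one contiguous segment at a time; ch_read_buffer()
	 * never returns a segment that wraps past the end of the FIFO.
	 */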
	while (len > 0) {
		n = ch_read_buffer(ch, &ptr);
		if (n == 0)
			break;

		if (n > len)
			n = len;
		if (_data) {
			if (user_buf) {
				r = copy_to_user(data, ptr, n);
				if (r > 0) {
					pr_err("%s: "
						"copy_to_user could not copy "
						"%i bytes.\n",
						__func__,
						r);
				}
			} else
				memcpy(data, ptr, n);
		}

		data += n;
		len -= n;
		ch_read_done(ch, n);
	}

	return orig_len - len;
}

static void update_stream_state(struct smd_channel *ch)
{
	/* streams have no special state requiring updating */
}

static void update_packet_state(struct smd_channel *ch)
{
	unsigned hdr[5];
	int r;

	/* can't do anything if we're in the middle of a packet */
	while (ch->current_packet == 0) {
		/* discard 0 length packets if any */

		/* don't bother unless we can get the full header */
		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
			return;

		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
		BUG_ON(r != SMD_HEADER_SIZE);

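		/* the first word of the packet header is the payload length */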
		ch->current_packet = hdr[0];
	}
}

/**
 * ch_write_buffer() - Provide a pointer and length for the next segment of
 * free space in the FIFO.
 * @ch: channel
 * @ptr: Address to pointer for the next segment write
 * @returns: Maximum size that can be written until the FIFO is either full
 *           or the end of the FIFO has been reached.
 *
 * The returned pointer and length are passed to memcpy, so the next segment is
 * defined as either the space available between the read index (tail) and the
 * write index (head) or the space available to the end of the FIFO.
 */
static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
{
	unsigned head = ch->half_ch->get_head(ch->send);
	unsigned tail = ch->half_ch->get_tail(ch->send);
	*ptr = (void *) (ch->send_data + head);

	if (head < tail) {
		return tail - head - SMD_FIFO_FULL_RESERVE;
	} else {
		if (tail < SMD_FIFO_FULL_RESERVE)
			return ch->fifo_size + tail - head
					- SMD_FIFO_FULL_RESERVE;
		else
			return ch->fifo_size - head;
	}
}

/* advance the fifo write pointer after freespace
 * from ch_write_buffer is filled
 */
static void ch_write_done(struct smd_channel *ch, unsigned count)
{
	BUG_ON(count > smd_stream_write_avail(ch));
	ch->half_ch->set_head(ch->send,
		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
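	/* publish the new head index before raising the fHEAD event flag */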
	wmb();
	ch->half_ch->set_fHEAD(ch->send, 1);
}

static void ch_set_state(struct smd_channel *ch, unsigned n)
{
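	/*
	 * The fDSR/fCTS/fCD handshake bits (named after the serial
	 * modem-control signals) are asserted only while this side of the
	 * channel is opened.
	 */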
	if (n == SMD_SS_OPENED) {
		ch->half_ch->set_fDSR(ch->send, 1);
		ch->half_ch->set_fCTS(ch->send, 1);
		ch->half_ch->set_fCD(ch->send, 1);
	} else {
		ch->half_ch->set_fDSR(ch->send, 0);
		ch->half_ch->set_fCTS(ch->send, 0);
		ch->half_ch->set_fCD(ch->send, 0);
	}
	ch->half_ch->set_state(ch->send, n);
	ch->half_ch->set_fSTATE(ch->send, 1);
	ch->notify_other_cpu(ch);
}

/**
 * do_smd_probe() - Look for newly created SMD channels from a specific processor
 *
 * @remote_pid: remote processor id of the proc that may have created channels
 */
static void do_smd_probe(unsigned remote_pid)
{
	unsigned free_space;

	free_space = smem_get_free_space(remote_pid);
	if (free_space != remote_info[remote_pid].free_space) {
		remote_info[remote_pid].free_space = free_space;
		schedule_work(&remote_info[remote_pid].probe_work);
	}
}

/**
 * do_smd_probe_all() - Look for newly created SMD channels from any remote proc
 */
static void do_smd_probe_all(void)
{
	int i;

	for (i = 1; i < NUM_SMD_SUBSYSTEMS; ++i)
		do_smd_probe(i);
}

static void smd_state_change(struct smd_channel *ch,
				unsigned last, unsigned next)
{
	ch->last_state = next;

	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);

	switch (next) {
	case SMD_SS_OPENING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			ch->half_ch->set_tail(ch->recv, 0);
			ch->half_ch->set_head(ch->send, 0);
			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
			ch_set_state(ch, SMD_SS_OPENING);
		}
		break;
	case SMD_SS_OPENED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
			ch_set_state(ch, SMD_SS_OPENED);
			ch->notify(ch->priv, SMD_EVENT_OPEN);
		}
		break;
	case SMD_SS_FLUSHING:
	case SMD_SS_RESET:
		/* we should force them to close? */
		break;
	case SMD_SS_CLOSED:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
			ch_set_state(ch, SMD_SS_CLOSING);
			ch->current_packet = 0;
			ch->pending_pkt_sz = 0;
			ch->notify(ch->priv, SMD_EVENT_CLOSE);
		}
		break;
	case SMD_SS_CLOSING:
		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
			list_move(&ch->ch_list,
					&smd_ch_to_close_list);
			queue_work(channel_close_wq,
						&finalize_channel_close_work);
		}
		break;
	}
}

static void handle_smd_irq_closing_list(void)
{
	unsigned long flags;
	struct smd_channel *ch;
	struct smd_channel *index;
	unsigned tmp;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
		if (ch->half_ch->get_fSTATE(ch->recv))
			ch->half_ch->set_fSTATE(ch->recv, 0);
		tmp = ch->half_ch->get_state(ch->recv);
		if (tmp != ch->last_state)
			smd_state_change(ch, ch->last_state, tmp);
	}
	spin_unlock_irqrestore(&smd_lock, flags);
}

static void handle_smd_irq(struct remote_proc_info *r_info,
		void (*notify)(smd_channel_t *ch))
{
	unsigned long flags;
	struct smd_channel *ch;
	unsigned ch_flags;
	unsigned tmp;
	unsigned char state_change;
	struct list_head *list;

	list = &r_info->ch_list;

	spin_lock_irqsave(&smd_lock, flags);
	list_for_each_entry(ch, list, ch_list) {
		state_change = 0;
		ch_flags = 0;
1291 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001292 if (ch->half_ch->get_fHEAD(ch->recv)) {
1293 ch->half_ch->set_fHEAD(ch->recv, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001294 ch_flags |= 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001295 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001296 if (ch->half_ch->get_fTAIL(ch->recv)) {
1297 ch->half_ch->set_fTAIL(ch->recv, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001298 ch_flags |= 2;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001299 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001300 if (ch->half_ch->get_fSTATE(ch->recv)) {
1301 ch->half_ch->set_fSTATE(ch->recv, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001302 ch_flags |= 4;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001303 }
1304 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001305 tmp = ch->half_ch->get_state(ch->recv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001306 if (tmp != ch->last_state) {
Jeff Hugo903d58a2013-08-29 14:57:00 -06001307 SMD_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
Eric Holmberg98c6c642012-02-24 11:29:35 -07001308 ch->n, ch->name, ch->last_state, tmp);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001309 smd_state_change(ch, ch->last_state, tmp);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001310 state_change = 1;
1311 }
Eric Holmberg65a7d432012-02-24 11:28:56 -07001312 if (ch_flags & 0x3) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001313 ch->update_state(ch);
Jeff Hugo903d58a2013-08-29 14:57:00 -06001314 SMD_POWER_INFO(
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301315 "SMD ch%d '%s' Data event 0x%x tx%d/rx%d %dr/%dw : %dr/%dw\n",
1316 ch->n, ch->name,
1317 ch_flags,
1318 ch->fifo_size -
1319 (smd_stream_write_avail(ch) + 1),
1320 smd_stream_read_avail(ch),
1321 ch->half_ch->get_tail(ch->send),
1322 ch->half_ch->get_head(ch->send),
1323 ch->half_ch->get_tail(ch->recv),
1324 ch->half_ch->get_head(ch->recv)
1325 );
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001326 ch->notify(ch->priv, SMD_EVENT_DATA);
1327 }
Eric Holmberg98c6c642012-02-24 11:29:35 -07001328 if (ch_flags & 0x4 && !state_change) {
Jeff Hugo903d58a2013-08-29 14:57:00 -06001329 SMD_POWER_INFO("SMD ch%d '%s' State update\n",
Eric Holmberg98c6c642012-02-24 11:29:35 -07001330 ch->n, ch->name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001331 ch->notify(ch->priv, SMD_EVENT_STATUS);
Eric Holmberg98c6c642012-02-24 11:29:35 -07001332 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001333 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001334 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001335 do_smd_probe(r_info->remote_pid);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001336}
1337
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301338static inline void log_irq(uint32_t subsystem)
1339{
1340 const char *subsys = smd_edge_to_subsystem(subsystem);
1341
Jay Chokshi83b4f6132013-02-14 16:20:56 -08001342 (void) subsys;
1343
Jeff Hugo903d58a2013-08-29 14:57:00 -06001344 SMD_POWER_INFO("SMD Int %s->Apps\n", subsys);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301345}
1346
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05301347irqreturn_t smd_modem_irq_handler(int irq, void *data)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001348{
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301349 log_irq(SMD_APPS_MODEM);
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001350 ++interrupt_stats[SMD_MODEM].smd_in_count;
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001351 handle_smd_irq(&remote_info[SMD_MODEM], notify_modem_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001352 handle_smd_irq_closing_list();
Brian Swetland37521a32009-07-01 18:30:47 -07001353 return IRQ_HANDLED;
1354}
1355
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05301356irqreturn_t smd_dsp_irq_handler(int irq, void *data)
Brian Swetland37521a32009-07-01 18:30:47 -07001357{
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301358 log_irq(SMD_APPS_QDSP);
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001359 ++interrupt_stats[SMD_Q6].smd_in_count;
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001360 handle_smd_irq(&remote_info[SMD_Q6], notify_dsp_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001361 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001362 return IRQ_HANDLED;
1363}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001364
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05301365irqreturn_t smd_dsps_irq_handler(int irq, void *data)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001366{
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301367 log_irq(SMD_APPS_DSPS);
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001368 ++interrupt_stats[SMD_DSPS].smd_in_count;
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001369 handle_smd_irq(&remote_info[SMD_DSPS], notify_dsps_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001370 handle_smd_irq_closing_list();
1371 return IRQ_HANDLED;
1372}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001373
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05301374irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001375{
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301376 log_irq(SMD_APPS_WCNSS);
Eric Holmberg7ad623a2012-03-01 14:41:10 -07001377 ++interrupt_stats[SMD_WCNSS].smd_in_count;
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001378 handle_smd_irq(&remote_info[SMD_WCNSS], notify_wcnss_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001379 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001380 return IRQ_HANDLED;
1381}
1382
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05301383irqreturn_t smd_rpm_irq_handler(int irq, void *data)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001384{
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301385 log_irq(SMD_APPS_RPM);
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001386 ++interrupt_stats[SMD_RPM].smd_in_count;
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001387 handle_smd_irq(&remote_info[SMD_RPM], notify_rpm_smd);
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001388 handle_smd_irq_closing_list();
1389 return IRQ_HANDLED;
1390}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001391
1392static void smd_fake_irq_handler(unsigned long arg)
1393{
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001394 handle_smd_irq(&remote_info[SMD_MODEM], notify_modem_smd);
1395 handle_smd_irq(&remote_info[SMD_Q6], notify_dsp_smd);
1396 handle_smd_irq(&remote_info[SMD_DSPS], notify_dsps_smd);
1397 handle_smd_irq(&remote_info[SMD_WCNSS], notify_wcnss_smd);
1398 handle_smd_irq(&remote_info[SMD_RPM], notify_rpm_smd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001399 handle_smd_irq_closing_list();
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001400}
1401
1402static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
1403
Brian Swetland37521a32009-07-01 18:30:47 -07001404static inline int smd_need_int(struct smd_channel *ch)
1405{
1406 if (ch_is_open(ch)) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001407 if (ch->half_ch->get_fHEAD(ch->recv) ||
1408 ch->half_ch->get_fTAIL(ch->recv) ||
1409 ch->half_ch->get_fSTATE(ch->recv))
Brian Swetland37521a32009-07-01 18:30:47 -07001410 return 1;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001411 if (ch->half_ch->get_state(ch->recv) != ch->last_state)
Brian Swetland37521a32009-07-01 18:30:47 -07001412 return 1;
1413 }
1414 return 0;
1415}
1416
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001417void smd_sleep_exit(void)
1418{
1419 unsigned long flags;
1420 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001421 int need_int = 0;
1422
1423 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001424 list_for_each_entry(ch, &remote_info[SMD_MODEM].ch_list, ch_list) {
Brian Swetland37521a32009-07-01 18:30:47 -07001425 if (smd_need_int(ch)) {
1426 need_int = 1;
1427 break;
1428 }
1429 }
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001430 list_for_each_entry(ch, &remote_info[SMD_Q6].ch_list, ch_list) {
Brian Swetland37521a32009-07-01 18:30:47 -07001431 if (smd_need_int(ch)) {
1432 need_int = 1;
1433 break;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001434 }
1435 }
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001436 list_for_each_entry(ch, &remote_info[SMD_DSPS].ch_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001437 if (smd_need_int(ch)) {
1438 need_int = 1;
1439 break;
1440 }
1441 }
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001442 list_for_each_entry(ch, &remote_info[SMD_WCNSS].ch_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001443 if (smd_need_int(ch)) {
1444 need_int = 1;
1445 break;
1446 }
1447 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001448 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001449 do_smd_probe_all();
Brian Swetland37521a32009-07-01 18:30:47 -07001450
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001451 if (need_int) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001452 SMD_DBG("smd_sleep_exit need interrupt\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001453 tasklet_schedule(&smd_fake_irq_tasklet);
1454 }
1455}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001456EXPORT_SYMBOL(smd_sleep_exit);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001457
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001458static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001459{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001460 if (SMD_XFER_TYPE(alloc_elm->type) == 1)
1461 return 0;
1462 else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001463 return 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001464
1465 /* for cases where xfer type is 0 */
1466 if (!strncmp(alloc_elm->name, "DAL", 3))
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001467 return 0;
1468
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001469 /* for cases where xfer type is 0 */
1470 if (!strncmp(alloc_elm->name, "RPCCALL_QDSP", 12))
1471 return 0;
1472
1473 if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001474 return 1;
1475 else
1476 return 0;
1477}
1478
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001479static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
1480 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001481{
1482 void *ptr;
1483 const unsigned char *buf = _data;
1484 unsigned xfer;
1485 int orig_len = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001486 int r = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001487
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001488 SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001489 if (len < 0)
1490 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001491 else if (len == 0)
1492 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001493
1494 while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
Eric Holmberg7a717872012-02-03 11:58:04 -07001495 if (!ch_is_open(ch)) {
1496 len = orig_len;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001497 break;
Eric Holmberg7a717872012-02-03 11:58:04 -07001498 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001499 if (xfer > len)
1500 xfer = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001501 if (user_buf) {
1502 r = copy_from_user(ptr, buf, xfer);
1503 if (r > 0) {
 1504 pr_err("%s: copy_from_user could not copy %i bytes.\n",
 1505 __func__, r);
1509 }
1510 } else
1511 memcpy(ptr, buf, xfer);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001512 ch_write_done(ch, xfer);
1513 len -= xfer;
1514 buf += xfer;
1515 if (len == 0)
1516 break;
1517 }
1518
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001519 if (orig_len - len)
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301520 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001521
1522 return orig_len - len;
1523}
1524
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001525static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
1526 int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001527{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001528 int ret;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001529 unsigned hdr[5];
1530
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001531 SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001532 if (len < 0)
1533 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001534 else if (len == 0)
1535 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001536
1537 if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
1538 return -ENOMEM;
1539
1540 hdr[0] = len;
1541 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
1542
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001543
1544 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
1545 if (ret < 0 || ret != sizeof(hdr)) {
 1546 SMD_DBG("%s failed to write pkt header: %d returned\n", __func__, ret);
1548 return -1;
1549 }
1550
1551
1552 ret = smd_stream_write(ch, _data, len, user_buf);
1553 if (ret < 0 || ret != len) {
 1554 SMD_DBG("%s failed to write pkt data: %d returned\n", __func__, ret);
1556 return ret;
1557 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001558
1559 return len;
1560}
1561
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001562static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001563{
1564 int r;
1565
1566 if (len < 0)
1567 return -EINVAL;
1568
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001569 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001570 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001571 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301572 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001573
1574 return r;
1575}
1576
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001577static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001578{
1579 unsigned long flags;
1580 int r;
1581
1582 if (len < 0)
1583 return -EINVAL;
1584
1585 if (len > ch->current_packet)
1586 len = ch->current_packet;
1587
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 r = ch_read(ch, data, len, user_buf);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001589 if (r > 0)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001590 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301591 ch->notify_other_cpu(ch);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001592
1593 spin_lock_irqsave(&smd_lock, flags);
1594 ch->current_packet -= r;
1595 update_packet_state(ch);
1596 spin_unlock_irqrestore(&smd_lock, flags);
1597
1598 return r;
1599}
1600
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001601static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
1602 int user_buf)
1603{
1604 int r;
1605
1606 if (len < 0)
1607 return -EINVAL;
1608
1609 if (len > ch->current_packet)
1610 len = ch->current_packet;
1611
1612 r = ch_read(ch, data, len, user_buf);
1613 if (r > 0)
1614 if (!read_intr_blocked(ch))
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301615 ch->notify_other_cpu(ch);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001616
1617 ch->current_packet -= r;
1618 update_packet_state(ch);
1619
1620 return r;
1621}
1622
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301623#if (defined(CONFIG_MSM_SMD_PKG4) || defined(CONFIG_MSM_SMD_PKG3))
Jeff Hugoa2c5e752013-08-14 10:33:06 -06001624/**
1625 * smd_alloc_v2() - Init local channel structure with information stored in SMEM
1626 *
1627 * @ch: pointer to the local structure for this channel
1628 * @table_id: the id of the table this channel resides in. 1 = first table, 2 =
1629 * second table, etc
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001630 * @r_info: pointer to the info structure of the remote proc for this channel
Jeff Hugoa2c5e752013-08-14 10:33:06 -06001631 * @returns: -EINVAL for failure; 0 for success
1632 *
1633 * ch must point to an allocated instance of struct smd_channel that is zeroed
1634 * out, and has the n and type members already initialized to the correct values
1635 */
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001636static int smd_alloc_v2(struct smd_channel *ch, int table_id,
1637 struct remote_proc_info *r_info)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001638{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001639 void *buffer;
1640 unsigned buffer_sz;
Jeff Hugoa2c5e752013-08-14 10:33:06 -06001641 unsigned base_id;
1642 unsigned fifo_id;
1643
1644 switch (table_id) {
1645 case PRI_ALLOC_TBL:
1646 base_id = SMEM_SMD_BASE_ID;
1647 fifo_id = SMEM_SMD_FIFO_BASE_ID;
1648 break;
1649 case SEC_ALLOC_TBL:
1650 base_id = SMEM_SMD_BASE_ID_2;
1651 fifo_id = SMEM_SMD_FIFO_BASE_ID_2;
1652 break;
1653 default:
1654 SMD_INFO("Invalid table_id:%d passed to smd_alloc\n", table_id);
1655 return -EINVAL;
1656 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001657
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001658 if (is_word_access_ch(ch->type)) {
1659 struct smd_shared_v2_word_access *shared2;
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001660 shared2 = smem_alloc_to_proc(base_id + ch->n, sizeof(*shared2),
1661 r_info->remote_pid, 0);
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001662 if (!shared2) {
1663 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1664 return -EINVAL;
1665 }
1666 ch->send = &shared2->ch0;
1667 ch->recv = &shared2->ch1;
1668 } else {
1669 struct smd_shared_v2 *shared2;
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001670 shared2 = smem_alloc_to_proc(base_id + ch->n, sizeof(*shared2),
1671 r_info->remote_pid, 0);
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001672 if (!shared2) {
1673 SMD_INFO("smem_alloc failed ch=%d\n", ch->n);
1674 return -EINVAL;
1675 }
1676 ch->send = &shared2->ch0;
1677 ch->recv = &shared2->ch1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 }
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001679 ch->half_ch = get_half_ch_funcs(ch->type);
1680
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001681 buffer = smem_get_entry_to_proc(fifo_id + ch->n, &buffer_sz,
1682 r_info->remote_pid, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001683 if (!buffer) {
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301684 SMD_INFO("smem_get_entry failed\n");
1685 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001686 }
1687
1688 /* buffer must be a power-of-two size */
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301689 if (buffer_sz & (buffer_sz - 1)) {
1690 SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
1691 return -EINVAL;
1692 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001693 buffer_sz /= 2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001694 ch->send_data = buffer;
1695 ch->recv_data = buffer + buffer_sz;
1696 ch->fifo_size = buffer_sz;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001697
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001698 return 0;
1699}
1700
1701static int smd_alloc_v1(struct smd_channel *ch)
1702{
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301703 return -EINVAL;
1704}
1705
1706#else /* define v1 for older targets */
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001707static int smd_alloc_v2(struct smd_channel *ch, int table_id,
1708 struct remote_proc_info *r_info)
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301709{
1710 return -EINVAL;
1711}
1712
1713static int smd_alloc_v1(struct smd_channel *ch)
1714{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001715 struct smd_shared_v1 *shared1;
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001716 shared1 = smem_alloc_to_proc(ID_SMD_CHANNELS + ch->n, sizeof(*shared1),
1717 0, SMEM_ANY_HOST_FLAG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001718 if (!shared1) {
1719 pr_err("smd_alloc_channel() cid %d does not exist\n", ch->n);
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301720 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001721 }
1722 ch->send = &shared1->ch0;
1723 ch->recv = &shared1->ch1;
1724 ch->send_data = shared1->data0;
1725 ch->recv_data = shared1->data1;
1726 ch->fifo_size = SMD_BUF_SIZE;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001727 ch->half_ch = get_half_ch_funcs(ch->type);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001728 return 0;
1729}
1730
Angshuman Sarkarac7d6252011-09-30 18:20:59 +05301731#endif
1732
Jeff Hugoa2c5e752013-08-14 10:33:06 -06001733/**
1734 * smd_alloc_channel() - Create and init local structures for a newly allocated
1735 * SMD channel
1736 *
1737 * @alloc_elm: the allocation element stored in SMEM for this channel
1738 * @table_id: the id of the table this channel resides in. 1 = first table, 2 =
 1739 * second table, etc
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001740 * @r_info: pointer to the info structure of the remote proc for this channel
Jeff Hugoa2c5e752013-08-14 10:33:06 -06001741 * @returns: -1 for failure; 0 for success
1742 */
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001743static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm, int table_id,
1744 struct remote_proc_info *r_info)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001745{
1746 struct smd_channel *ch;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001747
1748 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1749 if (ch == 0) {
1750 pr_err("smd_alloc_channel() out of memory\n");
Brian Swetland34f719b2009-10-30 16:22:05 -07001751 return -1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001752 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001753 ch->n = alloc_elm->cid;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001754 ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001755
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001756 if (smd_alloc_v2(ch, table_id, r_info) && smd_alloc_v1(ch)) {
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001757 kfree(ch);
Brian Swetland34f719b2009-10-30 16:22:05 -07001758 return -1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001759 }
1760
1761 ch->fifo_mask = ch->fifo_size - 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001762
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001763	/* probe_worker guarantees ch->type will be a valid type */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001764 if (ch->type == SMD_APPS_MODEM)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001765 ch->notify_other_cpu = notify_modem_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001766 else if (ch->type == SMD_APPS_QDSP)
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001767 ch->notify_other_cpu = notify_dsp_smd;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001768 else if (ch->type == SMD_APPS_DSPS)
1769 ch->notify_other_cpu = notify_dsps_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001770 else if (ch->type == SMD_APPS_WCNSS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001771 ch->notify_other_cpu = notify_wcnss_smd;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001772 else if (ch->type == SMD_APPS_RPM)
1773 ch->notify_other_cpu = notify_rpm_smd;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07001774
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001775 if (smd_is_packet(alloc_elm)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001776 ch->read = smd_packet_read;
1777 ch->write = smd_packet_write;
1778 ch->read_avail = smd_packet_read_avail;
1779 ch->write_avail = smd_packet_write_avail;
1780 ch->update_state = update_packet_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001781 ch->read_from_cb = smd_packet_read_from_cb;
1782 ch->is_pkt_ch = 1;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001783 } else {
1784 ch->read = smd_stream_read;
1785 ch->write = smd_stream_write;
1786 ch->read_avail = smd_stream_read_avail;
1787 ch->write_avail = smd_stream_write_avail;
1788 ch->update_state = update_stream_state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001789 ch->read_from_cb = smd_stream_read;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001790 }
1791
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001792 memcpy(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN);
1793 ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001794
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001795 ch->pdev.name = ch->name;
1796 ch->pdev.id = ch->type;
1797
1798 SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
1799 ch->name, ch->n);
1800
1801 mutex_lock(&smd_creation_mutex);
1802 list_add(&ch->ch_list, &smd_ch_closed_list);
1803 mutex_unlock(&smd_creation_mutex);
1804
1805 platform_device_register(&ch->pdev);
1806 if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
 1807 /* create a platform device to be used by the smd_tty driver
1808 * so that it can access the loopback port
1809 */
1810 loopback_tty_pdev.id = ch->type;
1811 platform_device_register(&loopback_tty_pdev);
1812 }
1813 return 0;
1814}
1815
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05301816static inline void notify_loopback_smd(smd_channel_t *ch_notif)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001817{
1818 unsigned long flags;
1819 struct smd_channel *ch;
1820
1821 spin_lock_irqsave(&smd_lock, flags);
1822 list_for_each_entry(ch, &smd_ch_list_loopback, ch_list) {
1823 ch->notify(ch->priv, SMD_EVENT_DATA);
1824 }
1825 spin_unlock_irqrestore(&smd_lock, flags);
1826}
1827
1828static int smd_alloc_loopback_channel(void)
1829{
1830 static struct smd_half_channel smd_loopback_ctl;
1831 static char smd_loopback_data[SMD_BUF_SIZE];
1832 struct smd_channel *ch;
1833
1834 ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
1835 if (ch == 0) {
1836 pr_err("%s: out of memory\n", __func__);
1837 return -1;
1838 }
1839 ch->n = SMD_LOOPBACK_CID;
1840
1841 ch->send = &smd_loopback_ctl;
1842 ch->recv = &smd_loopback_ctl;
1843 ch->send_data = smd_loopback_data;
1844 ch->recv_data = smd_loopback_data;
1845 ch->fifo_size = SMD_BUF_SIZE;
1846
1847 ch->fifo_mask = ch->fifo_size - 1;
1848 ch->type = SMD_LOOPBACK_TYPE;
1849 ch->notify_other_cpu = notify_loopback_smd;
1850
1851 ch->read = smd_stream_read;
1852 ch->write = smd_stream_write;
1853 ch->read_avail = smd_stream_read_avail;
1854 ch->write_avail = smd_stream_write_avail;
1855 ch->update_state = update_stream_state;
1856 ch->read_from_cb = smd_stream_read;
1857
1858 memset(ch->name, 0, 20);
1859 memcpy(ch->name, "local_loopback", 14);
1860
1861 ch->pdev.name = ch->name;
1862 ch->pdev.id = ch->type;
1863
1864 SMD_INFO("%s: '%s' cid=%d\n", __func__, ch->name, ch->n);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001865
1866 mutex_lock(&smd_creation_mutex);
1867 list_add(&ch->ch_list, &smd_ch_closed_list);
1868 mutex_unlock(&smd_creation_mutex);
1869
1870 platform_device_register(&ch->pdev);
Brian Swetland34f719b2009-10-30 16:22:05 -07001871 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001872}
1873
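/*
 * Illustrative sketch, not part of this driver: because the loopback channel
 * above points send and recv at the same half-channel and FIFO, anything
 * written to "local_loopback" can immediately be read back on the same
 * handle.  The function below is a hypothetical self-test; it assumes the
 * loopback channel has already been allocated and registered.
 */
static void example_loopback_selftest(void)
{
	smd_channel_t *lb;
	char echo[4];

	if (smd_named_open_on_edge("local_loopback", SMD_LOOPBACK_TYPE,
				   &lb, NULL, NULL))
		return;

	/* loopback channels start in SMD_SS_OPENED, so writes work at once */
	smd_write(lb, "ping", 4);
	if (smd_read_avail(lb) >= 4 && smd_read(lb, echo, 4) == 4 &&
	    !memcmp(echo, "ping", 4))
		pr_info("%s: loopback echo ok\n", __func__);

	smd_close(lb);
}
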
1874static void do_nothing_notify(void *priv, unsigned flags)
1875{
1876}
1877
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001878static void finalize_channel_close_fn(struct work_struct *work)
1879{
1880 unsigned long flags;
1881 struct smd_channel *ch;
1882 struct smd_channel *index;
1883
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001884 mutex_lock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001885 spin_lock_irqsave(&smd_lock, flags);
1886 list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
1887 list_del(&ch->ch_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001888 list_add(&ch->ch_list, &smd_ch_closed_list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001889 ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
1890 ch->notify = do_nothing_notify;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001891 }
1892 spin_unlock_irqrestore(&smd_lock, flags);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001893 mutex_unlock(&smd_creation_mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001894}
1895
1896struct smd_channel *smd_get_channel(const char *name, uint32_t type)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001897{
1898 struct smd_channel *ch;
1899
1900 mutex_lock(&smd_creation_mutex);
1901 list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001902 if (!strcmp(name, ch->name) &&
1903 (type == ch->type)) {
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001904 list_del(&ch->ch_list);
1905 mutex_unlock(&smd_creation_mutex);
1906 return ch;
1907 }
1908 }
1909 mutex_unlock(&smd_creation_mutex);
1910
1911 return NULL;
1912}
1913
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001914int smd_named_open_on_edge(const char *name, uint32_t edge,
1915 smd_channel_t **_ch,
1916 void *priv, void (*notify)(void *, unsigned))
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001917{
1918 struct smd_channel *ch;
1919 unsigned long flags;
1920
Jeff Hugo7cc06b12013-06-17 16:13:18 -06001921 if (smd_initialized == 0 && !smd_edge_inited(edge)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001922 SMD_INFO("smd_open() before smd_init()\n");
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001923 return -ENODEV;
1924 }
1925
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001926 SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);
1927
1928 ch = smd_get_channel(name, edge);
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001929 if (!ch) {
Eric Holmbergbb2b1fa2011-10-12 16:41:37 -06001930 /* check closing list for port */
1931 spin_lock_irqsave(&smd_lock, flags);
1932 list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
1933 if (!strncmp(name, ch->name, 20) &&
1934 (edge == ch->type)) {
1935 /* channel exists, but is being closed */
1936 spin_unlock_irqrestore(&smd_lock, flags);
1937 return -EAGAIN;
1938 }
1939 }
1940
1941 /* check closing workqueue list for port */
1942 list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
1943 if (!strncmp(name, ch->name, 20) &&
1944 (edge == ch->type)) {
1945 /* channel exists, but is being closed */
1946 spin_unlock_irqrestore(&smd_lock, flags);
1947 return -EAGAIN;
1948 }
1949 }
1950 spin_unlock_irqrestore(&smd_lock, flags);
1951
1952 /* one final check to handle closing->closed race condition */
1953 ch = smd_get_channel(name, edge);
1954 if (!ch)
1955 return -ENODEV;
1956 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001957
1958 if (notify == 0)
1959 notify = do_nothing_notify;
1960
1961 ch->notify = notify;
1962 ch->current_packet = 0;
1963 ch->last_state = SMD_SS_CLOSED;
1964 ch->priv = priv;
1965
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001966 if (edge == SMD_LOOPBACK_TYPE) {
1967 ch->last_state = SMD_SS_OPENED;
Jeff Hugo918b2dc2012-03-21 13:42:09 -06001968 ch->half_ch->set_state(ch->send, SMD_SS_OPENED);
1969 ch->half_ch->set_fDSR(ch->send, 1);
1970 ch->half_ch->set_fCTS(ch->send, 1);
1971 ch->half_ch->set_fCD(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001972 }
1973
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001974 *_ch = ch;
1975
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001976 SMD_DBG("smd_open: opening '%s'\n", ch->name);
1977
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001978 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001979 if (unlikely(ch->type == SMD_LOOPBACK_TYPE))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001980 list_add(&ch->ch_list, &smd_ch_list_loopback);
Jeff Hugof4df2ff2013-08-28 17:45:50 -06001981 else
1982 list_add(&ch->ch_list,
1983 &remote_info[edge_to_pids[ch->type].remote_pid].ch_list);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001984
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001985 SMD_DBG("%s: opening ch %d\n", __func__, ch->n);
1986
1987 if (edge != SMD_LOOPBACK_TYPE)
1988 smd_state_change(ch, ch->last_state, SMD_SS_OPENING);
1989
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001990 spin_unlock_irqrestore(&smd_lock, flags);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07001991
1992 return 0;
1993}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001994EXPORT_SYMBOL(smd_named_open_on_edge);
1995
1996
1997int smd_open(const char *name, smd_channel_t **_ch,
1998 void *priv, void (*notify)(void *, unsigned))
1999{
2000 return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
2001 notify);
2002}
2003EXPORT_SYMBOL(smd_open);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002004
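/*
 * Illustrative usage sketch, not part of this driver: a minimal client of a
 * stream channel.  The channel name "EXAMPLE_CH" and the handle variable are
 * assumptions made for the example.  The notify callback is invoked from
 * handle_smd_irq() with smd_lock held and interrupts disabled, so it must not
 * block; stream reads via smd_read() are safe here, but heavier work is
 * normally deferred to a workqueue.
 */
static smd_channel_t *example_stream_ch;

static void example_stream_notify(void *priv, unsigned event)
{
	char buf[64];
	int avail;

	switch (event) {
	case SMD_EVENT_OPEN:
		/* both ends are now SMD_SS_OPENED; writes may begin */
		smd_write(example_stream_ch, "hello", 5);
		break;
	case SMD_EVENT_DATA:
		/* drain whatever the remote processor has queued */
		while ((avail = smd_read_avail(example_stream_ch)) > 0) {
			if (avail > (int)sizeof(buf))
				avail = sizeof(buf);
			smd_read(example_stream_ch, buf, avail);
		}
		break;
	case SMD_EVENT_CLOSE:
		/* remote side closed; clients typically call smd_close() and
		 * wait for SMD_EVENT_REOPEN_READY before reopening */
		break;
	}
}

static int example_stream_open(void)
{
	return smd_named_open_on_edge("EXAMPLE_CH", SMD_APPS_MODEM,
				      &example_stream_ch, NULL,
				      example_stream_notify);
}
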
2005int smd_close(smd_channel_t *ch)
2006{
2007 unsigned long flags;
2008
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002009 if (ch == 0)
2010 return -1;
2011
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002012 SMD_INFO("smd_close(%s)\n", ch->name);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002013
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002014 spin_lock_irqsave(&smd_lock, flags);
2015 list_del(&ch->ch_list);
2016 if (ch->n == SMD_LOOPBACK_CID) {
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002017 ch->half_ch->set_fDSR(ch->send, 0);
2018 ch->half_ch->set_fCTS(ch->send, 0);
2019 ch->half_ch->set_fCD(ch->send, 0);
2020 ch->half_ch->set_state(ch->send, SMD_SS_CLOSED);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002021 } else
2022 ch_set_state(ch, SMD_SS_CLOSED);
2023
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002024 if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002025 list_add(&ch->ch_list, &smd_ch_closing_list);
2026 spin_unlock_irqrestore(&smd_lock, flags);
2027 } else {
2028 spin_unlock_irqrestore(&smd_lock, flags);
2029 ch->notify = do_nothing_notify;
2030 mutex_lock(&smd_creation_mutex);
2031 list_add(&ch->ch_list, &smd_ch_closed_list);
2032 mutex_unlock(&smd_creation_mutex);
2033 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002034
2035 return 0;
2036}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002037EXPORT_SYMBOL(smd_close);
2038
2039int smd_write_start(smd_channel_t *ch, int len)
2040{
2041 int ret;
2042 unsigned hdr[5];
2043
2044 if (!ch) {
2045 pr_err("%s: Invalid channel specified\n", __func__);
2046 return -ENODEV;
2047 }
2048 if (!ch->is_pkt_ch) {
2049 pr_err("%s: non-packet channel specified\n", __func__);
2050 return -EACCES;
2051 }
2052 if (len < 1) {
2053 pr_err("%s: invalid length: %d\n", __func__, len);
2054 return -EINVAL;
2055 }
2056
2057 if (ch->pending_pkt_sz) {
2058 pr_err("%s: packet of size: %d in progress\n", __func__,
2059 ch->pending_pkt_sz);
2060 return -EBUSY;
2061 }
2062 ch->pending_pkt_sz = len;
2063
2064 if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
2065 ch->pending_pkt_sz = 0;
2066 SMD_DBG("%s: no space to write packet header\n", __func__);
2067 return -EAGAIN;
2068 }
2069
2070 hdr[0] = len;
2071 hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
2072
2073
2074 ret = smd_stream_write(ch, hdr, sizeof(hdr), 0);
2075 if (ret < 0 || ret != sizeof(hdr)) {
2076 ch->pending_pkt_sz = 0;
2077 pr_err("%s: packet header failed to write\n", __func__);
2078 return -EPERM;
2079 }
2080 return 0;
2081}
2082EXPORT_SYMBOL(smd_write_start);
2083
2084int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
2085{
2086 int bytes_written;
2087
2088 if (!ch) {
2089 pr_err("%s: Invalid channel specified\n", __func__);
2090 return -ENODEV;
2091 }
2092 if (len < 1) {
2093 pr_err("%s: invalid length: %d\n", __func__, len);
2094 return -EINVAL;
2095 }
2096
2097 if (!ch->pending_pkt_sz) {
2098 pr_err("%s: no transaction in progress\n", __func__);
2099 return -ENOEXEC;
2100 }
2101 if (ch->pending_pkt_sz - len < 0) {
 2102 pr_err("%s: segment of size %d would exceed the packet length\n",
 2103 __func__, len);
2104 return -EINVAL;
2105 }
2106
2107 bytes_written = smd_stream_write(ch, data, len, user_buf);
2108
2109 ch->pending_pkt_sz -= bytes_written;
2110
2111 return bytes_written;
2112}
2113EXPORT_SYMBOL(smd_write_segment);
2114
2115int smd_write_end(smd_channel_t *ch)
2116{
2117
2118 if (!ch) {
2119 pr_err("%s: Invalid channel specified\n", __func__);
2120 return -ENODEV;
2121 }
2122 if (ch->pending_pkt_sz) {
2123 pr_err("%s: current packet not completely written\n", __func__);
2124 return -E2BIG;
2125 }
2126
2127 return 0;
2128}
2129EXPORT_SYMBOL(smd_write_end);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002130
Jeff Hugo44fd9832013-04-04 15:56:21 -06002131int smd_write_segment_avail(smd_channel_t *ch)
2132{
2133 int n;
2134
2135 if (!ch) {
2136 pr_err("%s: Invalid channel specified\n", __func__);
2137 return -ENODEV;
2138 }
2139 if (!ch->is_pkt_ch) {
2140 pr_err("%s: non-packet channel specified\n", __func__);
2141 return -ENODEV;
2142 }
2143
2144 n = smd_stream_write_avail(ch);
2145
2146 /* pkt hdr already written, no need to reserve space for it */
2147 if (ch->pending_pkt_sz)
2148 return n;
2149
2150 return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
2151}
2152EXPORT_SYMBOL(smd_write_segment_avail);
2153
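/*
 * Illustrative sketch, not part of this driver: the intended sequence for the
 * segmented packet-write API above.  smd_write_start() commits the total
 * packet length up front, smd_write_segment() streams the payload as FIFO
 * space permits, and smd_write_end() confirms the packet was fully written.
 * The helper name and the polling-style loop are assumptions; a real client
 * would wait for SMD_EVENT_DATA instead of spinning.
 */
static int example_send_segmented(smd_channel_t *ch, void *data, int len)
{
	char *pos = data;
	int remaining = len;
	int n, rc;

	rc = smd_write_start(ch, len);
	if (rc < 0)
		return rc;

	while (remaining > 0) {
		n = smd_write_segment_avail(ch);
		if (n < 0)
			return n;
		if (n == 0)
			continue;	/* would normally wait for SMD_EVENT_DATA */
		if (n > remaining)
			n = remaining;
		n = smd_write_segment(ch, pos, n, 0);
		if (n < 0)
			return n;
		pos += n;
		remaining -= n;
	}

	return smd_write_end(ch);
}
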
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002154int smd_read(smd_channel_t *ch, void *data, int len)
2155{
Jack Pham1b236d12012-03-19 15:27:18 -07002156 if (!ch) {
2157 pr_err("%s: Invalid channel specified\n", __func__);
2158 return -ENODEV;
2159 }
2160
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002161 return ch->read(ch, data, len, 0);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002162}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002163EXPORT_SYMBOL(smd_read);
2164
2165int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
2166{
Jack Pham1b236d12012-03-19 15:27:18 -07002167 if (!ch) {
2168 pr_err("%s: Invalid channel specified\n", __func__);
2169 return -ENODEV;
2170 }
2171
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002172 return ch->read(ch, data, len, 1);
2173}
2174EXPORT_SYMBOL(smd_read_user_buffer);
2175
2176int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
2177{
Jack Pham1b236d12012-03-19 15:27:18 -07002178 if (!ch) {
2179 pr_err("%s: Invalid channel specified\n", __func__);
2180 return -ENODEV;
2181 }
2182
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002183 return ch->read_from_cb(ch, data, len, 0);
2184}
2185EXPORT_SYMBOL(smd_read_from_cb);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002186
2187int smd_write(smd_channel_t *ch, const void *data, int len)
2188{
Jack Pham1b236d12012-03-19 15:27:18 -07002189 if (!ch) {
2190 pr_err("%s: Invalid channel specified\n", __func__);
2191 return -ENODEV;
2192 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002194 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002195}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002196EXPORT_SYMBOL(smd_write);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002197
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002198int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
Brian Swetland636eb9c2009-12-07 15:28:08 -08002199{
Jack Pham1b236d12012-03-19 15:27:18 -07002200 if (!ch) {
2201 pr_err("%s: Invalid channel specified\n", __func__);
2202 return -ENODEV;
2203 }
2204
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002205 return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002206}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002207EXPORT_SYMBOL(smd_write_user_buffer);
Brian Swetland636eb9c2009-12-07 15:28:08 -08002208
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002209int smd_read_avail(smd_channel_t *ch)
2210{
Jack Pham1b236d12012-03-19 15:27:18 -07002211 if (!ch) {
2212 pr_err("%s: Invalid channel specified\n", __func__);
2213 return -ENODEV;
2214 }
2215
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002216 return ch->read_avail(ch);
2217}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002218EXPORT_SYMBOL(smd_read_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002219
2220int smd_write_avail(smd_channel_t *ch)
2221{
Jack Pham1b236d12012-03-19 15:27:18 -07002222 if (!ch) {
2223 pr_err("%s: Invalid channel specified\n", __func__);
2224 return -ENODEV;
2225 }
2226
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002227 return ch->write_avail(ch);
2228}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002229EXPORT_SYMBOL(smd_write_avail);
2230
2231void smd_enable_read_intr(smd_channel_t *ch)
2232{
2233 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002234 ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002235}
2236EXPORT_SYMBOL(smd_enable_read_intr);
2237
2238void smd_disable_read_intr(smd_channel_t *ch)
2239{
2240 if (ch)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002241 ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002242}
2243EXPORT_SYMBOL(smd_disable_read_intr);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002244
Eric Holmbergdeace152012-07-25 12:17:11 -06002245/**
 2246 * smd_mask_receive_interrupt() - Enable/disable receive interrupts for the
 2247 * remote processor used by a particular channel.
2248 * @ch: open channel handle to use for the edge
2249 * @mask: 1 = mask interrupts; 0 = unmask interrupts
2250 * @returns: 0 for success; < 0 for failure
2251 *
2252 * Note that this enables/disables all interrupts from the remote subsystem for
2253 * all channels. As such, it should be used with care and only for specific
2254 * use cases such as power-collapse sequencing.
2255 */
2256int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
2257{
2258 struct irq_chip *irq_chip;
2259 struct irq_data *irq_data;
2260 struct interrupt_config_item *int_cfg;
2261
2262 if (!ch)
2263 return -EINVAL;
2264
2265 if (ch->type >= ARRAY_SIZE(edge_to_pids))
2266 return -ENODEV;
2267
2268 int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
2269
2270 if (int_cfg->irq_id < 0)
2271 return -ENODEV;
2272
2273 irq_chip = irq_get_chip(int_cfg->irq_id);
2274 if (!irq_chip)
2275 return -ENODEV;
2276
2277 irq_data = irq_get_irq_data(int_cfg->irq_id);
2278 if (!irq_data)
2279 return -ENODEV;
2280
2281 if (mask) {
Jeff Hugo903d58a2013-08-29 14:57:00 -06002282 SMD_POWER_INFO("SMD Masking interrupts from %s\n",
Eric Holmbergdeace152012-07-25 12:17:11 -06002283 edge_to_pids[ch->type].subsys_name);
2284 irq_chip->irq_mask(irq_data);
2285 } else {
Jeff Hugo903d58a2013-08-29 14:57:00 -06002286 SMD_POWER_INFO("SMD Unmasking interrupts from %s\n",
Eric Holmbergdeace152012-07-25 12:17:11 -06002287 edge_to_pids[ch->type].subsys_name);
2288 irq_chip->irq_unmask(irq_data);
2289 }
2290
2291 return 0;
2292}
2293EXPORT_SYMBOL(smd_mask_receive_interrupt);
2294
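/*
 * Illustrative sketch, not part of this driver: bracketing a power-collapse
 * window with smd_mask_receive_interrupt(), per the note above that this
 * silences all SMD interrupts from that edge, not just one channel.  The
 * function name and placement of the low-power entry are assumptions.
 */
static int example_power_collapse_window(smd_channel_t *ch)
{
	int rc;

	rc = smd_mask_receive_interrupt(ch, true);
	if (rc)
		return rc;	/* edge has no maskable SMD interrupt */

	/* ... enter the low-power state here ... */

	return smd_mask_receive_interrupt(ch, false);
}
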
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002295int smd_wait_until_readable(smd_channel_t *ch, int bytes)
2296{
2297 return -1;
2298}
2299
2300int smd_wait_until_writable(smd_channel_t *ch, int bytes)
2301{
2302 return -1;
2303}
2304
2305int smd_cur_packet_size(smd_channel_t *ch)
2306{
Jack Pham1b236d12012-03-19 15:27:18 -07002307 if (!ch) {
2308 pr_err("%s: Invalid channel specified\n", __func__);
2309 return -ENODEV;
2310 }
2311
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002312 return ch->current_packet;
2313}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002314EXPORT_SYMBOL(smd_cur_packet_size);
2315
2316int smd_tiocmget(smd_channel_t *ch)
2317{
Jack Pham1b236d12012-03-19 15:27:18 -07002318 if (!ch) {
2319 pr_err("%s: Invalid channel specified\n", __func__);
2320 return -ENODEV;
2321 }
2322
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002323 return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
2324 (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
2325 (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
2326 (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
2327 (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
2328 (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002329}
2330EXPORT_SYMBOL(smd_tiocmget);
2331
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002332/* this api will be called while holding smd_lock */
2333int
2334smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002335{
Jack Pham1b236d12012-03-19 15:27:18 -07002336 if (!ch) {
2337 pr_err("%s: Invalid channel specified\n", __func__);
2338 return -ENODEV;
2339 }
2340
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002341 if (set & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002342 ch->half_ch->set_fDSR(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002343
2344 if (set & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002345 ch->half_ch->set_fCTS(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002346
2347 if (clear & TIOCM_DTR)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002348 ch->half_ch->set_fDSR(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002349
2350 if (clear & TIOCM_RTS)
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002351 ch->half_ch->set_fCTS(ch->send, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002352
Jeff Hugo918b2dc2012-03-21 13:42:09 -06002353 ch->half_ch->set_fSTATE(ch->send, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002354 barrier();
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05302355 ch->notify_other_cpu(ch);
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002356
2357 return 0;
2358}
2359EXPORT_SYMBOL(smd_tiocmset_from_cb);
2360
2361int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
2362{
2363 unsigned long flags;
2364
Jack Pham1b236d12012-03-19 15:27:18 -07002365 if (!ch) {
2366 pr_err("%s: Invalid channel specified\n", __func__);
2367 return -ENODEV;
2368 }
2369
Vamsi Krishnacb12a102011-08-17 15:18:26 -07002370 spin_lock_irqsave(&smd_lock, flags);
2371 smd_tiocmset_from_cb(ch, set, clear);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002372 spin_unlock_irqrestore(&smd_lock, flags);
2373
2374 return 0;
2375}
2376EXPORT_SYMBOL(smd_tiocmset);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002377
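/*
 * Illustrative sketch, not part of this driver: using the virtual TIOCM
 * signals.  smd_tiocmset() drives the local fDSR/fCTS bits (DTR/RTS), and
 * smd_tiocmget() reports the remote side's bits, so a serial-style client can
 * implement DTR/CTS handshaking over SMD.  The helper name is an assumption.
 */
static bool example_raise_and_check_flow(smd_channel_t *ch)
{
	int sigs;

	smd_tiocmset(ch, TIOCM_DTR | TIOCM_RTS, 0);	/* assert DTR and RTS */

	sigs = smd_tiocmget(ch);
	return (sigs & TIOCM_DSR) && (sigs & TIOCM_CTS);	/* remote ready? */
}
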
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002378int smd_is_pkt_avail(smd_channel_t *ch)
2379{
Jeff Hugoa8549f12012-08-13 20:36:18 -06002380 unsigned long flags;
2381
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002382 if (!ch || !ch->is_pkt_ch)
2383 return -EINVAL;
2384
2385 if (ch->current_packet)
2386 return 1;
2387
Jeff Hugoa8549f12012-08-13 20:36:18 -06002388 spin_lock_irqsave(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002389 update_packet_state(ch);
Jeff Hugoa8549f12012-08-13 20:36:18 -06002390 spin_unlock_irqrestore(&smd_lock, flags);
Jeff Hugod71d6ac2012-04-11 16:48:07 -06002391
2392 return ch->current_packet ? 1 : 0;
2393}
2394EXPORT_SYMBOL(smd_is_pkt_avail);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002395
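/*
 * Illustrative sketch, not part of this driver: draining a packet channel
 * from the notify callback.  smd_read_from_cb() is used rather than
 * smd_read() because the packet read path takes smd_lock, which is already
 * held when callbacks run from handle_smd_irq(); for the same reason
 * smd_is_pkt_avail() is avoided here and smd_cur_packet_size() (a plain
 * field read) is used instead.  The handle variable and buffer policy are
 * assumptions.
 */
static smd_channel_t *example_pkt_ch;

static void example_pkt_notify(void *priv, unsigned event)
{
	void *buf;
	int sz;

	if (event != SMD_EVENT_DATA)
		return;

	/* only hand up complete packets; partial data stays in the FIFO */
	while ((sz = smd_cur_packet_size(example_pkt_ch)) > 0 &&
			smd_read_avail(example_pkt_ch) >= sz) {
		buf = kmalloc(sz, GFP_ATOMIC);	/* callback runs in IRQ context */
		if (!buf)
			break;
		smd_read_from_cb(example_pkt_ch, buf, sz);
		/* ... pass buf to the rest of the driver ... */
		kfree(buf);
	}
}
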
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002396static int smsm_cb_init(void)
2397{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002398 struct smsm_state_info *state_info;
2399 int n;
2400 int ret = 0;
2401
2402 smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
2403 GFP_KERNEL);
2404
2405 if (!smsm_states) {
2406 pr_err("%s: SMSM init failed\n", __func__);
2407 return -ENOMEM;
2408 }
2409
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002410 smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
2411 if (!smsm_cb_wq) {
2412 pr_err("%s: smsm_cb_wq creation failed\n", __func__);
2413 kfree(smsm_states);
2414 return -EFAULT;
2415 }
2416
Eric Holmbergc8002902011-09-16 13:55:57 -06002417 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002418 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2419 state_info = &smsm_states[n];
2420 state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
Eric Holmberge8a39322012-04-03 15:14:02 -06002421 state_info->intr_mask_set = 0x0;
2422 state_info->intr_mask_clear = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002423 INIT_LIST_HEAD(&state_info->callbacks);
2424 }
Eric Holmbergc8002902011-09-16 13:55:57 -06002425 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002426
2427 return ret;
2428}
2429
2430static int smsm_init(void)
2431{
2432 struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
2433 int i;
2434 struct smsm_size_info_type *smsm_size_info;
Eric Holmberge5266d32013-02-25 18:29:27 -07002435 unsigned long flags;
2436 unsigned long j_start;
Jeff Hugo7cc06b12013-06-17 16:13:18 -06002437 static int first = 1;
2438 remote_spinlock_t *remote_spinlock;
2439
2440 if (!first)
2441 return 0;
2442 first = 0;
Eric Holmberge5266d32013-02-25 18:29:27 -07002443
2444 /* Verify that remote spinlock is not deadlocked */
Jeff Hugo7cc06b12013-06-17 16:13:18 -06002445 remote_spinlock = smem_get_remote_spinlock();
Eric Holmberge5266d32013-02-25 18:29:27 -07002446 j_start = jiffies;
Jeff Hugo7cc06b12013-06-17 16:13:18 -06002447 while (!remote_spin_trylock_irqsave(remote_spinlock, flags)) {
Eric Holmberge5266d32013-02-25 18:29:27 -07002448 if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
2449 panic("%s: Remote processor %d will not release spinlock\n",
Jeff Hugo7cc06b12013-06-17 16:13:18 -06002450 __func__, remote_spin_owner(remote_spinlock));
Eric Holmberge5266d32013-02-25 18:29:27 -07002451 }
2452 }
Jeff Hugo7cc06b12013-06-17 16:13:18 -06002453 remote_spin_unlock_irqrestore(remote_spinlock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002454
Jeff Hugof4df2ff2013-08-28 17:45:50 -06002455 smsm_size_info = smem_alloc_to_proc(SMEM_SMSM_SIZE_INFO,
2456 sizeof(struct smsm_size_info_type), 0,
2457 SMEM_ANY_HOST_FLAG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002458 if (smsm_size_info) {
2459 SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
2460 SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
2461 }
2462
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002463 i = kfifo_alloc(&smsm_snapshot_fifo,
2464 sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
2465 GFP_KERNEL);
2466 if (i) {
2467 pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
2468 return i;
2469 }
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002470 wake_lock_init(&smsm_snapshot_wakelock, WAKE_LOCK_SUSPEND,
2471 "smsm_snapshot");
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002472
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002473 if (!smsm_info.state) {
Jeff Hugof4df2ff2013-08-28 17:45:50 -06002474 smsm_info.state = smem_alloc2_to_proc(ID_SHARED_STATE,
2475 SMSM_NUM_ENTRIES *
2476 sizeof(uint32_t), 0,
2477 SMEM_ANY_HOST_FLAG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002478
2479 if (smsm_info.state) {
2480 __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2481 if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
 2482 __raw_writel(0,
2483 SMSM_STATE_ADDR(SMSM_APPS_DEM_I));
2484 }
2485 }
2486
2487 if (!smsm_info.intr_mask) {
Jeff Hugof4df2ff2013-08-28 17:45:50 -06002488 smsm_info.intr_mask = smem_alloc2_to_proc(
2489 SMEM_SMSM_CPU_INTR_MASK,
2490 SMSM_NUM_ENTRIES *
2491 SMSM_NUM_HOSTS *
2492 sizeof(uint32_t), 0,
2493 SMEM_ANY_HOST_FLAG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002494
Eric Holmberge8a39322012-04-03 15:14:02 -06002495 if (smsm_info.intr_mask) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002496 for (i = 0; i < SMSM_NUM_ENTRIES; i++)
Eric Holmberge8a39322012-04-03 15:14:02 -06002497 __raw_writel(0x0,
2498 SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
2499
2500 /* Configure legacy modem bits */
2501 __raw_writel(LEGACY_MODEM_SMSM_MASK,
2502 SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
2503 SMSM_APPS));
2504 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002505 }
2506
2507 if (!smsm_info.intr_mux)
Jeff Hugof4df2ff2013-08-28 17:45:50 -06002508 smsm_info.intr_mux = smem_alloc2_to_proc(SMEM_SMD_SMSM_INTR_MUX,
2509 SMSM_NUM_INTR_MUX *
2510 sizeof(uint32_t), 0,
2511 SMEM_ANY_HOST_FLAG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002512
2513 i = smsm_cb_init();
2514 if (i)
2515 return i;
2516
2517 wmb();
Eric Holmberg144c2de2012-10-04 13:37:28 -06002518
2519 smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL);
2520 i = register_pm_notifier(&smsm_pm_nb);
2521 if (i)
2522 pr_err("%s: power state notif error %d\n", __func__, i);
2523
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002524 return 0;
2525}
2526
2527void smsm_reset_modem(unsigned mode)
2528{
2529 if (mode == SMSM_SYSTEM_DOWNLOAD) {
2530 mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
2531 } else if (mode == SMSM_MODEM_WAIT) {
2532 mode = SMSM_RESET | SMSM_MODEM_WAIT;
2533 } else { /* reset_mode is SMSM_RESET or default */
2534 mode = SMSM_RESET;
2535 }
2536
2537 smsm_change_state(SMSM_APPS_STATE, mode, mode);
2538}
2539EXPORT_SYMBOL(smsm_reset_modem);
2540
2541void smsm_reset_modem_cont(void)
2542{
2543 unsigned long flags;
2544 uint32_t state;
2545
2546 if (!smsm_info.state)
2547 return;
2548
2549 spin_lock_irqsave(&smem_lock, flags);
 2550 state = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE))
2551 & ~SMSM_MODEM_WAIT;
2552 __raw_writel(state, SMSM_STATE_ADDR(SMSM_APPS_STATE));
2553 wmb();
2554 spin_unlock_irqrestore(&smem_lock, flags);
2555}
2556EXPORT_SYMBOL(smsm_reset_modem_cont);
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002557
Eric Holmbergda31d042012-03-28 14:01:02 -06002558static void smsm_cb_snapshot(uint32_t use_wakelock)
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002559{
2560 int n;
2561 uint32_t new_state;
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002562 unsigned long flags;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002563 int ret;
Eric Holmberg53516dd2013-09-26 15:59:33 -06002564 uint64_t timestamp;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002565
Eric Holmberg53516dd2013-09-26 15:59:33 -06002566 timestamp = sched_clock();
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002567 ret = kfifo_avail(&smsm_snapshot_fifo);
Eric Holmbergda31d042012-03-28 14:01:02 -06002568 if (ret < SMSM_SNAPSHOT_SIZE) {
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002569 pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
2570 return;
2571 }
2572
Eric Holmberg96b55f62012-04-03 19:10:46 -06002573 /*
2574 * To avoid a race condition with notify_smsm_cb_clients_worker, the
2575 * following sequence must be followed:
2576 * 1) increment snapshot count
2577 * 2) insert data into FIFO
2578 *
2579 * Potentially in parallel, the worker:
2580 * a) verifies >= 1 snapshots are in FIFO
2581 * b) processes snapshot
2582 * c) decrements reference count
2583 *
2584 * This order ensures that 1 will always occur before abc.
2585 */
Eric Holmbergda31d042012-03-28 14:01:02 -06002586 if (use_wakelock) {
2587 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2588 if (smsm_snapshot_count == 0) {
Jeff Hugo903d58a2013-08-29 14:57:00 -06002589 SMSM_POWER_INFO("SMSM snapshot wake lock\n");
Eric Holmbergda31d042012-03-28 14:01:02 -06002590 wake_lock(&smsm_snapshot_wakelock);
2591 }
2592 ++smsm_snapshot_count;
2593 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2594 }
Eric Holmberg96b55f62012-04-03 19:10:46 -06002595
2596 /* queue state entries */
2597 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2598 new_state = __raw_readl(SMSM_STATE_ADDR(n));
2599
2600 ret = kfifo_in(&smsm_snapshot_fifo,
2601 &new_state, sizeof(new_state));
2602 if (ret != sizeof(new_state)) {
2603 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2604 goto restore_snapshot_count;
2605 }
2606 }
2607
Eric Holmberg53516dd2013-09-26 15:59:33 -06002608 ret = kfifo_in(&smsm_snapshot_fifo, &timestamp, sizeof(timestamp));
2609 if (ret != sizeof(timestamp)) {
2610 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2611 goto restore_snapshot_count;
2612 }
2613
Eric Holmberg96b55f62012-04-03 19:10:46 -06002614 /* queue wakelock usage flag */
2615 ret = kfifo_in(&smsm_snapshot_fifo,
2616 &use_wakelock, sizeof(use_wakelock));
2617 if (ret != sizeof(use_wakelock)) {
2618 pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
2619 goto restore_snapshot_count;
2620 }
2621
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002622 queue_work(smsm_cb_wq, &smsm_cb_work);
Eric Holmberg96b55f62012-04-03 19:10:46 -06002623 return;
2624
2625restore_snapshot_count:
2626 if (use_wakelock) {
2627 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2628 if (smsm_snapshot_count) {
2629 --smsm_snapshot_count;
2630 if (smsm_snapshot_count == 0) {
Jeff Hugo903d58a2013-08-29 14:57:00 -06002631 SMSM_POWER_INFO("SMSM snapshot wake unlock\n");
Eric Holmberg96b55f62012-04-03 19:10:46 -06002632 wake_unlock(&smsm_snapshot_wakelock);
2633 }
2634 } else {
2635 pr_err("%s: invalid snapshot count\n", __func__);
2636 }
2637 spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
2638 }
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002639}
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002640
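/*
 * Illustrative sketch, not part of this driver: the record layout queued by
 * smsm_cb_snapshot() above is SMSM_NUM_ENTRIES state words, a sched_clock()
 * timestamp, then the wakelock-usage flag.  The real consumer is
 * notify_smsm_cb_clients_worker() elsewhere in this file; the hypothetical
 * helper below only shows the matching drain order.
 */
static int example_drain_one_snapshot(uint32_t *states, uint64_t *timestamp,
					uint32_t *use_wakelock)
{
	int n;
	int ret;

	/* states must have room for SMSM_NUM_ENTRIES words */
	for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
		ret = kfifo_out(&smsm_snapshot_fifo, &states[n],
						sizeof(states[n]));
		if (ret != sizeof(states[n]))
			return -EIO;
	}

	ret = kfifo_out(&smsm_snapshot_fifo, timestamp, sizeof(*timestamp));
	if (ret != sizeof(*timestamp))
		return -EIO;

	ret = kfifo_out(&smsm_snapshot_fifo, use_wakelock,
					sizeof(*use_wakelock));
	if (ret != sizeof(*use_wakelock))
		return -EIO;

	return 0;
}
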
2641static irqreturn_t smsm_irq_handler(int irq, void *data)
2642{
2643 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002644
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002645 if (irq == INT_ADSP_A11_SMSM) {
Eric Holmberg6282c5d2011-10-27 17:30:57 -06002646 uint32_t mux_val;
2647 static uint32_t prev_smem_q6_apps_smsm;
2648
2649 if (smsm_info.intr_mux && cpu_is_qsd8x50()) {
2650 mux_val = __raw_readl(
2651 SMSM_INTR_MUX_ADDR(SMEM_Q6_APPS_SMSM));
2652 if (mux_val != prev_smem_q6_apps_smsm)
2653 prev_smem_q6_apps_smsm = mux_val;
2654 }
2655
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002656 spin_lock_irqsave(&smem_lock, flags);
Eric Holmbergda31d042012-03-28 14:01:02 -06002657 smsm_cb_snapshot(1);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002658 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002659 return IRQ_HANDLED;
2660 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002661
2662 spin_lock_irqsave(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002663 if (!smsm_info.state) {
2664 SMSM_INFO("<SM NO STATE>\n");
2665 } else {
2666 unsigned old_apps, apps;
2667 unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002668
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002669 old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002670
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002671 SMSM_DBG("<SM %08x %08x>\n", apps, modm);
2672 if (apps & SMSM_RESET) {
2673 /* If we get an interrupt and the apps SMSM_RESET
2674 bit is already set, the modem is acking the
2675 app's reset ack. */
Eric Holmberg2bb6ccd2012-03-13 13:05:14 -06002676 if (!disable_smsm_reset_handshake)
Angshuman Sarkaread67bd2011-09-21 20:13:12 +05302677 apps &= ~SMSM_RESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002678 /* Issue a fake irq to handle any
2679 * smd state changes during reset
2680 */
2681 smd_fake_irq_handler(0);
Daniel Walker79848a22010-03-16 15:20:07 -07002682
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002683 /* queue modem restart notify chain */
2684 modem_queue_start_reset_notify();
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002685
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002686 } else if (modm & SMSM_RESET) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002687 pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
Ram Somani8b9589f2012-04-03 12:07:18 +05302688 if (!disable_smsm_reset_handshake) {
2689 apps |= SMSM_RESET;
2690 flush_cache_all();
2691 outer_flush_all();
2692 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002693 modem_queue_start_reset_notify();
2694
2695 } else if (modm & SMSM_INIT) {
2696 if (!(apps & SMSM_INIT)) {
2697 apps |= SMSM_INIT;
2698 modem_queue_smsm_init_notify();
2699 }
2700
2701 if (modm & SMSM_SMDINIT)
2702 apps |= SMSM_SMDINIT;
2703 if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
2704 (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
2705 apps |= SMSM_RUN;
2706 } else if (modm & SMSM_SYSTEM_DOWNLOAD) {
2707 pr_err("\nSMSM: Modem SMSM state changed to SMSM_SYSTEM_DOWNLOAD.");
2708 modem_queue_start_reset_notify();
2709 }
2710
2711 if (old_apps != apps) {
2712 SMSM_DBG("<SM %08x NOTIFY>\n", apps);
2713 __raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002714 notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
2715 }
2716
Eric Holmbergda31d042012-03-28 14:01:02 -06002717 smsm_cb_snapshot(1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002718 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002719 spin_unlock_irqrestore(&smem_lock, flags);
2720 return IRQ_HANDLED;
2721}
2722
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05302723irqreturn_t smsm_modem_irq_handler(int irq, void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002724{
Jeff Hugo903d58a2013-08-29 14:57:00 -06002725 SMSM_POWER_INFO("SMSM Int Modem->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002726 ++interrupt_stats[SMD_MODEM].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002727 return smsm_irq_handler(irq, data);
2728}
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002729
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05302730irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
Eric Holmberg98c6c642012-02-24 11:29:35 -07002731{
Jeff Hugo903d58a2013-08-29 14:57:00 -06002732 SMSM_POWER_INFO("SMSM Int LPASS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002733 ++interrupt_stats[SMD_Q6].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002734 return smsm_irq_handler(irq, data);
2735}
2736
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05302737irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
Eric Holmberg98c6c642012-02-24 11:29:35 -07002738{
Jeff Hugo903d58a2013-08-29 14:57:00 -06002739 SMSM_POWER_INFO("SMSM Int DSPS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002740 ++interrupt_stats[SMD_DSPS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002741 return smsm_irq_handler(irq, data);
2742}
2743
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05302744irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
Eric Holmberg98c6c642012-02-24 11:29:35 -07002745{
Jeff Hugo903d58a2013-08-29 14:57:00 -06002746 SMSM_POWER_INFO("SMSM Int WCNSS->Apps\n");
Eric Holmberg7ad623a2012-03-01 14:41:10 -07002747 ++interrupt_stats[SMD_WCNSS].smsm_in_count;
Eric Holmberg98c6c642012-02-24 11:29:35 -07002748 return smsm_irq_handler(irq, data);
2749}
2750
Eric Holmberge8a39322012-04-03 15:14:02 -06002751/*
2752 * Changes the global interrupt mask. The set and clear masks are re-applied
2753 * every time the global interrupt mask is updated for callback registration
2754 * and de-registration.
2755 *
2756 * The clear mask is applied first, so if a bit is set to 1 in both the clear
2757 * mask and the set mask, the result will be that the interrupt is set.
2758 *
2759 * @smsm_entry SMSM entry to change
2760 * @clear_mask 1 = clear bit, 0 = no-op
2761 * @set_mask 1 = set bit, 0 = no-op
2762 *
2763 * @returns 0 for success, < 0 for error
2764 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002765int smsm_change_intr_mask(uint32_t smsm_entry,
2766 uint32_t clear_mask, uint32_t set_mask)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002767{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002768 uint32_t old_mask, new_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002769 unsigned long flags;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002770
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002771 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2772		pr_err("smsm_change_intr_mask: Invalid entry %d\n",
2773 smsm_entry);
2774 return -EINVAL;
2775 }
2776
2777 if (!smsm_info.intr_mask) {
2778 pr_err("smsm_change_intr_mask <SM NO STATE>\n");
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002779 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002780 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002781
2782 spin_lock_irqsave(&smem_lock, flags);
Eric Holmberge8a39322012-04-03 15:14:02 -06002783 smsm_states[smsm_entry].intr_mask_clear = clear_mask;
2784 smsm_states[smsm_entry].intr_mask_set = set_mask;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002785
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002786 old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2787 new_mask = (old_mask & ~clear_mask) | set_mask;
2788 __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002789
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002790 wmb();
2791 spin_unlock_irqrestore(&smem_lock, flags);
Brian Swetland5b0f5a32009-04-26 18:38:49 -07002792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002793 return 0;
2794}
2795EXPORT_SYMBOL(smsm_change_intr_mask);
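/*
 * Usage sketch (hypothetical caller, not part of this driver): adjust which
 * modem-entry bits raise apps interrupts. The clear mask is applied before
 * the set mask, so a bit present in both ends up set. The entry and bit
 * values below only illustrate the calling convention.
 */
#if 0	/* illustrative only */
static int example_watch_modem_reset(void)
{
	/* stop watching SMSM_INIT, start watching SMSM_RESET */
	return smsm_change_intr_mask(SMSM_MODEM_STATE,
				     SMSM_INIT, SMSM_RESET);
}
#endif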
2796
2797int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
2798{
2799 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2800		pr_err("smsm_get_intr_mask: Invalid entry %d\n",
2801 smsm_entry);
2802 return -EINVAL;
2803 }
2804
2805 if (!smsm_info.intr_mask) {
2806		pr_err("smsm_get_intr_mask <SM NO STATE>\n");
2807 return -EIO;
2808 }
2809
2810 *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
2811 return 0;
2812}
2813EXPORT_SYMBOL(smsm_get_intr_mask);
2814
2815int smsm_change_state(uint32_t smsm_entry,
2816 uint32_t clear_mask, uint32_t set_mask)
2817{
2818 unsigned long flags;
2819 uint32_t old_state, new_state;
2820
2821 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2822		pr_err("smsm_change_state: Invalid entry %d\n",
2823 smsm_entry);
2824 return -EINVAL;
2825 }
2826
2827 if (!smsm_info.state) {
2828 pr_err("smsm_change_state <SM NO STATE>\n");
2829 return -EIO;
2830 }
2831 spin_lock_irqsave(&smem_lock, flags);
2832
2833 old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2834 new_state = (old_state & ~clear_mask) | set_mask;
2835 __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
Jeff Hugo903d58a2013-08-29 14:57:00 -06002836 SMSM_POWER_INFO("%s %d:%08x->%08x", __func__, smsm_entry,
Brent Hronik2e3ad2f2013-06-17 10:36:49 -06002837 old_state, new_state);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002838 notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002839
2840 spin_unlock_irqrestore(&smem_lock, flags);
2841
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002842 return 0;
2843}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002844EXPORT_SYMBOL(smsm_change_state);
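/*
 * Usage sketch (hypothetical caller, not part of this driver): update bits
 * in the local apps entry; remote processors whose interrupt masks cover
 * the changed bits are signalled via notify_other_smsm(). The specific bits
 * below are chosen only to show the clear/set calling convention.
 */
#if 0	/* illustrative only */
static void example_advertise_run(void)
{
	/* clear SMSM_RESET if it was set, then advertise SMSM_RUN */
	smsm_change_state(SMSM_APPS_STATE, SMSM_RESET, SMSM_RUN);
}
#endif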
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002846uint32_t smsm_get_state(uint32_t smsm_entry)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002847{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002848 uint32_t rv = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002849
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002850 /* needs interface change to return error code */
2851 if (smsm_entry >= SMSM_NUM_ENTRIES) {
2852		pr_err("smsm_get_state: Invalid entry %d\n",
2853 smsm_entry);
2854 return 0;
2855 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002856
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002857 if (!smsm_info.state) {
2858 pr_err("smsm_get_state <SM NO STATE>\n");
2859 } else {
2860 rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
2861 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002862
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002863 return rv;
2864}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002865EXPORT_SYMBOL(smsm_get_state);
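/*
 * Usage sketch (hypothetical caller, not part of this driver): poll the
 * modem entry for SMSM_RUN. As noted in the comment above, this interface
 * returns 0 both for "no bits set" and for error cases, so callers cannot
 * distinguish the two.
 */
#if 0	/* illustrative only */
static bool example_modem_is_running(void)
{
	return !!(smsm_get_state(SMSM_MODEM_STATE) & SMSM_RUN);
}
#endif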
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002866
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002867/**
2868 * Performs SMSM callback client notification.
2869 */
2870void notify_smsm_cb_clients_worker(struct work_struct *work)
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002871{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002872 struct smsm_state_cb_info *cb_info;
2873 struct smsm_state_info *state_info;
2874 int n;
2875 uint32_t new_state;
2876 uint32_t state_changes;
Eric Holmbergda31d042012-03-28 14:01:02 -06002877 uint32_t use_wakelock;
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002878 int ret;
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002879 unsigned long flags;
Eric Holmberg53516dd2013-09-26 15:59:33 -06002880 uint64_t t_snapshot;
2881 uint64_t t_start;
2882 unsigned long nanosec_rem;
Brian Swetland03e00cd2009-07-01 17:58:37 -07002883
Eric Holmbergda31d042012-03-28 14:01:02 -06002884 while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
Eric Holmberg53516dd2013-09-26 15:59:33 -06002885 t_start = sched_clock();
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002886 mutex_lock(&smsm_lock);
2887 for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
2888 state_info = &smsm_states[n];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002889
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002890 ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
2891 sizeof(new_state));
2892 if (ret != sizeof(new_state)) {
2893 pr_err("%s: snapshot underflow %d\n",
2894 __func__, ret);
2895 mutex_unlock(&smsm_lock);
2896 return;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002897 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002898
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002899 state_changes = state_info->last_value ^ new_state;
2900 if (state_changes) {
Jeff Hugo903d58a2013-08-29 14:57:00 -06002901 SMSM_POWER_INFO("SMSM Change %d: %08x->%08x\n",
Eric Holmberg98c6c642012-02-24 11:29:35 -07002902 n, state_info->last_value,
2903 new_state);
Eric Holmbergc7e8daf2011-12-28 11:49:21 -07002904 list_for_each_entry(cb_info,
2905 &state_info->callbacks, cb_list) {
2906
2907 if (cb_info->mask & state_changes)
2908 cb_info->notify(cb_info->data,
2909 state_info->last_value,
2910 new_state);
2911 }
2912 state_info->last_value = new_state;
2913 }
2914 }
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002915
Eric Holmberg53516dd2013-09-26 15:59:33 -06002916 ret = kfifo_out(&smsm_snapshot_fifo, &t_snapshot,
2917 sizeof(t_snapshot));
2918 if (ret != sizeof(t_snapshot)) {
2919 pr_err("%s: snapshot underflow %d\n",
2920 __func__, ret);
2921 mutex_unlock(&smsm_lock);
2922 return;
2923 }
2924
Eric Holmbergda31d042012-03-28 14:01:02 -06002925 /* read wakelock flag */
2926 ret = kfifo_out(&smsm_snapshot_fifo, &use_wakelock,
2927 sizeof(use_wakelock));
2928 if (ret != sizeof(use_wakelock)) {
2929 pr_err("%s: snapshot underflow %d\n",
2930 __func__, ret);
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002931 mutex_unlock(&smsm_lock);
Eric Holmbergda31d042012-03-28 14:01:02 -06002932 return;
Eric Holmberg59a9f9412012-03-19 10:04:22 -06002933 }
Karthikeyan Ramasubramanian3249a222012-04-18 17:16:49 -06002934 mutex_unlock(&smsm_lock);
Eric Holmbergda31d042012-03-28 14:01:02 -06002935
2936 if (use_wakelock) {
2937 spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
2938 if (smsm_snapshot_count) {
2939 --smsm_snapshot_count;
2940 if (smsm_snapshot_count == 0) {
Jeff Hugo903d58a2013-08-29 14:57:00 -06002941					SMSM_POWER_INFO("SMSM snapshot wake unlock\n");
2943 wake_unlock(&smsm_snapshot_wakelock);
2944 }
2945 } else {
2946 pr_err("%s: invalid snapshot count\n",
2947 __func__);
2948 }
2949 spin_unlock_irqrestore(&smsm_snapshot_count_lock,
2950 flags);
2951 }
Eric Holmberg53516dd2013-09-26 15:59:33 -06002952
2953 t_start = t_start - t_snapshot;
2954 nanosec_rem = do_div(t_start, 1000000000U);
2955 SMSM_POWER_INFO(
2956 "SMSM snapshot queue response time %6u.%09lu s\n",
2957 (unsigned)t_start, nanosec_rem);
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002958 }
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002959}
2960
Arve Hjønnevågec9d3d12009-06-16 14:48:21 -07002961
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002962/**
2963 * Registers callback for SMSM state notifications when the specified
2964 * bits change.
2965 *
2966 * @smsm_entry Processor entry to register
2967 * @mask Bits to register (OR'd into the mask of any existing entry)
2968 * @notify Notification function to register
2969 * @data Opaque data passed in to callback
2970 *
2971 * @returns Status code
2972 * <0 error code
2973 * 0 inserted new entry
2974 * 1 updated mask of existing entry
2975 */
2976int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
2977 void (*notify)(void *, uint32_t, uint32_t), void *data)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002978{
Eric Holmberge8a39322012-04-03 15:14:02 -06002979 struct smsm_state_info *state;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002980 struct smsm_state_cb_info *cb_info;
2981 struct smsm_state_cb_info *cb_found = 0;
Eric Holmberge8a39322012-04-03 15:14:02 -06002982 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002983 int ret = 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002984
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002985 if (smsm_entry >= SMSM_NUM_ENTRIES)
2986 return -EINVAL;
2987
Eric Holmbergc8002902011-09-16 13:55:57 -06002988 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002989
2990 if (!smsm_states) {
2991 /* smsm not yet initialized */
2992 ret = -ENODEV;
2993 goto cleanup;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002994 }
Brian Swetland2eb44eb2008-09-29 16:00:48 -07002995
Eric Holmberge8a39322012-04-03 15:14:02 -06002996 state = &smsm_states[smsm_entry];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002997 list_for_each_entry(cb_info,
Eric Holmberge8a39322012-04-03 15:14:02 -06002998 &state->callbacks, cb_list) {
2999 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003000 (cb_info->data == data)) {
3001 cb_info->mask |= mask;
3002 cb_found = cb_info;
3003 ret = 1;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003004 }
Eric Holmberge8a39322012-04-03 15:14:02 -06003005 new_mask |= cb_info->mask;
Brian Swetland5b0f5a32009-04-26 18:38:49 -07003006 }
3007
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003008 if (!cb_found) {
3009 cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
3010 GFP_ATOMIC);
3011 if (!cb_info) {
3012 ret = -ENOMEM;
3013 goto cleanup;
3014 }
3015
3016 cb_info->mask = mask;
3017 cb_info->notify = notify;
3018 cb_info->data = data;
3019 INIT_LIST_HEAD(&cb_info->cb_list);
3020 list_add_tail(&cb_info->cb_list,
Eric Holmberge8a39322012-04-03 15:14:02 -06003021 &state->callbacks);
3022 new_mask |= mask;
3023 }
3024
3025 /* update interrupt notification mask */
3026 if (smsm_entry == SMSM_MODEM_STATE)
3027 new_mask |= LEGACY_MODEM_SMSM_MASK;
3028
3029 if (smsm_info.intr_mask) {
3030 unsigned long flags;
3031
3032 spin_lock_irqsave(&smem_lock, flags);
3033 new_mask = (new_mask & ~state->intr_mask_clear)
3034 | state->intr_mask_set;
3035 __raw_writel(new_mask,
3036 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
3037 wmb();
3038 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003039 }
3040
3041cleanup:
Eric Holmbergc8002902011-09-16 13:55:57 -06003042 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003043 return ret;
3044}
3045EXPORT_SYMBOL(smsm_state_cb_register);
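/*
 * Usage sketch (hypothetical client, not part of this driver): register a
 * callback for modem SMSM_RESET/SMSM_RUN transitions. Callbacks run from
 * the smsm_cb_wq worker (notify_smsm_cb_clients_worker), not from IRQ
 * context. The callback body below is illustrative only.
 */
#if 0	/* illustrative only */
static void example_modem_state_cb(void *data, uint32_t old_state,
				   uint32_t new_state)
{
	if ((new_state & SMSM_RESET) && !(old_state & SMSM_RESET))
		pr_info("example: modem signalled SMSM_RESET\n");
}

static int example_register_modem_cb(void)
{
	/* returns 0 for a new entry, 1 if an existing mask was widened */
	return smsm_state_cb_register(SMSM_MODEM_STATE,
				      SMSM_RESET | SMSM_RUN,
				      example_modem_state_cb, NULL);
}
#endif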
3046
3047
3048/**
3049 * Deregisters for SMSM state notifications for the specified bits.
3050 *
3051 * @smsm_entry Processor entry to deregister
3052 * @mask Bits to deregister (if result is 0, callback is removed)
3053 * @notify Notification function to deregister
3054 * @data Opaque data passed in to callback
3055 *
3056 * @returns Status code
3057 * <0 error code
3058 * 0 not found
3059 * 1 updated mask
3060 * 2 removed callback
3061 */
3062int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
3063 void (*notify)(void *, uint32_t, uint32_t), void *data)
3064{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003065 struct smsm_state_cb_info *cb_info;
Eric Holmberge8a39322012-04-03 15:14:02 -06003066 struct smsm_state_cb_info *cb_tmp;
3067 struct smsm_state_info *state;
3068 uint32_t new_mask = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003069 int ret = 0;
3070
3071 if (smsm_entry >= SMSM_NUM_ENTRIES)
3072 return -EINVAL;
3073
Eric Holmbergc8002902011-09-16 13:55:57 -06003074 mutex_lock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003075
3076 if (!smsm_states) {
3077 /* smsm not yet initialized */
Eric Holmbergc8002902011-09-16 13:55:57 -06003078 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003079 return -ENODEV;
3080 }
3081
Eric Holmberge8a39322012-04-03 15:14:02 -06003082 state = &smsm_states[smsm_entry];
3083 list_for_each_entry_safe(cb_info, cb_tmp,
3084 &state->callbacks, cb_list) {
3085 if (!ret && (cb_info->notify == notify) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003086 (cb_info->data == data)) {
3087 cb_info->mask &= ~mask;
3088 ret = 1;
3089 if (!cb_info->mask) {
3090 /* no mask bits set, remove callback */
3091 list_del(&cb_info->cb_list);
3092 kfree(cb_info);
3093 ret = 2;
Eric Holmberge8a39322012-04-03 15:14:02 -06003094 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003095 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003096 }
Eric Holmberge8a39322012-04-03 15:14:02 -06003097 new_mask |= cb_info->mask;
3098 }
3099
3100 /* update interrupt notification mask */
3101 if (smsm_entry == SMSM_MODEM_STATE)
3102 new_mask |= LEGACY_MODEM_SMSM_MASK;
3103
3104 if (smsm_info.intr_mask) {
3105 unsigned long flags;
3106
3107 spin_lock_irqsave(&smem_lock, flags);
3108 new_mask = (new_mask & ~state->intr_mask_clear)
3109 | state->intr_mask_set;
3110 __raw_writel(new_mask,
3111 SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
3112 wmb();
3113 spin_unlock_irqrestore(&smem_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003114 }
3115
Eric Holmbergc8002902011-09-16 13:55:57 -06003116 mutex_unlock(&smsm_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003117 return ret;
3118}
3119EXPORT_SYMBOL(smsm_state_cb_deregister);
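/*
 * Usage sketch (hypothetical client, not part of this driver): undo the
 * registration from the sketch above. The same notify/data pair must be
 * passed; once a callback's mask drops to zero the entry is removed and
 * the call returns 2.
 */
#if 0	/* illustrative only */
static void example_deregister_modem_cb(void)
{
	int rc;

	rc = smsm_state_cb_deregister(SMSM_MODEM_STATE,
				      SMSM_RESET | SMSM_RUN,
				      example_modem_state_cb, NULL);
	if (rc < 0)
		pr_err("example: deregister failed %d\n", rc);
}
#endif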
3120
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003121static int restart_notifier_cb(struct notifier_block *this,
3122 unsigned long code,
3123 void *data);
3124
3125static struct restart_notifier_block restart_notifiers[] = {
Eric Holmbergca7ead22011-12-01 17:21:15 -07003126 {SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
3127 {SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
Sameer Thalappil52381142012-10-04 17:22:24 -07003128 {SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
Eric Holmbergca7ead22011-12-01 17:21:15 -07003129 {SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
Eric Holmberg8b0e74f2012-02-08 09:56:17 -07003130 {SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
Jeff Hugod3cf6ec2012-09-26 15:30:10 -06003131 {SMD_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003132};
3133
3134static int restart_notifier_cb(struct notifier_block *this,
3135 unsigned long code,
3136 void *data)
3137{
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003138 remote_spinlock_t *remote_spinlock;
3139
Jeff Hugo73f356f2012-12-14 17:56:19 -07003140 /*
3141 * Some SMD or SMSM clients assume SMD/SMSM SSR handling will be
3142 * done in the AFTER_SHUTDOWN level. If this ever changes, extra
3143 * care should be taken to verify no clients are broken.
3144 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003145 if (code == SUBSYS_AFTER_SHUTDOWN) {
3146 struct restart_notifier_block *notifier;
3147
3148 notifier = container_of(this,
3149 struct restart_notifier_block, nb);
3150 SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
3151 __func__, notifier->processor,
3152 notifier->name);
3153
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003154 remote_spinlock = smem_get_remote_spinlock();
3155 remote_spin_release(remote_spinlock, notifier->processor);
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003156 remote_spin_release_all(notifier->processor);
3157
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003158 smd_channel_reset(notifier->processor);
3159 }
3160
3161 return NOTIFY_DONE;
3162}
3163
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05303164/**
3165 * smd_post_init() - SMD post initialization
3166 * @is_legacy: 1 for legacy/platform device init sequence
3167 * 0 for device tree init sequence
Jeff Hugof4df2ff2013-08-28 17:45:50 -06003168 * @remote_pid: remote pid that has been initialized. Ignored when is_legacy=1
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05303169 *
3170 * This function is used by the legacy and device tree initialization
3171 * to complete the SMD init sequence.
3172 */
Jeff Hugof4df2ff2013-08-28 17:45:50 -06003173void smd_post_init(bool is_legacy, unsigned remote_pid)
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05303174{
Jeff Hugof4df2ff2013-08-28 17:45:50 -06003175 int i;
3176
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05303177 if (is_legacy) {
3178 smd_initialized = 1;
3179 smd_alloc_loopback_channel();
Jeff Hugof4df2ff2013-08-28 17:45:50 -06003180 for (i = 1; i < NUM_SMD_SUBSYSTEMS; ++i)
3181 schedule_work(&remote_info[i].probe_work);
3182 } else {
3183 schedule_work(&remote_info[remote_pid].probe_work);
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05303184 }
3185}
3186
3187/**
3188 * smsm_post_init() - SMSM post initialization
3189 * @returns: 0 for success, standard Linux error code otherwise
3190 *
3191 * This function is used by the legacy and device tree initialization
3192 * to complete the SMSM init sequence.
3193 */
3194int smsm_post_init(void)
3195{
3196 int ret;
3197
3198 ret = smsm_init();
3199 if (ret) {
3200 pr_err("smsm_init() failed ret = %d\n", ret);
3201 return ret;
3202 }
3203 smsm_irq_handler(0, 0);
3204
3205 return ret;
3206}
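/*
 * Usage sketch (hypothetical init code, not part of this driver): the
 * platform-specific init path is expected to call smd_post_init() once the
 * transport for a remote pid is ready, and smsm_post_init() once the SMSM
 * shared memory is available. The exact sequencing shown here is an
 * assumption for illustration only.
 */
#if 0	/* illustrative only */
static int example_post_init(unsigned remote_pid)
{
	smd_post_init(false, remote_pid);	/* device tree flavour */
	return smsm_post_init();
}
#endif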
3207
3208/**
3209 * smd_get_intr_config() - Get interrupt configuration structure
3210 * @edge: edge type that identifies the local and remote processors
3211 * @returns: pointer to interrupt configuration
3212 *
3213 * This function returns the interrupt configuration of the remote processor
3214 * based on the edge type.
3215 */
3216struct interrupt_config *smd_get_intr_config(uint32_t edge)
3217{
3218 if (edge >= ARRAY_SIZE(edge_to_pids))
3219 return NULL;
3220 return &private_intr_config[edge_to_pids[edge].remote_pid];
3221}
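/*
 * Usage sketch (hypothetical caller, not part of this driver): look up the
 * interrupt configuration for an edge before signalling the remote side.
 * SMD_APPS_MODEM is assumed here to be a valid edge define from
 * mach/msm_smd.h.
 */
#if 0	/* illustrative only */
static bool example_edge_has_intr_config(void)
{
	return smd_get_intr_config(SMD_APPS_MODEM) != NULL;
}
#endif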
3222
3223/**
3224 * smd_edge_to_remote_pid() - Get the remote processor ID
3225 * @edge: edge type that identifies the local and remote processors
3226 * @returns: remote processor ID
3227 *
3228 * This function returns remote processor ID based on edge type.
3229 */
3230int smd_edge_to_remote_pid(uint32_t edge)
3231{
3232 if (edge >= ARRAY_SIZE(edge_to_pids))
3233 return -EINVAL;
3234 return edge_to_pids[edge].remote_pid;
3235}
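/*
 * Usage sketch (hypothetical caller, not part of this driver): translate an
 * edge into the remote processor ID used to index per-processor state.
 * SMD_APPS_MODEM is assumed here to be a valid edge define from
 * mach/msm_smd.h.
 */
#if 0	/* illustrative only */
static void example_edge_lookup(void)
{
	int pid = smd_edge_to_remote_pid(SMD_APPS_MODEM);

	if (pid < 0)
		pr_err("example: invalid edge\n");
	else
		pr_info("example: remote pid %d\n", pid);
}
#endif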
3236
3237/**
3238 * smd_set_edge_subsys_name() - Set the subsystem name
3239 * @edge: edge type identifies local and remote processor
3240 * @subsys_name: pointer to subsystem name
3241 *
3242 * This function is used to set the subsystem name for the given edge type.
3243 */
3244void smd_set_edge_subsys_name(uint32_t edge, const char *subsys_name)
3245{
Brent Hronikcf948372013-10-16 14:54:10 -06003246 if (edge < ARRAY_SIZE(edge_to_pids))
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05303247 strlcpy(edge_to_pids[edge].subsys_name,
3248 subsys_name, SMD_MAX_CH_NAME_LEN);
3249 else
3250 pr_err("%s: Invalid edge type[%d]\n", __func__, edge);
3251}
3252
3253/**
3254 * smd_set_edge_initialized() - Set the edge initialized status
3255 * @edge: edge type identifies local and remote processor
3256 *
3257 * This function sets the initialized flag for the given edge type.
3258 */
3259void smd_set_edge_initialized(uint32_t edge)
3260{
Brent Hronikcf948372013-10-16 14:54:10 -06003261 if (edge < ARRAY_SIZE(edge_to_pids))
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05303262 edge_to_pids[edge].initialized = true;
3263 else
3264 pr_err("%s: Invalid edge type[%d]\n", __func__, edge);
3265}
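/*
 * Usage sketch (hypothetical init code, not part of this driver): record
 * the subsystem name for an edge and mark the edge initialized. The edge
 * and name values are placeholders for whatever the platform data provides;
 * SMD_APPS_MODEM is assumed to be a valid edge define from mach/msm_smd.h.
 */
#if 0	/* illustrative only */
static void example_setup_edge(void)
{
	smd_set_edge_subsys_name(SMD_APPS_MODEM, "modem");
	smd_set_edge_initialized(SMD_APPS_MODEM);
}
#endif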
3266
3267/**
3268 * smd_cfg_smd_intr() - Set the SMD interrupt configuration
3269 * @proc: remote processor ID
3270 * @mask: bit position in IRQ register
3271 * @ptr: IRQ register
3272 *
3273 * This function is called in the legacy init sequence and is used to set
3274 * the SMD interrupt configuration for a particular processor.
3275 */
3276void smd_cfg_smd_intr(uint32_t proc, uint32_t mask, void *ptr)
3277{
3278 private_intr_config[proc].smd.out_bit_pos = mask;
3279 private_intr_config[proc].smd.out_base = ptr;
3280 private_intr_config[proc].smd.out_offset = 0;
3281}
3282
3283/**
3284 * smd_cfg_smsm_intr() - Set the SMSM interrupt configuration
3285 * @proc: remote processor ID
3286 * @mask: bit position in IRQ register
3287 * @ptr: IRQ register
3288 *
3289 * This function is called in the legacy init sequence and is used to set
3290 * the SMSM interrupt configuration for a particular processor.
3291 */
3292void smd_cfg_smsm_intr(uint32_t proc, uint32_t mask, void *ptr)
3293{
3294 private_intr_config[proc].smsm.out_bit_pos = mask;
3295 private_intr_config[proc].smsm.out_base = ptr;
3296 private_intr_config[proc].smsm.out_offset = 0;
3297}
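/*
 * Usage sketch (hypothetical legacy board code, not part of this driver):
 * point the outgoing modem SMD and SMSM interrupts at an IPC trigger
 * register. The register pointer and bit positions below are placeholders;
 * real board files pass their SoC-specific values.
 */
#if 0	/* illustrative only */
static void *example_ipc_trigger_reg;	/* mapped IPC register (placeholder) */

static void example_cfg_modem_intr(void)
{
	smd_cfg_smd_intr(SMD_MODEM, 1 << 0, example_ipc_trigger_reg);
	smd_cfg_smsm_intr(SMD_MODEM, 1 << 5, example_ipc_trigger_reg);
}
#endif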
3298
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003299static __init int modem_restart_late_init(void)
3300{
3301 int i;
3302 void *handle;
3303 struct restart_notifier_block *nb;
3304
3305 for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
3306 nb = &restart_notifiers[i];
3307 handle = subsys_notif_register_notifier(nb->name, &nb->nb);
3308 SMD_DBG("%s: registering notif for '%s', handle=%p\n",
3309 __func__, nb->name, handle);
3310 }
Eric Holmbergcfbc1d52013-03-13 18:30:19 -06003311
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003312 return 0;
3313}
3314late_initcall(modem_restart_late_init);
3315
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003316int __init msm_smd_init(void)
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003317{
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003318 static bool registered;
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003319 int rc;
Jeff Hugof4df2ff2013-08-28 17:45:50 -06003320 int i;
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003321
3322 if (registered)
3323 return 0;
3324
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05303325 smd_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smd");
3326 if (!smd_log_ctx) {
Jeff Hugo903d58a2013-08-29 14:57:00 -06003327 pr_err("%s: unable to create SMD logging context\n", __func__);
3328 msm_smd_debug_mask = 0;
3329 }
3330
3331 smsm_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smsm");
3332 if (!smsm_log_ctx) {
3333 pr_err("%s: unable to create SMSM logging context\n", __func__);
Arun Kumar Neelakantam7cffb332013-01-28 15:43:35 +05303334 msm_smd_debug_mask = 0;
3335 }
3336
Mahesh Sivasubramaniand9041b02012-05-09 12:57:23 -06003337 registered = true;
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003338
Jeff Hugof4df2ff2013-08-28 17:45:50 -06003339 for (i = 0; i < NUM_SMD_SUBSYSTEMS; ++i) {
3340 remote_info[i].remote_pid = i;
3341 remote_info[i].free_space = UINT_MAX;
3342 INIT_WORK(&remote_info[i].probe_work, smd_channel_probe_worker);
3343 INIT_LIST_HEAD(&remote_info[i].ch_list);
3344 }
Jeff Hugo7cc06b12013-06-17 16:13:18 -06003345
3346 channel_close_wq = create_singlethread_workqueue("smd_channel_close");
3347	if (!channel_close_wq) {
3348 pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
3349 return -ENOMEM;
3350 }
3351
Arun Kumar Neelakantam804f5162013-07-25 17:58:05 +05303352 rc = msm_smd_driver_register();
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003353 if (rc) {
3354 pr_err("%s: msm_smd_driver register failed %d\n",
3355 __func__, rc);
3356 return rc;
3357 }
Karthikeyan Ramasubramanianfa44cd72012-08-22 18:08:14 -06003358 return 0;
Brian Swetland2eb44eb2008-09-29 16:00:48 -07003359}
3360
3361module_init(msm_smd_init);
3362
3363MODULE_DESCRIPTION("MSM Shared Memory Core");
3364MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
3365MODULE_LICENSE("GPL");