/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * IPC ROUTER SMD XPRT module.
 */
#define DEBUG

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <mach/msm_smd.h>
#include <mach/subsystem_restart.h>

#include "ipc_router.h"
#include "smd_private.h"

static int msm_ipc_router_smd_xprt_debug_mask;
module_param_named(debug_mask, msm_ipc_router_smd_xprt_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
#define D(x...) do { \
if (msm_ipc_router_smd_xprt_debug_mask) \
	pr_info(x); \
} while (0)
#else
#define D(x...) do { } while (0)
#endif

#define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))

#define NUM_SMD_XPRTS 4
#define XPRT_NAME_LEN (SMD_MAX_CH_NAME_LEN + 12)

struct msm_ipc_router_smd_xprt {
	struct msm_ipc_router_xprt xprt;
	smd_channel_t *channel;
	struct workqueue_struct *smd_xprt_wq;
	wait_queue_head_t write_avail_wait_q;
	struct rr_packet *in_pkt;
	int is_partial_in_pkt;
	struct delayed_work read_work;
	spinlock_t ss_reset_lock;	/* Subsystem reset lock */
	int ss_reset;
	void *pil;
};

struct msm_ipc_router_smd_xprt_work {
	struct msm_ipc_router_xprt *xprt;
	struct work_struct work;
};

static void smd_xprt_read_data(struct work_struct *work);
static void smd_xprt_open_event(struct work_struct *work);
static void smd_xprt_close_event(struct work_struct *work);

struct msm_ipc_router_smd_xprt_config {
	char ch_name[SMD_MAX_CH_NAME_LEN];
	char xprt_name[XPRT_NAME_LEN];
	uint32_t edge;
	uint32_t link_id;
};

struct msm_ipc_router_smd_xprt_config smd_xprt_cfg[] = {
	{"RPCRPY_CNTL", "ipc_rtr_smd_rpcrpy_cntl", SMD_APPS_MODEM, 1},
	{"IPCRTR", "ipc_rtr_smd_ipcrtr", SMD_APPS_MODEM, 1},
	{"IPCRTR", "ipc_rtr_q6_ipcrtr", SMD_APPS_QDSP, 1},
	{"IPCRTR", "ipc_rtr_wcnss_ipcrtr", SMD_APPS_WCNSS, 1},
};

static struct msm_ipc_router_smd_xprt smd_remote_xprt[NUM_SMD_XPRTS];

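/**
 * find_smd_xprt_cfg() - Map a platform device onto an smd_xprt_cfg entry
 * @pdev: Platform device created by SMD for a channel/edge pair.
 *
 * Returns the index of the smd_xprt_cfg[] entry whose channel name and
 * edge match the platform device, or -ENODEV if no entry matches.
 */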
static int find_smd_xprt_cfg(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < NUM_SMD_XPRTS; i++) {
		if (!strncmp(pdev->name, smd_xprt_cfg[i].ch_name, 20) &&
		    (pdev->id == smd_xprt_cfg[i].edge))
			return i;
	}

	return -ENODEV;
}

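/* Report how many bytes can currently be written to the SMD channel. */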
static int msm_ipc_router_smd_remote_write_avail(
	struct msm_ipc_router_xprt *xprt)
{
	struct msm_ipc_router_smd_xprt *smd_xprtp =
		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);

	return smd_write_avail(smd_xprtp->channel);
}

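/**
 * msm_ipc_router_smd_remote_write() - Send an IPC Router packet over SMD
 * @data: The rr_packet to transmit.
 * @len: Expected length; must match pkt->length.
 * @xprt: Transport on which to send the packet.
 *
 * Starts an SMD write of the packet plus alignment padding, retrying up to
 * five times if smd_write_start() fails, then streams each sk_buff fragment,
 * sleeping on write_avail_wait_q whenever the channel fills up. Returns the
 * number of bytes sent, or -ENETRESET if the remote subsystem resets.
 */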
static int msm_ipc_router_smd_remote_write(void *data,
					   uint32_t len,
					   struct msm_ipc_router_xprt *xprt)
{
	struct rr_packet *pkt = (struct rr_packet *)data;
	struct sk_buff *ipc_rtr_pkt;
	int align_sz, align_data = 0;
	int offset, sz_written = 0;
	int ret, num_retries = 0;
	unsigned long flags;
	struct msm_ipc_router_smd_xprt *smd_xprtp =
		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);

	if (!pkt)
		return -EINVAL;

	if (!len || pkt->length != len)
		return -EINVAL;

	align_sz = ALIGN_SIZE(pkt->length);
	while ((ret = smd_write_start(smd_xprtp->channel,
				      (len + align_sz))) < 0) {
		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
		if (smd_xprtp->ss_reset) {
			spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
						flags);
			pr_err("%s: %s chnl reset\n", __func__, xprt->name);
			return -ENETRESET;
		}
		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
		if (num_retries >= 5) {
			pr_err("%s: Error %d @smd_write_start for %s\n",
				__func__, ret, xprt->name);
			return ret;
		}
		msleep(50);
		num_retries++;
	}

	D("%s: Ready to write\n", __func__);
	skb_queue_walk(pkt->pkt_fragment_q, ipc_rtr_pkt) {
		offset = 0;
		while (offset < ipc_rtr_pkt->len) {
			if (!smd_write_avail(smd_xprtp->channel))
				smd_enable_read_intr(smd_xprtp->channel);

			wait_event(smd_xprtp->write_avail_wait_q,
				(smd_write_avail(smd_xprtp->channel) ||
				smd_xprtp->ss_reset));
			smd_disable_read_intr(smd_xprtp->channel);
			spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
			if (smd_xprtp->ss_reset) {
				spin_unlock_irqrestore(
					&smd_xprtp->ss_reset_lock, flags);
				pr_err("%s: %s chnl reset\n",
					__func__, xprt->name);
				return -ENETRESET;
			}
			spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
						flags);

			sz_written = smd_write_segment(smd_xprtp->channel,
					ipc_rtr_pkt->data + offset,
					(ipc_rtr_pkt->len - offset), 0);
			offset += sz_written;
			sz_written = 0;
		}
		D("%s: Wrote %d bytes over %s\n",
		  __func__, offset, xprt->name);
	}

	if (align_sz) {
		if (smd_write_avail(smd_xprtp->channel) < align_sz)
			smd_enable_read_intr(smd_xprtp->channel);

		wait_event(smd_xprtp->write_avail_wait_q,
			((smd_write_avail(smd_xprtp->channel) >=
			 align_sz) || smd_xprtp->ss_reset));
		smd_disable_read_intr(smd_xprtp->channel);
		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
		if (smd_xprtp->ss_reset) {
			spin_unlock_irqrestore(
				&smd_xprtp->ss_reset_lock, flags);
			pr_err("%s: %s chnl reset\n",
				__func__, xprt->name);
			return -ENETRESET;
		}
		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
					flags);

		smd_write_segment(smd_xprtp->channel,
				  &align_data, align_sz, 0);
		D("%s: Wrote %d align bytes over %s\n",
		  __func__, align_sz, xprt->name);
	}
	if (!smd_write_end(smd_xprtp->channel))
		D("%s: Finished writing\n", __func__);
	return len;
}

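/*
 * Close the SMD channel backing this transport and drop the subsystem (PIL)
 * reference taken when the remote processor was loaded.
 */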
static int msm_ipc_router_smd_remote_close(struct msm_ipc_router_xprt *xprt)
{
	int rc;
	struct msm_ipc_router_smd_xprt *smd_xprtp =
		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);

	rc = smd_close(smd_xprtp->channel);
	if (smd_xprtp->pil) {
		subsystem_put(smd_xprtp->pil);
		smd_xprtp->pil = NULL;
	}
	return rc;
}

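/**
 * smd_xprt_read_data() - Read worker for an SMD transport
 * @work: The read_work member of the owning msm_ipc_router_smd_xprt.
 *
 * Drains the SMD channel, collecting sk_buff fragments into the in-progress
 * rr_packet tracked by in_pkt/is_partial_in_pkt. When a complete packet has
 * been read it is passed to the IPC Router core through
 * msm_ipc_router_xprt_notify() as an IPC_ROUTER_XPRT_EVENT_DATA event.
 * On skb allocation failure the requested size is halved; once it is half a
 * page or less, the work is requeued after 100 ms instead.
 */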
static void smd_xprt_read_data(struct work_struct *work)
{
	int pkt_size, sz_read, sz;
	struct sk_buff *ipc_rtr_pkt;
	void *data;
	unsigned long flags;
	struct delayed_work *rwork = to_delayed_work(work);
	struct msm_ipc_router_smd_xprt *smd_xprtp =
		container_of(rwork, struct msm_ipc_router_smd_xprt, read_work);

	spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
	if (smd_xprtp->ss_reset) {
		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
		if (smd_xprtp->in_pkt)
			release_pkt(smd_xprtp->in_pkt);
		smd_xprtp->is_partial_in_pkt = 0;
		pr_err("%s: %s channel reset\n",
			__func__, smd_xprtp->xprt.name);
		return;
	}
	spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);

	D("%s pkt_size: %d, read_avail: %d\n", __func__,
		smd_cur_packet_size(smd_xprtp->channel),
		smd_read_avail(smd_xprtp->channel));
	while ((pkt_size = smd_cur_packet_size(smd_xprtp->channel)) &&
		smd_read_avail(smd_xprtp->channel)) {
		if (!smd_xprtp->is_partial_in_pkt) {
			smd_xprtp->in_pkt = kzalloc(sizeof(struct rr_packet),
						    GFP_KERNEL);
			if (!smd_xprtp->in_pkt) {
				pr_err("%s: Couldn't alloc rr_packet\n",
					__func__);
				return;
			}

			smd_xprtp->in_pkt->pkt_fragment_q =
				kmalloc(sizeof(struct sk_buff_head),
					GFP_KERNEL);
			if (!smd_xprtp->in_pkt->pkt_fragment_q) {
				pr_err("%s: Couldn't alloc pkt_fragment_q\n",
					__func__);
				kfree(smd_xprtp->in_pkt);
				return;
			}
			skb_queue_head_init(smd_xprtp->in_pkt->pkt_fragment_q);
			smd_xprtp->is_partial_in_pkt = 1;
			D("%s: Allocated rr_packet\n", __func__);
		}

		if (((pkt_size >= MIN_FRAG_SZ) &&
		     (smd_read_avail(smd_xprtp->channel) < MIN_FRAG_SZ)) ||
		    ((pkt_size < MIN_FRAG_SZ) &&
		     (smd_read_avail(smd_xprtp->channel) < pkt_size)))
			return;

		sz = smd_read_avail(smd_xprtp->channel);
		do {
			ipc_rtr_pkt = alloc_skb(sz, GFP_KERNEL);
			if (!ipc_rtr_pkt) {
				if (sz <= (PAGE_SIZE/2)) {
					queue_delayed_work(
						smd_xprtp->smd_xprt_wq,
						&smd_xprtp->read_work,
						msecs_to_jiffies(100));
					return;
				}
				sz = sz / 2;
			}
		} while (!ipc_rtr_pkt);

		D("%s: Allocated the sk_buff of size %d\n", __func__, sz);
		data = skb_put(ipc_rtr_pkt, sz);
		sz_read = smd_read(smd_xprtp->channel, data, sz);
		if (sz_read != sz) {
			pr_err("%s: Couldn't read %s completely\n",
				__func__, smd_xprtp->xprt.name);
			kfree_skb(ipc_rtr_pkt);
			release_pkt(smd_xprtp->in_pkt);
			smd_xprtp->is_partial_in_pkt = 0;
			return;
		}
		skb_queue_tail(smd_xprtp->in_pkt->pkt_fragment_q, ipc_rtr_pkt);
		smd_xprtp->in_pkt->length += sz_read;
		if (sz_read != pkt_size)
			smd_xprtp->is_partial_in_pkt = 1;
		else
			smd_xprtp->is_partial_in_pkt = 0;

		if (!smd_xprtp->is_partial_in_pkt) {
			D("%s: Packet size read %d\n",
			  __func__, smd_xprtp->in_pkt->length);
			msm_ipc_router_xprt_notify(&smd_xprtp->xprt,
				IPC_ROUTER_XPRT_EVENT_DATA,
				(void *)smd_xprtp->in_pkt);
			release_pkt(smd_xprtp->in_pkt);
			smd_xprtp->in_pkt = NULL;
		}
	}
}

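/* Work handler: tell the IPC Router core that this transport is now open. */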
static void smd_xprt_open_event(struct work_struct *work)
{
	struct msm_ipc_router_smd_xprt_work *xprt_work =
		container_of(work, struct msm_ipc_router_smd_xprt_work, work);

	msm_ipc_router_xprt_notify(xprt_work->xprt,
				IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
	D("%s: Notified IPC Router of %s OPEN\n",
	   __func__, xprt_work->xprt->name);
	kfree(xprt_work);
}

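/* Work handler: tell the IPC Router core that this transport has closed. */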
static void smd_xprt_close_event(struct work_struct *work)
{
	struct msm_ipc_router_smd_xprt_work *xprt_work =
		container_of(work, struct msm_ipc_router_smd_xprt_work, work);

	msm_ipc_router_xprt_notify(xprt_work->xprt,
				IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
	D("%s: Notified IPC Router of %s CLOSE\n",
	   __func__, xprt_work->xprt->name);
	kfree(xprt_work);
}

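/**
 * msm_ipc_router_smd_remote_notify() - SMD event callback for a transport
 * @_dev: Private pointer registered with smd_named_open_on_edge(), i.e. the
 *        owning msm_ipc_router_smd_xprt.
 * @event: SMD event (SMD_EVENT_DATA, SMD_EVENT_OPEN or SMD_EVENT_CLOSE).
 *
 * DATA events schedule the read worker and wake any blocked writer.
 * OPEN/CLOSE events clear or set ss_reset and queue a work item to notify
 * the IPC Router core; the work item is allocated with GFP_ATOMIC since
 * this callback may run in atomic context.
 */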
static void msm_ipc_router_smd_remote_notify(void *_dev, unsigned event)
{
	unsigned long flags;
	struct msm_ipc_router_smd_xprt *smd_xprtp;
	struct msm_ipc_router_smd_xprt_work *xprt_work;

	smd_xprtp = (struct msm_ipc_router_smd_xprt *)_dev;
	if (!smd_xprtp)
		return;

	switch (event) {
	case SMD_EVENT_DATA:
		if (smd_read_avail(smd_xprtp->channel))
			queue_delayed_work(smd_xprtp->smd_xprt_wq,
					   &smd_xprtp->read_work, 0);
		if (smd_write_avail(smd_xprtp->channel))
			wake_up(&smd_xprtp->write_avail_wait_q);
		break;

	case SMD_EVENT_OPEN:
		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
		smd_xprtp->ss_reset = 0;
		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
		xprt_work = kmalloc(sizeof(struct msm_ipc_router_smd_xprt_work),
				    GFP_ATOMIC);
		if (!xprt_work) {
			pr_err("%s: Couldn't notify %d event to IPC Router\n",
				__func__, event);
			return;
		}
		xprt_work->xprt = &smd_xprtp->xprt;
		INIT_WORK(&xprt_work->work, smd_xprt_open_event);
		queue_work(smd_xprtp->smd_xprt_wq, &xprt_work->work);
		break;

	case SMD_EVENT_CLOSE:
		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
		smd_xprtp->ss_reset = 1;
		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
		wake_up(&smd_xprtp->write_avail_wait_q);
		xprt_work = kmalloc(sizeof(struct msm_ipc_router_smd_xprt_work),
				    GFP_ATOMIC);
		if (!xprt_work) {
			pr_err("%s: Couldn't notify %d event to IPC Router\n",
				__func__, event);
			return;
		}
		xprt_work->xprt = &smd_xprtp->xprt;
		INIT_WORK(&xprt_work->work, smd_xprt_close_event);
		queue_work(smd_xprtp->smd_xprt_wq, &xprt_work->work);
		break;
	}
}

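/*
 * Bring up the remote processor that owns the given SMD edge through the
 * subsystem restart framework. Returns a PIL handle on success, or NULL if
 * the edge has no associated subsystem or the load fails.
 */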
static void *msm_ipc_load_subsystem(uint32_t edge)
{
	void *pil = NULL;
	const char *peripheral;

	peripheral = smd_edge_to_subsystem(edge);
	if (peripheral) {
		pil = subsystem_get(peripheral);
		if (IS_ERR(pil)) {
			pr_err("%s: Failed to load %s\n",
				__func__, peripheral);
			pil = NULL;
		}
	}
	return pil;
}

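/**
 * msm_ipc_router_smd_remote_probe() - Platform driver probe for an SMD xprt
 * @pdev: Platform device matching one of the configured SMD channels.
 *
 * Looks up the matching smd_xprt_cfg entry, creates a dedicated workqueue,
 * initializes the transport callbacks and per-transport state, loads the
 * remote subsystem and opens the SMD channel. On channel-open failure the
 * subsystem reference and workqueue are released again.
 */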
static int msm_ipc_router_smd_remote_probe(struct platform_device *pdev)
{
	int rc;
	int id;		/* Index into the smd_xprt_cfg table */

	id = find_smd_xprt_cfg(pdev);
	if (id < 0) {
		pr_err("%s: called for unknown ch %s\n",
			__func__, pdev->name);
		return id;
	}

	smd_remote_xprt[id].smd_xprt_wq =
		create_singlethread_workqueue(pdev->name);
	if (!smd_remote_xprt[id].smd_xprt_wq) {
		pr_err("%s: WQ creation failed for %s\n",
			__func__, pdev->name);
		return -EFAULT;
	}

	smd_remote_xprt[id].xprt.name = smd_xprt_cfg[id].xprt_name;
	smd_remote_xprt[id].xprt.link_id = smd_xprt_cfg[id].link_id;
	smd_remote_xprt[id].xprt.read_avail = NULL;
	smd_remote_xprt[id].xprt.read = NULL;
	smd_remote_xprt[id].xprt.write_avail =
		msm_ipc_router_smd_remote_write_avail;
	smd_remote_xprt[id].xprt.write = msm_ipc_router_smd_remote_write;
	smd_remote_xprt[id].xprt.close = msm_ipc_router_smd_remote_close;
	smd_remote_xprt[id].xprt.priv = NULL;

	init_waitqueue_head(&smd_remote_xprt[id].write_avail_wait_q);
	smd_remote_xprt[id].in_pkt = NULL;
	smd_remote_xprt[id].is_partial_in_pkt = 0;
	INIT_DELAYED_WORK(&smd_remote_xprt[id].read_work, smd_xprt_read_data);
	spin_lock_init(&smd_remote_xprt[id].ss_reset_lock);
	smd_remote_xprt[id].ss_reset = 0;

	smd_remote_xprt[id].pil = msm_ipc_load_subsystem(
					smd_xprt_cfg[id].edge);
	rc = smd_named_open_on_edge(smd_xprt_cfg[id].ch_name,
				    smd_xprt_cfg[id].edge,
				    &smd_remote_xprt[id].channel,
				    &smd_remote_xprt[id],
				    msm_ipc_router_smd_remote_notify);
	if (rc < 0) {
		pr_err("%s: Channel open failed for %s\n",
			__func__, smd_xprt_cfg[id].ch_name);
		if (smd_remote_xprt[id].pil) {
			subsystem_put(smd_remote_xprt[id].pil);
			smd_remote_xprt[id].pil = NULL;
		}
		destroy_workqueue(smd_remote_xprt[id].smd_xprt_wq);
		return rc;
	}

	smd_disable_read_intr(smd_remote_xprt[id].channel);

	smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RPCINIT);

	return 0;
}

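/*
 * Load the default IPC Router node (the modem subsystem). Exported so that
 * clients can hold a reference to the modem for the duration of their IPC
 * Router usage; the handle is released with msm_ipc_unload_default_node().
 */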
void *msm_ipc_load_default_node(void)
{
	void *pil = NULL;
	const char *peripheral;

	peripheral = smd_edge_to_subsystem(SMD_APPS_MODEM);
	if (peripheral && !strncmp(peripheral, "modem", 6)) {
		pil = subsystem_get(peripheral);
		if (IS_ERR(pil)) {
			pr_err("%s: Failed to load %s\n",
				__func__, peripheral);
			pil = NULL;
		}
	}
	return pil;
}
EXPORT_SYMBOL(msm_ipc_load_default_node);

void msm_ipc_unload_default_node(void *pil)
{
	if (pil)
		subsystem_put(pil);
}
EXPORT_SYMBOL(msm_ipc_unload_default_node);

static struct platform_driver msm_ipc_router_smd_remote_driver[] = {
	{
		.probe		= msm_ipc_router_smd_remote_probe,
		.driver		= {
			.name	= "RPCRPY_CNTL",
			.owner	= THIS_MODULE,
		},
	},
	{
		.probe		= msm_ipc_router_smd_remote_probe,
		.driver		= {
			.name	= "IPCRTR",
			.owner	= THIS_MODULE,
		},
	},
};

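/*
 * Register one platform driver per distinct SMD channel name; SMD creates a
 * platform device for every channel/edge pair, and probe maps each device
 * back onto its smd_xprt_cfg entry.
 */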
static int __init msm_ipc_router_smd_init(void)
{
	int i, ret, rc = 0;
	BUG_ON(ARRAY_SIZE(smd_xprt_cfg) != NUM_SMD_XPRTS);
	for (i = 0; i < ARRAY_SIZE(msm_ipc_router_smd_remote_driver); i++) {
		ret = platform_driver_register(
			&msm_ipc_router_smd_remote_driver[i]);
		if (ret) {
			pr_err("%s: Failed to register platform driver for"
			       " xprt%d. Continuing...\n", __func__, i);
			rc = ret;
		}
	}
	return rc;
}

module_init(msm_ipc_router_smd_init);
MODULE_DESCRIPTION("IPC Router SMD XPRT");
MODULE_LICENSE("GPL v2");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700538MODULE_LICENSE("GPL v2");