blob: 307b6aeef08a4c8df48f3ac8991c56dcf070c38c [file] [log] [blame]
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -07001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/*
14 * IPC ROUTER SMD XPRT module.
15 */
16#define DEBUG
17
18#include <linux/platform_device.h>
19#include <linux/types.h>
20
21#include <mach/msm_smd.h>
Karthikeyan Ramasubramanian6b963bd2012-05-01 11:27:54 -060022#include <mach/peripheral-loader.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070023
24#include "ipc_router.h"
25#include "smd_private.h"
26
/* Runtime debug switch: non-zero enables the D() trace messages below.
 * Writable at runtime via /sys/module/.../parameters/debug_mask.
 */
static int msm_ipc_router_smd_xprt_debug_mask;
module_param_named(debug_mask, msm_ipc_router_smd_xprt_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#if defined(DEBUG)
/* Debug trace: compiled in (DEBUG is defined above) but gated at runtime
 * by the debug_mask module parameter.
 */
#define D(x...) do { \
if (msm_ipc_router_smd_xprt_debug_mask) \
	pr_info(x); \
} while (0)
#else
#define D(x...) do { } while (0)
#endif

/* Smallest read chunk worth assembling: router header plus one control
 * message. Reads are deferred until at least this much (or the whole
 * packet, if smaller) is available.
 */
#define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))

/* Number of entries in smd_xprt_cfg[] / smd_remote_xprt[]. */
#define NUM_SMD_XPRTS 3
/* Room for an SMD channel name plus an "ipc_rtr_"-style prefix/suffix. */
#define XPRT_NAME_LEN (SMD_MAX_CH_NAME_LEN + 12)
44
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045struct msm_ipc_router_smd_xprt {
46 struct msm_ipc_router_xprt xprt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047 smd_channel_t *channel;
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -070048 struct workqueue_struct *smd_xprt_wq;
49 wait_queue_head_t write_avail_wait_q;
50 struct rr_packet *in_pkt;
51 int is_partial_in_pkt;
52 struct delayed_work read_work;
53 spinlock_t ss_reset_lock; /*Subsystem reset lock*/
54 int ss_reset;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055};
56
Karthikeyan Ramasubramaniandd7daab2011-09-30 15:16:59 -060057struct msm_ipc_router_smd_xprt_work {
58 struct msm_ipc_router_xprt *xprt;
59 struct work_struct work;
60};
61
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062static void smd_xprt_read_data(struct work_struct *work);
Karthikeyan Ramasubramaniandd7daab2011-09-30 15:16:59 -060063static void smd_xprt_open_event(struct work_struct *work);
64static void smd_xprt_close_event(struct work_struct *work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -070066struct msm_ipc_router_smd_xprt_config {
67 char ch_name[SMD_MAX_CH_NAME_LEN];
68 char xprt_name[XPRT_NAME_LEN];
69 uint32_t edge;
70 uint32_t link_id;
71};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070072
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -070073struct msm_ipc_router_smd_xprt_config smd_xprt_cfg[] = {
74 {"RPCRPY_CNTL", "ipc_rtr_smd_rpcrpy_cntl", SMD_APPS_MODEM, 1},
75 {"IPCRTR", "ipc_rtr_smd_ipcrtr", SMD_APPS_MODEM, 1},
Karthikeyan Ramasubramanianccc47262012-03-07 11:59:33 -070076 {"IPCRTR", "ipc_rtr_q6_ipcrtr", SMD_APPS_QDSP, 1},
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -070077};
Karthikeyan Ramasubramanianff6fbae2011-06-09 11:13:19 -060078
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -070079static struct msm_ipc_router_smd_xprt smd_remote_xprt[NUM_SMD_XPRTS];
80
Karthikeyan Ramasubramanianccc47262012-03-07 11:59:33 -070081static int find_smd_xprt_cfg(struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070082{
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -070083 int i;
84
85 for (i = 0; i < NUM_SMD_XPRTS; i++) {
Karthikeyan Ramasubramanianccc47262012-03-07 11:59:33 -070086 if (!strncmp(pdev->name, smd_xprt_cfg[i].ch_name, 20) &&
87 (pdev->id == smd_xprt_cfg[i].edge))
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -070088 return i;
89 }
90
91 return -ENODEV;
92}
93
94static int msm_ipc_router_smd_remote_write_avail(
95 struct msm_ipc_router_xprt *xprt)
96{
97 struct msm_ipc_router_smd_xprt *smd_xprtp =
98 container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
99
100 return smd_write_avail(smd_xprtp->channel);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700101}
102
/**
 * msm_ipc_router_smd_remote_write() - Send one router packet over SMD.
 * @data: struct rr_packet to send (fragment queue of sk_buffs).
 * @len:  total payload length; must equal pkt->length.
 * @xprt: transport to send on.
 *
 * Blocks (may sleep; must be called from process context) until the whole
 * packet plus alignment padding has been handed to SMD, or the remote
 * subsystem resets.
 *
 * Returns @len on success, -EINVAL on bad arguments, -ENETRESET if the
 * channel was reset mid-write, or the smd_write_start() error after
 * retries are exhausted.
 */
static int msm_ipc_router_smd_remote_write(void *data,
					   uint32_t len,
					   struct msm_ipc_router_xprt *xprt)
{
	struct rr_packet *pkt = (struct rr_packet *)data;
	struct sk_buff *ipc_rtr_pkt;
	int align_sz, align_data = 0;
	int offset, sz_written = 0;
	int ret, num_retries = 0;
	unsigned long flags;
	struct msm_ipc_router_smd_xprt *smd_xprtp =
		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);

	if (!pkt)
		return -EINVAL;

	if (!len || pkt->length != len)
		return -EINVAL;

	/* Pad the transfer up to the channel's alignment requirement. */
	align_sz = ALIGN_SIZE(pkt->length);
	/* Reserve space for the whole packet; retry up to 5 times with a
	 * 50 ms back-off, bailing out early if the subsystem resets.
	 */
	while ((ret = smd_write_start(smd_xprtp->channel,
				      (len + align_sz))) < 0) {
		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
		if (smd_xprtp->ss_reset) {
			spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
					       flags);
			pr_err("%s: %s chnl reset\n", __func__, xprt->name);
			return -ENETRESET;
		}
		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
		if (num_retries >= 5) {
			pr_err("%s: Error %d @smd_write_start for %s\n",
			       __func__, ret, xprt->name);
			return ret;
		}
		msleep(50);
		num_retries++;
	}

	D("%s: Ready to write\n", __func__);
	/* Stream each fragment out, sleeping on write_avail_wait_q whenever
	 * the FIFO is full (the notify callback wakes us on SMD_EVENT_DATA).
	 */
	skb_queue_walk(pkt->pkt_fragment_q, ipc_rtr_pkt) {
		offset = 0;
		while (offset < ipc_rtr_pkt->len) {
			/* Only take read interrupts while we are actually
			 * waiting for space; keep them off otherwise.
			 */
			if (!smd_write_avail(smd_xprtp->channel))
				smd_enable_read_intr(smd_xprtp->channel);

			wait_event(smd_xprtp->write_avail_wait_q,
				   (smd_write_avail(smd_xprtp->channel) ||
				    smd_xprtp->ss_reset));
			smd_disable_read_intr(smd_xprtp->channel);
			spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
			if (smd_xprtp->ss_reset) {
				spin_unlock_irqrestore(
					&smd_xprtp->ss_reset_lock, flags);
				pr_err("%s: %s chnl reset\n",
				       __func__, xprt->name);
				return -ENETRESET;
			}
			spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
					       flags);

			/* NOTE(review): sz_written is not checked for a
			 * negative error return; presumably
			 * smd_write_segment() returns >= 0 here once space
			 * is available — confirm against the SMD API.
			 */
			sz_written = smd_write_segment(smd_xprtp->channel,
					ipc_rtr_pkt->data + offset,
					(ipc_rtr_pkt->len - offset), 0);
			offset += sz_written;
			sz_written = 0;
		}
		D("%s: Wrote %d bytes over %s\n",
		  __func__, offset, xprt->name);
	}

	/* Emit the alignment padding, waiting for enough space first. */
	if (align_sz) {
		if (smd_write_avail(smd_xprtp->channel) < align_sz)
			smd_enable_read_intr(smd_xprtp->channel);

		wait_event(smd_xprtp->write_avail_wait_q,
			   ((smd_write_avail(smd_xprtp->channel) >=
			     align_sz) || smd_xprtp->ss_reset));
		smd_disable_read_intr(smd_xprtp->channel);
		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
		if (smd_xprtp->ss_reset) {
			spin_unlock_irqrestore(
				&smd_xprtp->ss_reset_lock, flags);
			pr_err("%s: %s chnl reset\n",
			       __func__, xprt->name);
			return -ENETRESET;
		}
		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
				       flags);

		smd_write_segment(smd_xprtp->channel,
				  &align_data, align_sz, 0);
		D("%s: Wrote %d align bytes over %s\n",
		  __func__, align_sz, xprt->name);
	}
	if (!smd_write_end(smd_xprtp->channel))
		D("%s: Finished writing\n", __func__);
	return len;
}
202
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700203static int msm_ipc_router_smd_remote_close(struct msm_ipc_router_xprt *xprt)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700204{
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700205 struct msm_ipc_router_smd_xprt *smd_xprtp =
206 container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
207
208 return smd_close(smd_xprtp->channel);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700209}
210
211static void smd_xprt_read_data(struct work_struct *work)
212{
213 int pkt_size, sz_read, sz;
214 struct sk_buff *ipc_rtr_pkt;
215 void *data;
Karthikeyan Ramasubramanianff6fbae2011-06-09 11:13:19 -0600216 unsigned long flags;
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700217 struct delayed_work *rwork = to_delayed_work(work);
218 struct msm_ipc_router_smd_xprt *smd_xprtp =
219 container_of(rwork, struct msm_ipc_router_smd_xprt, read_work);
Karthikeyan Ramasubramanianff6fbae2011-06-09 11:13:19 -0600220
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700221 spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
222 if (smd_xprtp->ss_reset) {
223 spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
224 if (smd_xprtp->in_pkt)
225 release_pkt(smd_xprtp->in_pkt);
226 smd_xprtp->is_partial_in_pkt = 0;
227 pr_err("%s: %s channel reset\n",
228 __func__, smd_xprtp->xprt.name);
Karthikeyan Ramasubramanianff6fbae2011-06-09 11:13:19 -0600229 return;
230 }
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700231 spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232
233 D("%s pkt_size: %d, read_avail: %d\n", __func__,
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700234 smd_cur_packet_size(smd_xprtp->channel),
235 smd_read_avail(smd_xprtp->channel));
236 while ((pkt_size = smd_cur_packet_size(smd_xprtp->channel)) &&
237 smd_read_avail(smd_xprtp->channel)) {
238 if (!smd_xprtp->is_partial_in_pkt) {
239 smd_xprtp->in_pkt = kzalloc(sizeof(struct rr_packet),
240 GFP_KERNEL);
241 if (!smd_xprtp->in_pkt) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242 pr_err("%s: Couldn't alloc rr_packet\n",
243 __func__);
244 return;
245 }
246
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700247 smd_xprtp->in_pkt->pkt_fragment_q =
248 kmalloc(sizeof(struct sk_buff_head),
249 GFP_KERNEL);
250 if (!smd_xprtp->in_pkt->pkt_fragment_q) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700251 pr_err("%s: Couldn't alloc pkt_fragment_q\n",
252 __func__);
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700253 kfree(smd_xprtp->in_pkt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700254 return;
255 }
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700256 skb_queue_head_init(smd_xprtp->in_pkt->pkt_fragment_q);
257 smd_xprtp->is_partial_in_pkt = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700258 D("%s: Allocated rr_packet\n", __func__);
259 }
260
Karthikeyan Ramasubramanian51247a02011-10-12 14:53:15 -0600261 if (((pkt_size >= MIN_FRAG_SZ) &&
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700262 (smd_read_avail(smd_xprtp->channel) < MIN_FRAG_SZ)) ||
Karthikeyan Ramasubramanian51247a02011-10-12 14:53:15 -0600263 ((pkt_size < MIN_FRAG_SZ) &&
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700264 (smd_read_avail(smd_xprtp->channel) < pkt_size)))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265 return;
266
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700267 sz = smd_read_avail(smd_xprtp->channel);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268 do {
269 ipc_rtr_pkt = alloc_skb(sz, GFP_KERNEL);
270 if (!ipc_rtr_pkt) {
271 if (sz <= (PAGE_SIZE/2)) {
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700272 queue_delayed_work(
273 smd_xprtp->smd_xprt_wq,
274 &smd_xprtp->read_work,
275 msecs_to_jiffies(100));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700276 return;
277 }
278 sz = sz / 2;
279 }
280 } while (!ipc_rtr_pkt);
281
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700282 D("%s: Allocated the sk_buff of size %d\n", __func__, sz);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283 data = skb_put(ipc_rtr_pkt, sz);
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700284 sz_read = smd_read(smd_xprtp->channel, data, sz);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285 if (sz_read != sz) {
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700286 pr_err("%s: Couldn't read %s completely\n",
287 __func__, smd_xprtp->xprt.name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288 kfree_skb(ipc_rtr_pkt);
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700289 release_pkt(smd_xprtp->in_pkt);
290 smd_xprtp->is_partial_in_pkt = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700291 return;
292 }
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700293 skb_queue_tail(smd_xprtp->in_pkt->pkt_fragment_q, ipc_rtr_pkt);
294 smd_xprtp->in_pkt->length += sz_read;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700295 if (sz_read != pkt_size)
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700296 smd_xprtp->is_partial_in_pkt = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700297 else
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700298 smd_xprtp->is_partial_in_pkt = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700299
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700300 if (!smd_xprtp->is_partial_in_pkt) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700301 D("%s: Packet size read %d\n",
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700302 __func__, smd_xprtp->in_pkt->length);
303 msm_ipc_router_xprt_notify(&smd_xprtp->xprt,
304 IPC_ROUTER_XPRT_EVENT_DATA,
305 (void *)smd_xprtp->in_pkt);
306 release_pkt(smd_xprtp->in_pkt);
307 smd_xprtp->in_pkt = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700308 }
309 }
310}
311
Karthikeyan Ramasubramaniandd7daab2011-09-30 15:16:59 -0600312static void smd_xprt_open_event(struct work_struct *work)
313{
314 struct msm_ipc_router_smd_xprt_work *xprt_work =
315 container_of(work, struct msm_ipc_router_smd_xprt_work, work);
316
317 msm_ipc_router_xprt_notify(xprt_work->xprt,
318 IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700319 D("%s: Notified IPC Router of %s OPEN\n",
320 __func__, xprt_work->xprt->name);
Karthikeyan Ramasubramaniandd7daab2011-09-30 15:16:59 -0600321 kfree(xprt_work);
322}
323
324static void smd_xprt_close_event(struct work_struct *work)
325{
326 struct msm_ipc_router_smd_xprt_work *xprt_work =
327 container_of(work, struct msm_ipc_router_smd_xprt_work, work);
328
329 msm_ipc_router_xprt_notify(xprt_work->xprt,
330 IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700331 D("%s: Notified IPC Router of %s CLOSE\n",
332 __func__, xprt_work->xprt->name);
Karthikeyan Ramasubramaniandd7daab2011-09-30 15:16:59 -0600333 kfree(xprt_work);
334}
335
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700336static void msm_ipc_router_smd_remote_notify(void *_dev, unsigned event)
337{
Karthikeyan Ramasubramanianff6fbae2011-06-09 11:13:19 -0600338 unsigned long flags;
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700339 struct msm_ipc_router_smd_xprt *smd_xprtp;
Karthikeyan Ramasubramaniandd7daab2011-09-30 15:16:59 -0600340 struct msm_ipc_router_smd_xprt_work *xprt_work;
Karthikeyan Ramasubramanianff6fbae2011-06-09 11:13:19 -0600341
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700342 smd_xprtp = (struct msm_ipc_router_smd_xprt *)_dev;
343 if (!smd_xprtp)
344 return;
345
Karthikeyan Ramasubramanianff6fbae2011-06-09 11:13:19 -0600346 switch (event) {
347 case SMD_EVENT_DATA:
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700348 if (smd_read_avail(smd_xprtp->channel))
349 queue_delayed_work(smd_xprtp->smd_xprt_wq,
350 &smd_xprtp->read_work, 0);
351 if (smd_write_avail(smd_xprtp->channel))
352 wake_up(&smd_xprtp->write_avail_wait_q);
Karthikeyan Ramasubramanianff6fbae2011-06-09 11:13:19 -0600353 break;
354
355 case SMD_EVENT_OPEN:
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700356 spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
357 smd_xprtp->ss_reset = 0;
358 spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
Karthikeyan Ramasubramaniandd7daab2011-09-30 15:16:59 -0600359 xprt_work = kmalloc(sizeof(struct msm_ipc_router_smd_xprt_work),
360 GFP_ATOMIC);
361 if (!xprt_work) {
362 pr_err("%s: Couldn't notify %d event to IPC Router\n",
363 __func__, event);
364 return;
365 }
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700366 xprt_work->xprt = &smd_xprtp->xprt;
Karthikeyan Ramasubramaniandd7daab2011-09-30 15:16:59 -0600367 INIT_WORK(&xprt_work->work, smd_xprt_open_event);
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700368 queue_work(smd_xprtp->smd_xprt_wq, &xprt_work->work);
Karthikeyan Ramasubramanianff6fbae2011-06-09 11:13:19 -0600369 break;
370
371 case SMD_EVENT_CLOSE:
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700372 spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
373 smd_xprtp->ss_reset = 1;
374 spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
375 wake_up(&smd_xprtp->write_avail_wait_q);
Karthikeyan Ramasubramaniandd7daab2011-09-30 15:16:59 -0600376 xprt_work = kmalloc(sizeof(struct msm_ipc_router_smd_xprt_work),
377 GFP_ATOMIC);
378 if (!xprt_work) {
379 pr_err("%s: Couldn't notify %d event to IPC Router\n",
380 __func__, event);
381 return;
382 }
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700383 xprt_work->xprt = &smd_xprtp->xprt;
Karthikeyan Ramasubramaniandd7daab2011-09-30 15:16:59 -0600384 INIT_WORK(&xprt_work->work, smd_xprt_close_event);
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700385 queue_work(smd_xprtp->smd_xprt_wq, &xprt_work->work);
Karthikeyan Ramasubramanianff6fbae2011-06-09 11:13:19 -0600386 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700387 }
388}
389
390static int msm_ipc_router_smd_remote_probe(struct platform_device *pdev)
391{
392 int rc;
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700393 int id; /*Index into the smd_xprt_cfg table*/
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700394
Karthikeyan Ramasubramanianccc47262012-03-07 11:59:33 -0700395 id = find_smd_xprt_cfg(pdev);
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700396 if (id < 0) {
397 pr_err("%s: called for unknown ch %s\n",
398 __func__, pdev->name);
399 return id;
400 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700401
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700402 smd_remote_xprt[id].smd_xprt_wq =
403 create_singlethread_workqueue(pdev->name);
404 if (!smd_remote_xprt[id].smd_xprt_wq) {
405 pr_err("%s: WQ creation failed for %s\n",
406 __func__, pdev->name);
407 return -EFAULT;
408 }
409
410 smd_remote_xprt[id].xprt.name = smd_xprt_cfg[id].xprt_name;
411 smd_remote_xprt[id].xprt.link_id = smd_xprt_cfg[id].link_id;
412 smd_remote_xprt[id].xprt.read_avail = NULL;
413 smd_remote_xprt[id].xprt.read = NULL;
414 smd_remote_xprt[id].xprt.write_avail =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700415 msm_ipc_router_smd_remote_write_avail;
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700416 smd_remote_xprt[id].xprt.write = msm_ipc_router_smd_remote_write;
417 smd_remote_xprt[id].xprt.close = msm_ipc_router_smd_remote_close;
418 smd_remote_xprt[id].xprt.priv = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700419
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700420 init_waitqueue_head(&smd_remote_xprt[id].write_avail_wait_q);
421 smd_remote_xprt[id].in_pkt = NULL;
422 smd_remote_xprt[id].is_partial_in_pkt = 0;
423 INIT_DELAYED_WORK(&smd_remote_xprt[id].read_work, smd_xprt_read_data);
424 spin_lock_init(&smd_remote_xprt[id].ss_reset_lock);
425 smd_remote_xprt[id].ss_reset = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700426
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700427 rc = smd_named_open_on_edge(smd_xprt_cfg[id].ch_name,
428 smd_xprt_cfg[id].edge,
429 &smd_remote_xprt[id].channel,
430 &smd_remote_xprt[id],
431 msm_ipc_router_smd_remote_notify);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700432 if (rc < 0) {
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700433 pr_err("%s: Channel open failed for %s\n",
434 __func__, smd_xprt_cfg[id].ch_name);
435 destroy_workqueue(smd_remote_xprt[id].smd_xprt_wq);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700436 return rc;
437 }
438
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700439 smd_disable_read_intr(smd_remote_xprt[id].channel);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700440
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700441 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RPCINIT);
442
443 return 0;
444}
445
Karthikeyan Ramasubramanian6b963bd2012-05-01 11:27:54 -0600446void *msm_ipc_load_default_node(void)
447{
448 void *pil = NULL;
449 const char *peripheral;
450
451 peripheral = smd_edge_to_subsystem(SMD_APPS_MODEM);
452 if (peripheral && !strncmp(peripheral, "modem", 6)) {
453 pil = pil_get(peripheral);
454 if (IS_ERR(pil)) {
455 pr_err("%s: Failed to load %s\n",
456 __func__, peripheral);
457 pil = NULL;
458 }
459 }
460 return pil;
461}
462EXPORT_SYMBOL(msm_ipc_load_default_node);
463
/* Release a PIL handle obtained from msm_ipc_load_default_node().
 * Safe to call with NULL (no-op).
 */
void msm_ipc_unload_default_node(void *pil)
{
	if (!pil)
		return;
	pil_put(pil);
}
EXPORT_SYMBOL(msm_ipc_unload_default_node);
470
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700471static struct platform_driver msm_ipc_router_smd_remote_driver[] = {
472 {
473 .probe = msm_ipc_router_smd_remote_probe,
474 .driver = {
475 .name = "RPCRPY_CNTL",
476 .owner = THIS_MODULE,
477 },
478 },
479 {
480 .probe = msm_ipc_router_smd_remote_probe,
481 .driver = {
482 .name = "IPCRTR",
483 .owner = THIS_MODULE,
484 },
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485 },
486};
487
488static int __init msm_ipc_router_smd_init(void)
489{
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700490 int i, ret, rc = 0;
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700491 BUG_ON(ARRAY_SIZE(smd_xprt_cfg) != NUM_SMD_XPRTS);
Karthikeyan Ramasubramanianccc47262012-03-07 11:59:33 -0700492 for (i = 0; i < ARRAY_SIZE(msm_ipc_router_smd_remote_driver); i++) {
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700493 ret = platform_driver_register(
494 &msm_ipc_router_smd_remote_driver[i]);
495 if (ret) {
496 pr_err("%s: Failed to register platform driver for"
497 " xprt%d. Continuing...\n", __func__, i);
498 rc = ret;
499 }
500 }
501 return rc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700502}
503
504module_init(msm_ipc_router_smd_init);
Karthikeyan Ramasubramanian8cec5922012-02-16 17:41:58 -0700505MODULE_DESCRIPTION("IPC Router SMD XPRT");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700506MODULE_LICENSE("GPL v2");