blob: 52ce17f9e919c4c9d35efe05a5d5f04d845c4d3c [file] [log] [blame]
/* drivers/tty/smux_loopback.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/err.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/smux.h>
#include "smux_private.h"
22
#define SMUX_LOOP_FIFO_SIZE 128

static void smux_loopback_rx_worker(struct work_struct *work);
/* single-threaded workqueue on which the pseudo-remote RX worker runs */
static struct workqueue_struct *smux_loopback_wq;
static DECLARE_WORK(smux_loopback_work, smux_loopback_rx_worker);
/* fifo of struct smux_pkt_t * pointers queued by smux_tx_loopback() */
static struct kfifo smux_loop_pkt_fifo;
/* protects smux_loop_pkt_fifo */
static DEFINE_SPINLOCK(hw_fn_lock);
30
31/**
32 * Initialize loopback framework (called by n_smux.c).
33 */
34int smux_loopback_init(void)
35{
36 int ret = 0;
37
38 spin_lock_init(&hw_fn_lock);
39 smux_loopback_wq = create_singlethread_workqueue("smux_loopback_wq");
40 if (IS_ERR(smux_loopback_wq)) {
41 pr_err("%s: failed to create workqueue\n", __func__);
42 return -ENOMEM;
43 }
44
45 ret |= kfifo_alloc(&smux_loop_pkt_fifo,
46 SMUX_LOOP_FIFO_SIZE * sizeof(struct smux_pkt_t *),
47 GFP_KERNEL);
48
49 return ret;
50}
51
52/**
53 * Simulate a write to the TTY hardware by duplicating
54 * the TX packet and putting it into the RX queue.
55 *
56 * @pkt Packet to write
57 *
58 * @returns 0 on success
59 */
60int smux_tx_loopback(struct smux_pkt_t *pkt_ptr)
61{
62 struct smux_pkt_t *send_pkt;
63 unsigned long flags;
64 int i;
65 int ret;
66
67 /* duplicate packet */
68 send_pkt = smux_alloc_pkt();
69 send_pkt->hdr = pkt_ptr->hdr;
70 if (pkt_ptr->hdr.payload_len) {
71 ret = smux_alloc_pkt_payload(send_pkt);
72 if (ret) {
73 ret = -ENOMEM;
74 goto out;
75 }
76 memcpy(send_pkt->payload, pkt_ptr->payload,
77 pkt_ptr->hdr.payload_len);
78 }
79
80 /* queue duplicate as pseudo-RX data */
81 spin_lock_irqsave(&hw_fn_lock, flags);
82 i = kfifo_avail(&smux_loop_pkt_fifo);
83 if (i < sizeof(struct smux_pkt_t *)) {
84 pr_err("%s: no space in fifo\n", __func__);
85 ret = -ENOMEM;
86 goto unlock;
87 }
88
89 i = kfifo_in(&smux_loop_pkt_fifo,
90 &send_pkt,
91 sizeof(struct smux_pkt_t *));
92 if (i < 0) {
93 pr_err("%s: fifo error\n", __func__);
94 ret = -ENOMEM;
95 goto unlock;
96 }
97 queue_work(smux_loopback_wq, &smux_loopback_work);
98 ret = 0;
99
100unlock:
101 spin_unlock_irqrestore(&hw_fn_lock, flags);
102out:
103 return ret;
104}
105
106/**
107 * Receive loopback byte processor.
108 *
109 * @pkt Incoming packet
110 */
111static void smux_loopback_rx_byte(struct smux_pkt_t *pkt)
112{
113 static int simulated_retry_cnt;
114 const char ack = SMUX_WAKEUP_ACK;
115
116 switch (pkt->hdr.flags) {
117 case SMUX_WAKEUP_REQ:
118 /* reply with ACK after appropriate delays */
119 ++simulated_retry_cnt;
120 if (simulated_retry_cnt >= smux_simulate_wakeup_delay) {
121 pr_err("%s: completed %d of %d\n",
122 __func__, simulated_retry_cnt,
123 smux_simulate_wakeup_delay);
124 pr_err("%s: simulated wakeup\n", __func__);
125 simulated_retry_cnt = 0;
126 smux_rx_state_machine(&ack, 1, 0);
127 } else {
128 /* force retry */
129 pr_err("%s: dropping wakeup request %d of %d\n",
130 __func__, simulated_retry_cnt,
131 smux_simulate_wakeup_delay);
132 }
133 break;
134 case SMUX_WAKEUP_ACK:
135 /* this shouldn't happen since we don't send requests */
136 pr_err("%s: wakeup ACK unexpected\n", __func__);
137 break;
138
139 default:
140 /* invalid character */
141 pr_err("%s: invalid character 0x%x\n",
142 __func__, (unsigned)pkt->hdr.flags);
143 break;
144 }
145}
146
147/**
148 * Simulated remote hardware used for local loopback testing.
149 *
150 * @work Not used
151 */
152static void smux_loopback_rx_worker(struct work_struct *work)
153{
154 struct smux_pkt_t *pkt;
155 struct smux_pkt_t reply_pkt;
156 char *data;
157 int len;
158 int lcid;
159 int i;
160 unsigned long flags;
161
162 data = kzalloc(SMUX_MAX_PKT_SIZE, GFP_ATOMIC);
163
164 spin_lock_irqsave(&hw_fn_lock, flags);
165 while (kfifo_len(&smux_loop_pkt_fifo) >= sizeof(struct smux_pkt_t *)) {
166 i = kfifo_out(&smux_loop_pkt_fifo, &pkt,
167 sizeof(struct smux_pkt_t *));
168 spin_unlock_irqrestore(&hw_fn_lock, flags);
169
170 if (pkt->hdr.magic != SMUX_MAGIC) {
171 pr_err("%s: invalid magic %x\n", __func__,
172 pkt->hdr.magic);
173 return;
174 }
175
176 lcid = pkt->hdr.lcid;
177 if (smux_assert_lch_id(lcid)) {
178 pr_err("%s: invalid channel id %d\n", __func__, lcid);
179 return;
180 }
181
182 switch (pkt->hdr.cmd) {
183 case SMUX_CMD_OPEN_LCH:
184 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
185 break;
186
187 /* Reply with Open ACK */
188 smux_init_pkt(&reply_pkt);
189 reply_pkt.hdr.lcid = lcid;
190 reply_pkt.hdr.cmd = SMUX_CMD_OPEN_LCH;
191 reply_pkt.hdr.flags = SMUX_CMD_OPEN_ACK
192 | SMUX_CMD_OPEN_POWER_COLLAPSE;
193 reply_pkt.hdr.payload_len = 0;
194 reply_pkt.hdr.pad_len = 0;
195 smux_serialize(&reply_pkt, data, &len);
196 smux_rx_state_machine(data, len, 0);
197
198 /* Send Remote Open */
199 smux_init_pkt(&reply_pkt);
200 reply_pkt.hdr.lcid = lcid;
201 reply_pkt.hdr.cmd = SMUX_CMD_OPEN_LCH;
202 reply_pkt.hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
203 reply_pkt.hdr.payload_len = 0;
204 reply_pkt.hdr.pad_len = 0;
205 smux_serialize(&reply_pkt, data, &len);
206 smux_rx_state_machine(data, len, 0);
207 break;
208
209 case SMUX_CMD_CLOSE_LCH:
210 if (pkt->hdr.flags == SMUX_CMD_CLOSE_ACK)
211 break;
212
213 /* Reply with Close ACK */
214 smux_init_pkt(&reply_pkt);
215 reply_pkt.hdr.lcid = lcid;
216 reply_pkt.hdr.cmd = SMUX_CMD_CLOSE_LCH;
217 reply_pkt.hdr.flags = SMUX_CMD_CLOSE_ACK;
218 reply_pkt.hdr.payload_len = 0;
219 reply_pkt.hdr.pad_len = 0;
220 smux_serialize(&reply_pkt, data, &len);
221 smux_rx_state_machine(data, len, 0);
222
223 /* Send Remote Close */
224 smux_init_pkt(&reply_pkt);
225 reply_pkt.hdr.lcid = lcid;
226 reply_pkt.hdr.cmd = SMUX_CMD_CLOSE_LCH;
227 reply_pkt.hdr.flags = 0;
228 reply_pkt.hdr.payload_len = 0;
229 reply_pkt.hdr.pad_len = 0;
230 smux_serialize(&reply_pkt, data, &len);
231 smux_rx_state_machine(data, len, 0);
232 break;
233
234 case SMUX_CMD_DATA:
235 /* Echo back received data */
236 smux_init_pkt(&reply_pkt);
237 reply_pkt.hdr.lcid = lcid;
238 reply_pkt.hdr.cmd = SMUX_CMD_DATA;
239 reply_pkt.hdr.flags = 0;
240 reply_pkt.hdr.payload_len = pkt->hdr.payload_len;
241 reply_pkt.payload = pkt->payload;
242 reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
243 smux_serialize(&reply_pkt, data, &len);
244 smux_rx_state_machine(data, len, 0);
245 break;
246
247 case SMUX_CMD_STATUS:
248 /* Echo back received status */
249 smux_init_pkt(&reply_pkt);
250 reply_pkt.hdr.lcid = lcid;
251 reply_pkt.hdr.cmd = SMUX_CMD_STATUS;
252 reply_pkt.hdr.flags = pkt->hdr.flags;
253 reply_pkt.hdr.payload_len = 0;
254 reply_pkt.payload = NULL;
255 reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
256 smux_serialize(&reply_pkt, data, &len);
257 smux_rx_state_machine(data, len, 0);
258 break;
259
260 case SMUX_CMD_PWR_CTL:
261 /* reply with ack */
262 smux_init_pkt(&reply_pkt);
263 reply_pkt.hdr.lcid = lcid;
264 reply_pkt.hdr.cmd = SMUX_CMD_PWR_CTL;
265 reply_pkt.hdr.flags = SMUX_CMD_PWR_CTL_SLEEP_REQ
266 | SMUX_CMD_PWR_CTL_ACK;
267 reply_pkt.hdr.payload_len = 0;
268 reply_pkt.payload = NULL;
269 reply_pkt.hdr.pad_len = pkt->hdr.pad_len;
270 smux_serialize(&reply_pkt, data, &len);
271 smux_rx_state_machine(data, len, 0);
272 break;
273
274 case SMUX_CMD_BYTE:
275 smux_loopback_rx_byte(pkt);
276 break;
277
278 default:
279 pr_err("%s: unknown command %d\n",
280 __func__, pkt->hdr.cmd);
281 break;
282 };
283
284 smux_free_pkt(pkt);
285 spin_lock_irqsave(&hw_fn_lock, flags);
286 }
287 spin_unlock_irqrestore(&hw_fn_lock, flags);
288 kfree(data);
289}