blob: 182cd4001645655dbc32c23b73ad25c192618fc5 [file] [log] [blame]
Bar Weiner0dae81b2013-02-14 13:53:54 +02001/*
2 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/wait.h>
15#include <linux/poll.h>
16#include <linux/usb/rmnet_ctrl_qti.h>
17
18#include "u_rmnet.h"
19
/*
 * Per-port state for the QTI rmnet control channel exposed through the
 * /dev/rmnet_ctrl misc device. This driver supports a single static
 * instance (ctrl_port).
 */
struct rmnet_ctrl_qti_port {
	struct grmnet *port_usb;	/* USB-side port; NULL when disconnected */

	bool is_open;			/* device file currently open (guarded by lock) */

	atomic_t connected;		/* 1 while USB cable/function is connected */
	atomic_t line_state;		/* last line state reported via notify_modem */

	/* single-owner exclusion flags, taken via rmnet_ctrl_lock() */
	atomic_t open_excl;
	atomic_t read_excl;
	atomic_t write_excl;
	atomic_t ioctl_excl;

	wait_queue_head_t read_wq;	/* readers sleep here until a packet arrives */

	struct list_head cpkt_req_q;	/* queued control pkts awaiting read (guarded by lock) */

	spinlock_t lock;		/* protects is_open, port_usb and cpkt_req_q */
};
/* The one and only port instance, allocated in gqti_ctrl_init(). */
static struct rmnet_ctrl_qti_port *ctrl_port;
40
41static inline int rmnet_ctrl_lock(atomic_t *excl)
42{
43 if (atomic_inc_return(excl) == 1) {
44 return 0;
45 } else {
46 atomic_dec(excl);
47 return -EBUSY;
48 }
49}
50
/* Release an exclusion counter taken with rmnet_ctrl_lock(). */
static inline void rmnet_ctrl_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
55
56static void rmnet_ctrl_queue_notify(struct rmnet_ctrl_qti_port *port)
57{
58 unsigned long flags;
59 struct rmnet_ctrl_pkt *cpkt = NULL;
60
61 pr_debug("%s: Queue empty packet for QTI", __func__);
62
63 spin_lock_irqsave(&port->lock, flags);
64 if (!port->is_open) {
65 pr_err("%s: rmnet ctrl file handler %p is not open",
66 __func__, port);
67 spin_unlock_irqrestore(&port->lock, flags);
68 return;
69 }
70
71 cpkt = alloc_rmnet_ctrl_pkt(0, GFP_ATOMIC);
72 if (!cpkt) {
73 pr_err("%s: Unable to allocate reset function pkt\n", __func__);
74 spin_unlock_irqrestore(&port->lock, flags);
75 return;
76 }
77
78 list_add_tail(&cpkt->list, &port->cpkt_req_q);
79 spin_unlock_irqrestore(&port->lock, flags);
80
81 pr_debug("%s: Wake up read queue", __func__);
82 wake_up(&port->read_wq);
83}
84
85static int grmnet_ctrl_qti_send_cpkt_tomodem(u8 portno,
86 void *buf, size_t len)
87{
88 unsigned long flags;
89 struct rmnet_ctrl_qti_port *port = ctrl_port;
90 struct rmnet_ctrl_pkt *cpkt;
91
92 if (len > MAX_QTI_PKT_SIZE) {
93 pr_err("given pkt size too big:%d > max_pkt_size:%d\n",
94 len, MAX_QTI_PKT_SIZE);
95 return -EINVAL;
96 }
97
98 cpkt = alloc_rmnet_ctrl_pkt(len, GFP_ATOMIC);
99 if (IS_ERR(cpkt)) {
100 pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
101 return -ENOMEM;
102 }
103
104 memcpy(cpkt->buf, buf, len);
105 cpkt->len = len;
106
107 pr_debug("%s: Add to cpkt_req_q packet with len = %d\n", __func__, len);
108 spin_lock_irqsave(&port->lock, flags);
109
110 /* drop cpkt if port is not open */
111 if (!port->is_open) {
112 pr_err("rmnet file handler %p is not open", port);
113 spin_unlock_irqrestore(&port->lock, flags);
114 free_rmnet_ctrl_pkt(cpkt);
115 return 0;
116 }
117
118 list_add_tail(&cpkt->list, &port->cpkt_req_q);
119 spin_unlock_irqrestore(&port->lock, flags);
120
121 /* wakeup read thread */
122 pr_debug("%s: Wake up read queue", __func__);
123 wake_up(&port->read_wq);
124
125 return 0;
126}
127
/*
 * gqti_ctrl_notify_modem - grmnet "notify_modem" callback.
 *
 * Records the new line state and queues a zero-length packet so the QTI
 * reader wakes and re-reads it. @gptr and @portno are unused; the driver
 * has a single static port. NOTE(review): ctrl_port is dereferenced
 * unconditionally — presumably this callback is only installed after
 * init succeeds; verify against gqti_ctrl_connect().
 */
static void
gqti_ctrl_notify_modem(void *gptr, u8 portno, int val)
{
	struct rmnet_ctrl_qti_port *port = ctrl_port;

	atomic_set(&port->line_state, val);

	/* send 0 len pkt to qti to notify state change */
	rmnet_ctrl_queue_notify(port);
}
138
139int gqti_ctrl_connect(struct grmnet *gr)
140{
141 struct rmnet_ctrl_qti_port *port;
142 unsigned long flags;
143
144 pr_debug("%s: grmnet:%p\n", __func__, gr);
145
146 if (!gr) {
147 pr_err("%s: grmnet port is null\n", __func__);
148 return -ENODEV;
149 }
150
151 port = ctrl_port;
152
153 spin_lock_irqsave(&port->lock, flags);
154 port->port_usb = gr;
155 gr->send_encap_cmd = grmnet_ctrl_qti_send_cpkt_tomodem;
156 gr->notify_modem = gqti_ctrl_notify_modem;
157 spin_unlock_irqrestore(&port->lock, flags);
158
159 atomic_set(&port->connected, 1);
160 wake_up(&port->read_wq);
161
162 if (port && port->port_usb && port->port_usb->connect)
163 port->port_usb->connect(port->port_usb);
164
165 return 0;
166}
167
168void gqti_ctrl_disconnect(struct grmnet *gr)
169{
170 struct rmnet_ctrl_qti_port *port = ctrl_port;
171 unsigned long flags;
172 struct rmnet_ctrl_pkt *cpkt;
173
174 pr_debug("%s: grmnet:%p\n", __func__, gr);
175
176 if (!gr) {
177 pr_err("%s: grmnet port is null\n", __func__);
178 return;
179 }
180
181 if (port && port->port_usb && port->port_usb->disconnect)
182 port->port_usb->disconnect(port->port_usb);
183
184 atomic_set(&port->connected, 0);
185 atomic_set(&port->line_state, 0);
186 spin_lock_irqsave(&port->lock, flags);
187 port->port_usb = 0;
188 gr->send_encap_cmd = 0;
189 gr->notify_modem = 0;
190
191 while (!list_empty(&port->cpkt_req_q)) {
192 cpkt = list_first_entry(&port->cpkt_req_q,
193 struct rmnet_ctrl_pkt, list);
194
195 list_del(&cpkt->list);
196 free_rmnet_ctrl_pkt(cpkt);
197 }
198
199 spin_unlock_irqrestore(&port->lock, flags);
200
201 /* send 0 len pkt to qti to notify state change */
202 rmnet_ctrl_queue_notify(port);
203}
204
205static int rmnet_ctrl_open(struct inode *ip, struct file *fp)
206{
207 unsigned long flags;
208
209 pr_debug("Open rmnet_ctrl_qti device file\n");
210
211 if (rmnet_ctrl_lock(&ctrl_port->open_excl)) {
212 pr_debug("Already opened\n");
213 return -EBUSY;
214 }
215
216 fp->private_data = ctrl_port;
217
218 spin_lock_irqsave(&ctrl_port->lock, flags);
219 ctrl_port->is_open = true;
220 spin_unlock_irqrestore(&ctrl_port->lock, flags);
221
222 return 0;
223}
224
/*
 * release() handler for /dev/rmnet_ctrl: mark the port closed so the
 * queueing paths drop further packets, then give up the open exclusion.
 */
static int rmnet_ctrl_release(struct inode *ip, struct file *fp)
{
	unsigned long flags;
	struct rmnet_ctrl_qti_port *port = fp->private_data;

	pr_debug("Close rmnet control file");

	spin_lock_irqsave(&port->lock, flags);
	port->is_open = false;
	spin_unlock_irqrestore(&port->lock, flags);

	rmnet_ctrl_unlock(&port->open_excl);

	return 0;
}
240
241static ssize_t
242rmnet_ctrl_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
243{
244 struct rmnet_ctrl_qti_port *port = fp->private_data;
245 struct rmnet_ctrl_pkt *cpkt = NULL;
246 unsigned long flags;
247 int ret = 0;
248
249 pr_debug("%s: Enter(%d)\n", __func__, count);
250
251 if (count > MAX_QTI_PKT_SIZE) {
252 pr_err("Buffer size is too big %d, should be at most %d\n",
253 count, MAX_QTI_PKT_SIZE);
254 return -EINVAL;
255 }
256
257 if (rmnet_ctrl_lock(&port->read_excl)) {
258 pr_err("Previous reading is not finished yet\n");
259 return -EBUSY;
260 }
261
Bar Weiner0dae81b2013-02-14 13:53:54 +0200262 /* block until a new packet is available */
263 do {
264 spin_lock_irqsave(&port->lock, flags);
265 if (!list_empty(&port->cpkt_req_q))
266 break;
267 spin_unlock_irqrestore(&port->lock, flags);
268
269 pr_debug("%s: Requests list is empty. Wait.\n", __func__);
270 ret = wait_event_interruptible(port->read_wq,
271 !list_empty(&port->cpkt_req_q));
272 if (ret < 0) {
273 pr_debug("Waiting failed\n");
274 rmnet_ctrl_unlock(&port->read_excl);
275 return -ERESTARTSYS;
276 }
277 } while (1);
278
279 cpkt = list_first_entry(&port->cpkt_req_q, struct rmnet_ctrl_pkt,
280 list);
281 list_del(&cpkt->list);
282 spin_unlock_irqrestore(&port->lock, flags);
283
284 if (cpkt->len > count) {
285 pr_err("cpkt size too big:%d > buf size:%d\n",
286 cpkt->len, count);
287 rmnet_ctrl_unlock(&port->read_excl);
288 free_rmnet_ctrl_pkt(cpkt);
289 return -ENOMEM;
290 }
291
292 pr_debug("%s: cpkt size:%d\n", __func__, cpkt->len);
293
294
295 rmnet_ctrl_unlock(&port->read_excl);
296
297 ret = copy_to_user(buf, cpkt->buf, cpkt->len);
298 if (ret) {
299 pr_err("copy_to_user failed: err %d\n", ret);
300 ret = -EFAULT;
301 } else {
302 pr_debug("%s: copied %d bytes to user\n", __func__, cpkt->len);
303 ret = cpkt->len;
304 }
305
306 free_rmnet_ctrl_pkt(cpkt);
307
308 return ret;
309}
310
311static ssize_t
312rmnet_ctrl_write(struct file *fp, const char __user *buf, size_t count,
313 loff_t *pos)
314{
315 struct rmnet_ctrl_qti_port *port = fp->private_data;
316 void *kbuf;
317 unsigned long flags;
318 int ret = 0;
319
320 pr_debug("%s: Enter(%d)", __func__, count);
321
322 if (!count) {
323 pr_debug("zero length ctrl pkt\n");
324 return -EINVAL;
325 }
326
327 if (count > MAX_QTI_PKT_SIZE) {
328 pr_debug("given pkt size too big:%d > max_pkt_size:%d\n",
329 count, MAX_QTI_PKT_SIZE);
330 return -EINVAL;
331 }
332
333 if (rmnet_ctrl_lock(&port->write_excl)) {
334 pr_err("Previous writing not finished yet\n");
335 return -EBUSY;
336 }
337
338 if (!atomic_read(&port->connected)) {
339 pr_debug("USB cable not connected\n");
340 rmnet_ctrl_unlock(&port->write_excl);
341 return -EPIPE;
342 }
343
344 kbuf = kmalloc(count, GFP_KERNEL);
345 if (!kbuf) {
346 pr_err("failed to allocate ctrl pkt\n");
347 rmnet_ctrl_unlock(&port->write_excl);
348 return -ENOMEM;
349 }
350 ret = copy_from_user(kbuf, buf, count);
351 if (ret) {
352 pr_err("copy_from_user failed err:%d\n", ret);
353 kfree(kbuf);
354 rmnet_ctrl_unlock(&port->write_excl);
355 return -EFAULT;
356 }
357
358 spin_lock_irqsave(&port->lock, flags);
359 if (port->port_usb && port->port_usb->send_cpkt_response) {
360 ret = port->port_usb->send_cpkt_response(port->port_usb,
361 kbuf, count);
362 if (ret) {
363 pr_err("failed to send ctrl packet. error=%d\n", ret);
364 spin_unlock_irqrestore(&port->lock, flags);
365 kfree(kbuf);
366 rmnet_ctrl_unlock(&port->write_excl);
367 return ret;
368 }
369 } else {
370 pr_err("send_cpkt_response callback is NULL\n");
371 spin_unlock_irqrestore(&port->lock, flags);
372 kfree(kbuf);
373 rmnet_ctrl_unlock(&port->write_excl);
374 return -EINVAL;
375 }
376 spin_unlock_irqrestore(&port->lock, flags);
377
378 kfree(kbuf);
379 rmnet_ctrl_unlock(&port->write_excl);
380
381 pr_debug("%s: Exit(%d)", __func__, count);
382
383 return count;
384
385}
386
387static long rmnet_ctrl_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
388{
389 struct rmnet_ctrl_qti_port *port = fp->private_data;
390 int val, ret = 0;
391
392 pr_debug("%s: Received command %d", __func__, cmd);
393
394 if (rmnet_ctrl_lock(&port->ioctl_excl))
395 return -EBUSY;
396
397 switch (cmd) {
398 case FRMNET_CTRL_GET_LINE_STATE:
399 val = atomic_read(&port->line_state);
400 ret = copy_to_user((void __user *)arg, &val, sizeof(val));
401 if (ret) {
402 pr_err("copying to user space failed");
403 ret = -EFAULT;
404 }
405 pr_debug("%s: Sent line_state: %d", __func__,
406 atomic_read(&port->line_state));
407 break;
408 default:
409 pr_err("wrong parameter");
410 ret = -EINVAL;
411 }
412
413 rmnet_ctrl_unlock(&port->ioctl_excl);
414
415 return ret;
416}
417
418static unsigned int rmnet_ctrl_poll(struct file *file, poll_table *wait)
419{
420 struct rmnet_ctrl_qti_port *port = file->private_data;
421 unsigned long flags;
422 unsigned int mask = 0;
423
424 if (!port) {
425 pr_err("%s on a NULL device\n", __func__);
426 return POLLERR;
427 }
428
429 poll_wait(file, &port->read_wq, wait);
430
431 spin_lock_irqsave(&port->lock, flags);
432 if (!list_empty(&port->cpkt_req_q)) {
433 mask |= POLLIN | POLLRDNORM;
434 pr_debug("%s sets POLLIN for rmnet_ctrl_qti_port\n", __func__);
435 }
436 spin_unlock_irqrestore(&port->lock, flags);
437
438 return mask;
439}
440
/* file operations for rmnet device /dev/rmnet_ctrl */
static const struct file_operations rmnet_ctrl_fops = {
	.owner = THIS_MODULE,
	.open = rmnet_ctrl_open,
	.release = rmnet_ctrl_release,
	.read = rmnet_ctrl_read,
	.write = rmnet_ctrl_write,
	.unlocked_ioctl = rmnet_ctrl_ioctl,
	.poll = rmnet_ctrl_poll,
};

/* misc device node registered as /dev/rmnet_ctrl in gqti_ctrl_init() */
static struct miscdevice rmnet_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "rmnet_ctrl",
	.fops = &rmnet_ctrl_fops,
};
457
458static int __init gqti_ctrl_init(void)
459{
460 int ret;
461 struct rmnet_ctrl_qti_port *port = NULL;
462
463 port = kzalloc(sizeof(struct rmnet_ctrl_qti_port), GFP_KERNEL);
464 if (!port) {
465 pr_err("Failed to allocate rmnet control device\n");
466 return -ENOMEM;
467 }
468
469 INIT_LIST_HEAD(&port->cpkt_req_q);
470 spin_lock_init(&port->lock);
471
472 atomic_set(&port->open_excl, 0);
473 atomic_set(&port->read_excl, 0);
474 atomic_set(&port->write_excl, 0);
475 atomic_set(&port->ioctl_excl, 0);
476 atomic_set(&port->connected, 0);
477 atomic_set(&port->line_state, 0);
478
479 init_waitqueue_head(&port->read_wq);
480
481 ctrl_port = port;
482
483 ret = misc_register(&rmnet_device);
484 if (ret) {
485 pr_err("rmnet control driver failed to register");
486 goto fail_init;
487 }
488
489 return ret;
490
491fail_init:
492 kfree(port);
493 ctrl_port = NULL;
494 return ret;
495}
496module_init(gqti_ctrl_init);
497
/*
 * Module exit: unregister /dev/rmnet_ctrl and free the port instance.
 */
static void __exit gqti_ctrl_cleanup(void)
{
	misc_deregister(&rmnet_device);

	kfree(ctrl_port);
	ctrl_port = NULL;
}
module_exit(gqti_ctrl_cleanup);