blob: e92978ff382f222c5aa4f21e0e94b276d02453ac [file] [log] [blame]
Bar Weiner0dae81b2013-02-14 13:53:54 +02001/*
2 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/wait.h>
15#include <linux/poll.h>
16#include <linux/usb/rmnet_ctrl_qti.h>
17
18#include "u_rmnet.h"
19
/* Per-device state for the QTI rmnet control channel (/dev/rmnet_ctrl). */
struct rmnet_ctrl_qti_port {
	struct grmnet *port_usb;	/* USB-side control port; NULL while disconnected */

	bool is_open;			/* true while the char device file is held open */

	atomic_t connected;		/* non-zero once gqti_ctrl_connect() has run */
	atomic_t line_state;		/* last line state pushed via notify_modem */

	/* one-at-a-time entry guards for the open/read/write/ioctl handlers */
	atomic_t open_excl;
	atomic_t read_excl;
	atomic_t write_excl;
	atomic_t ioctl_excl;

	wait_queue_head_t read_wq;	/* readers sleep here until a packet is queued */

	struct list_head cpkt_req_q;	/* control packets pending delivery to userspace */

	spinlock_t lock;		/* protects port_usb, is_open and cpkt_req_q */
};
/* Single instance, allocated in gqti_ctrl_init(); only one port is supported. */
static struct rmnet_ctrl_qti_port *ctrl_port;
40
41static inline int rmnet_ctrl_lock(atomic_t *excl)
42{
43 if (atomic_inc_return(excl) == 1) {
44 return 0;
45 } else {
46 atomic_dec(excl);
47 return -EBUSY;
48 }
49}
50
/* Release a guard previously taken with rmnet_ctrl_lock(). */
static inline void rmnet_ctrl_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
55
56static void rmnet_ctrl_queue_notify(struct rmnet_ctrl_qti_port *port)
57{
58 unsigned long flags;
59 struct rmnet_ctrl_pkt *cpkt = NULL;
60
61 pr_debug("%s: Queue empty packet for QTI", __func__);
62
63 spin_lock_irqsave(&port->lock, flags);
64 if (!port->is_open) {
65 pr_err("%s: rmnet ctrl file handler %p is not open",
66 __func__, port);
67 spin_unlock_irqrestore(&port->lock, flags);
68 return;
69 }
70
71 cpkt = alloc_rmnet_ctrl_pkt(0, GFP_ATOMIC);
72 if (!cpkt) {
73 pr_err("%s: Unable to allocate reset function pkt\n", __func__);
74 spin_unlock_irqrestore(&port->lock, flags);
75 return;
76 }
77
78 list_add_tail(&cpkt->list, &port->cpkt_req_q);
79 spin_unlock_irqrestore(&port->lock, flags);
80
81 pr_debug("%s: Wake up read queue", __func__);
82 wake_up(&port->read_wq);
83}
84
85static int grmnet_ctrl_qti_send_cpkt_tomodem(u8 portno,
86 void *buf, size_t len)
87{
88 unsigned long flags;
89 struct rmnet_ctrl_qti_port *port = ctrl_port;
90 struct rmnet_ctrl_pkt *cpkt;
91
92 if (len > MAX_QTI_PKT_SIZE) {
93 pr_err("given pkt size too big:%d > max_pkt_size:%d\n",
94 len, MAX_QTI_PKT_SIZE);
95 return -EINVAL;
96 }
97
98 cpkt = alloc_rmnet_ctrl_pkt(len, GFP_ATOMIC);
99 if (IS_ERR(cpkt)) {
100 pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
101 return -ENOMEM;
102 }
103
104 memcpy(cpkt->buf, buf, len);
105 cpkt->len = len;
106
107 pr_debug("%s: Add to cpkt_req_q packet with len = %d\n", __func__, len);
108 spin_lock_irqsave(&port->lock, flags);
109
110 /* drop cpkt if port is not open */
111 if (!port->is_open) {
112 pr_err("rmnet file handler %p is not open", port);
113 spin_unlock_irqrestore(&port->lock, flags);
114 free_rmnet_ctrl_pkt(cpkt);
115 return 0;
116 }
117
118 list_add_tail(&cpkt->list, &port->cpkt_req_q);
119 spin_unlock_irqrestore(&port->lock, flags);
120
121 /* wakeup read thread */
122 pr_debug("%s: Wake up read queue", __func__);
123 wake_up(&port->read_wq);
124
125 return 0;
126}
127
/*
 * grmnet ->notify_modem callback: record the new line state @val and
 * queue a zero-length packet so the QTI reader observes the change.
 * @gptr and @portno are unused; the single ctrl_port instance is used.
 */
static void
gqti_ctrl_notify_modem(void *gptr, u8 portno, int val)
{
	struct rmnet_ctrl_qti_port *port = ctrl_port;

	atomic_set(&port->line_state, val);

	/* send 0 len pkt to qti to notify state change */
	rmnet_ctrl_queue_notify(port);
}
138
139int gqti_ctrl_connect(struct grmnet *gr)
140{
141 struct rmnet_ctrl_qti_port *port;
142 unsigned long flags;
143
144 pr_debug("%s: grmnet:%p\n", __func__, gr);
145
146 if (!gr) {
147 pr_err("%s: grmnet port is null\n", __func__);
148 return -ENODEV;
149 }
150
151 port = ctrl_port;
152
153 spin_lock_irqsave(&port->lock, flags);
154 port->port_usb = gr;
155 gr->send_encap_cmd = grmnet_ctrl_qti_send_cpkt_tomodem;
156 gr->notify_modem = gqti_ctrl_notify_modem;
157 spin_unlock_irqrestore(&port->lock, flags);
158
159 atomic_set(&port->connected, 1);
160 wake_up(&port->read_wq);
161
162 if (port && port->port_usb && port->port_usb->connect)
163 port->port_usb->connect(port->port_usb);
164
165 return 0;
166}
167
168void gqti_ctrl_disconnect(struct grmnet *gr)
169{
170 struct rmnet_ctrl_qti_port *port = ctrl_port;
171 unsigned long flags;
172 struct rmnet_ctrl_pkt *cpkt;
173
174 pr_debug("%s: grmnet:%p\n", __func__, gr);
175
176 if (!gr) {
177 pr_err("%s: grmnet port is null\n", __func__);
178 return;
179 }
180
181 if (port && port->port_usb && port->port_usb->disconnect)
182 port->port_usb->disconnect(port->port_usb);
183
184 atomic_set(&port->connected, 0);
185 atomic_set(&port->line_state, 0);
186 spin_lock_irqsave(&port->lock, flags);
187 port->port_usb = 0;
188 gr->send_encap_cmd = 0;
189 gr->notify_modem = 0;
190
191 while (!list_empty(&port->cpkt_req_q)) {
192 cpkt = list_first_entry(&port->cpkt_req_q,
193 struct rmnet_ctrl_pkt, list);
194
195 list_del(&cpkt->list);
196 free_rmnet_ctrl_pkt(cpkt);
197 }
198
199 spin_unlock_irqrestore(&port->lock, flags);
200
201 /* send 0 len pkt to qti to notify state change */
202 rmnet_ctrl_queue_notify(port);
203}
204
205static int rmnet_ctrl_open(struct inode *ip, struct file *fp)
206{
207 unsigned long flags;
208
209 pr_debug("Open rmnet_ctrl_qti device file\n");
210
211 if (rmnet_ctrl_lock(&ctrl_port->open_excl)) {
212 pr_debug("Already opened\n");
213 return -EBUSY;
214 }
215
216 fp->private_data = ctrl_port;
217
218 spin_lock_irqsave(&ctrl_port->lock, flags);
219 ctrl_port->is_open = true;
220 spin_unlock_irqrestore(&ctrl_port->lock, flags);
221
222 return 0;
223}
224
225static int rmnet_ctrl_release(struct inode *ip, struct file *fp)
226{
227 unsigned long flags;
228 struct rmnet_ctrl_qti_port *port = fp->private_data;
229
230 pr_debug("Close rmnet control file");
231
232 spin_lock_irqsave(&port->lock, flags);
233 port->is_open = false;
234 spin_unlock_irqrestore(&port->lock, flags);
235
236 rmnet_ctrl_unlock(&port->open_excl);
237
238 return 0;
239}
240
241static ssize_t
242rmnet_ctrl_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
243{
244 struct rmnet_ctrl_qti_port *port = fp->private_data;
245 struct rmnet_ctrl_pkt *cpkt = NULL;
246 unsigned long flags;
247 int ret = 0;
248
249 pr_debug("%s: Enter(%d)\n", __func__, count);
250
251 if (count > MAX_QTI_PKT_SIZE) {
252 pr_err("Buffer size is too big %d, should be at most %d\n",
253 count, MAX_QTI_PKT_SIZE);
254 return -EINVAL;
255 }
256
257 if (rmnet_ctrl_lock(&port->read_excl)) {
258 pr_err("Previous reading is not finished yet\n");
259 return -EBUSY;
260 }
261
262 /* block until online */
263 while (!(atomic_read(&port->connected))) {
264 pr_debug("Not connected. Wait.\n");
265 ret = wait_event_interruptible(port->read_wq,
266 atomic_read(&port->connected));
267 if (ret < 0) {
268 rmnet_ctrl_unlock(&port->read_excl);
269 if (ret == -ERESTARTSYS)
270 return -ERESTARTSYS;
271 else
272 return -EINTR;
273 }
274 }
275
276 /* block until a new packet is available */
277 do {
278 spin_lock_irqsave(&port->lock, flags);
279 if (!list_empty(&port->cpkt_req_q))
280 break;
281 spin_unlock_irqrestore(&port->lock, flags);
282
283 pr_debug("%s: Requests list is empty. Wait.\n", __func__);
284 ret = wait_event_interruptible(port->read_wq,
285 !list_empty(&port->cpkt_req_q));
286 if (ret < 0) {
287 pr_debug("Waiting failed\n");
288 rmnet_ctrl_unlock(&port->read_excl);
289 return -ERESTARTSYS;
290 }
291 } while (1);
292
293 cpkt = list_first_entry(&port->cpkt_req_q, struct rmnet_ctrl_pkt,
294 list);
295 list_del(&cpkt->list);
296 spin_unlock_irqrestore(&port->lock, flags);
297
298 if (cpkt->len > count) {
299 pr_err("cpkt size too big:%d > buf size:%d\n",
300 cpkt->len, count);
301 rmnet_ctrl_unlock(&port->read_excl);
302 free_rmnet_ctrl_pkt(cpkt);
303 return -ENOMEM;
304 }
305
306 pr_debug("%s: cpkt size:%d\n", __func__, cpkt->len);
307
308
309 rmnet_ctrl_unlock(&port->read_excl);
310
311 ret = copy_to_user(buf, cpkt->buf, cpkt->len);
312 if (ret) {
313 pr_err("copy_to_user failed: err %d\n", ret);
314 ret = -EFAULT;
315 } else {
316 pr_debug("%s: copied %d bytes to user\n", __func__, cpkt->len);
317 ret = cpkt->len;
318 }
319
320 free_rmnet_ctrl_pkt(cpkt);
321
322 return ret;
323}
324
325static ssize_t
326rmnet_ctrl_write(struct file *fp, const char __user *buf, size_t count,
327 loff_t *pos)
328{
329 struct rmnet_ctrl_qti_port *port = fp->private_data;
330 void *kbuf;
331 unsigned long flags;
332 int ret = 0;
333
334 pr_debug("%s: Enter(%d)", __func__, count);
335
336 if (!count) {
337 pr_debug("zero length ctrl pkt\n");
338 return -EINVAL;
339 }
340
341 if (count > MAX_QTI_PKT_SIZE) {
342 pr_debug("given pkt size too big:%d > max_pkt_size:%d\n",
343 count, MAX_QTI_PKT_SIZE);
344 return -EINVAL;
345 }
346
347 if (rmnet_ctrl_lock(&port->write_excl)) {
348 pr_err("Previous writing not finished yet\n");
349 return -EBUSY;
350 }
351
352 if (!atomic_read(&port->connected)) {
353 pr_debug("USB cable not connected\n");
354 rmnet_ctrl_unlock(&port->write_excl);
355 return -EPIPE;
356 }
357
358 kbuf = kmalloc(count, GFP_KERNEL);
359 if (!kbuf) {
360 pr_err("failed to allocate ctrl pkt\n");
361 rmnet_ctrl_unlock(&port->write_excl);
362 return -ENOMEM;
363 }
364 ret = copy_from_user(kbuf, buf, count);
365 if (ret) {
366 pr_err("copy_from_user failed err:%d\n", ret);
367 kfree(kbuf);
368 rmnet_ctrl_unlock(&port->write_excl);
369 return -EFAULT;
370 }
371
372 spin_lock_irqsave(&port->lock, flags);
373 if (port->port_usb && port->port_usb->send_cpkt_response) {
374 ret = port->port_usb->send_cpkt_response(port->port_usb,
375 kbuf, count);
376 if (ret) {
377 pr_err("failed to send ctrl packet. error=%d\n", ret);
378 spin_unlock_irqrestore(&port->lock, flags);
379 kfree(kbuf);
380 rmnet_ctrl_unlock(&port->write_excl);
381 return ret;
382 }
383 } else {
384 pr_err("send_cpkt_response callback is NULL\n");
385 spin_unlock_irqrestore(&port->lock, flags);
386 kfree(kbuf);
387 rmnet_ctrl_unlock(&port->write_excl);
388 return -EINVAL;
389 }
390 spin_unlock_irqrestore(&port->lock, flags);
391
392 kfree(kbuf);
393 rmnet_ctrl_unlock(&port->write_excl);
394
395 pr_debug("%s: Exit(%d)", __func__, count);
396
397 return count;
398
399}
400
401static long rmnet_ctrl_ioctl(struct file *fp, unsigned cmd, unsigned long arg)
402{
403 struct rmnet_ctrl_qti_port *port = fp->private_data;
404 int val, ret = 0;
405
406 pr_debug("%s: Received command %d", __func__, cmd);
407
408 if (rmnet_ctrl_lock(&port->ioctl_excl))
409 return -EBUSY;
410
411 switch (cmd) {
412 case FRMNET_CTRL_GET_LINE_STATE:
413 val = atomic_read(&port->line_state);
414 ret = copy_to_user((void __user *)arg, &val, sizeof(val));
415 if (ret) {
416 pr_err("copying to user space failed");
417 ret = -EFAULT;
418 }
419 pr_debug("%s: Sent line_state: %d", __func__,
420 atomic_read(&port->line_state));
421 break;
422 default:
423 pr_err("wrong parameter");
424 ret = -EINVAL;
425 }
426
427 rmnet_ctrl_unlock(&port->ioctl_excl);
428
429 return ret;
430}
431
432static unsigned int rmnet_ctrl_poll(struct file *file, poll_table *wait)
433{
434 struct rmnet_ctrl_qti_port *port = file->private_data;
435 unsigned long flags;
436 unsigned int mask = 0;
437
438 if (!port) {
439 pr_err("%s on a NULL device\n", __func__);
440 return POLLERR;
441 }
442
443 poll_wait(file, &port->read_wq, wait);
444
445 spin_lock_irqsave(&port->lock, flags);
446 if (!list_empty(&port->cpkt_req_q)) {
447 mask |= POLLIN | POLLRDNORM;
448 pr_debug("%s sets POLLIN for rmnet_ctrl_qti_port\n", __func__);
449 }
450 spin_unlock_irqrestore(&port->lock, flags);
451
452 return mask;
453}
454
/* file operations for rmnet device /dev/rmnet_ctrl */
static const struct file_operations rmnet_ctrl_fops = {
	.owner = THIS_MODULE,
	.open = rmnet_ctrl_open,
	.release = rmnet_ctrl_release,
	.read = rmnet_ctrl_read,
	.write = rmnet_ctrl_write,
	.unlocked_ioctl = rmnet_ctrl_ioctl,
	.poll = rmnet_ctrl_poll,
};

/* misc char device registered as /dev/rmnet_ctrl with a dynamic minor */
static struct miscdevice rmnet_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "rmnet_ctrl",
	.fops = &rmnet_ctrl_fops,
};
471
472static int __init gqti_ctrl_init(void)
473{
474 int ret;
475 struct rmnet_ctrl_qti_port *port = NULL;
476
477 port = kzalloc(sizeof(struct rmnet_ctrl_qti_port), GFP_KERNEL);
478 if (!port) {
479 pr_err("Failed to allocate rmnet control device\n");
480 return -ENOMEM;
481 }
482
483 INIT_LIST_HEAD(&port->cpkt_req_q);
484 spin_lock_init(&port->lock);
485
486 atomic_set(&port->open_excl, 0);
487 atomic_set(&port->read_excl, 0);
488 atomic_set(&port->write_excl, 0);
489 atomic_set(&port->ioctl_excl, 0);
490 atomic_set(&port->connected, 0);
491 atomic_set(&port->line_state, 0);
492
493 init_waitqueue_head(&port->read_wq);
494
495 ctrl_port = port;
496
497 ret = misc_register(&rmnet_device);
498 if (ret) {
499 pr_err("rmnet control driver failed to register");
500 goto fail_init;
501 }
502
503 return ret;
504
505fail_init:
506 kfree(port);
507 ctrl_port = NULL;
508 return ret;
509}
510module_init(gqti_ctrl_init);
511
/* Module exit: unregister the misc device and free the port instance. */
static void __exit gqti_ctrl_cleanup(void)
{
	misc_deregister(&rmnet_device);

	/* kfree(NULL) is a no-op, so this is safe even if init failed */
	kfree(ctrl_port);
	ctrl_port = NULL;
}
module_exit(gqti_ctrl_cleanup);