/*
 * HSI character device driver, implements the character device
 * interface.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 *
 * Contact: Andras Domokos <andras.domokos@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/scatterlist.h>
#include <linux/stat.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/hsi_char.h>

#define HSC_DEVS		16 /* Num of channels */
#define HSC_MSGS		4

#define HSC_RXBREAK		0

#define HSC_ID_BITS		6
#define HSC_PORT_ID_BITS	4
#define HSC_ID_MASK		3
#define HSC_PORT_ID_MASK	3
#define HSC_CH_MASK		0xf

/*
 * We support up to 4 controllers that can have up to 4
 * ports, which should currently be more than enough.
 */
#define HSC_BASEMINOR(id, port_id) \
		((((id) & HSC_ID_MASK) << HSC_ID_BITS) | \
		(((port_id) & HSC_PORT_ID_MASK) << HSC_PORT_ID_BITS))
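/*
 * Minor number layout: bits 7..6 carry the controller id, bits 5..4 the
 * port id and bits 3..0 the channel. Example: controller 1, port 2 gives
 * base minor (1 << HSC_ID_BITS) | (2 << HSC_PORT_ID_BITS) = 96, so
 * channel 3 of that port is minor 99.
 */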

enum {
	HSC_CH_OPEN,
	HSC_CH_READ,
	HSC_CH_WRITE,
	HSC_CH_WLINE,
};

enum {
	HSC_RX,
	HSC_TX,
};

struct hsc_client_data;
/**
 * struct hsc_channel - hsi_char internal channel data
 * @ch: channel number
 * @flags: Keeps state of the channel (open/close, reading, writing)
 * @free_msgs_list: List of free HSI messages/requests
 * @rx_msgs_queue: List of pending RX requests
 * @tx_msgs_queue: List of pending TX requests
 * @lock: Serialize access to the lists
 * @cl: reference to the associated hsi_client
 * @cl_data: reference to the client data that this channel belongs to
 * @rx_wait: RX requests wait queue
 * @tx_wait: TX requests wait queue
 */
struct hsc_channel {
	unsigned int ch;
	unsigned long flags;
	struct list_head free_msgs_list;
	struct list_head rx_msgs_queue;
	struct list_head tx_msgs_queue;
	spinlock_t lock;
	struct hsi_client *cl;
	struct hsc_client_data *cl_data;
	wait_queue_head_t rx_wait;
	wait_queue_head_t tx_wait;
};

/**
 * struct hsc_client_data - hsi_char internal client data
 * @cdev: Character device associated to the hsi_client
 * @lock: Lock to serialize open/close access
 * @flags: Keeps track of port state (rx hwbreak armed)
 * @usecnt: Use count for claiming the HSI port (mutex protected)
 * @cl: Reference to the HSI client
 * @channels: Array of channels accessible by the client
 */
struct hsc_client_data {
	struct cdev cdev;
	struct mutex lock;
	unsigned long flags;
	unsigned int usecnt;
	struct hsi_client *cl;
	struct hsc_channel channels[HSC_DEVS];
};

/* Stores the major number dynamically allocated for hsi_char */
static unsigned int hsc_major;
/* Maximum buffer size that hsi_char will accept from userspace */
static unsigned int max_data_size = 0x1000;
module_param(max_data_size, uint, 0);
MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)");

static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg,
							struct list_head *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&channel->lock, flags);
	list_add_tail(&msg->link, queue);
	spin_unlock_irqrestore(&channel->lock, flags);
}

static struct hsi_msg *hsc_get_first_msg(struct hsc_channel *channel,
							struct list_head *queue)
{
	struct hsi_msg *msg = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->lock, flags);

	if (list_empty(queue))
		goto out;

	msg = list_first_entry(queue, struct hsi_msg, link);
	list_del(&msg->link);
out:
	spin_unlock_irqrestore(&channel->lock, flags);

	return msg;
}

static inline void hsc_msg_free(struct hsi_msg *msg)
{
	kfree(sg_virt(msg->sgt.sgl));
	hsi_free_msg(msg);
}

static void hsc_free_list(struct list_head *list)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, list, link) {
		list_del(&msg->link);
		hsc_msg_free(msg);
	}
}

static void hsc_reset_list(struct hsc_channel *channel, struct list_head *l)
{
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&channel->lock, flags);
	list_splice_init(l, &list);
	spin_unlock_irqrestore(&channel->lock, flags);

	hsc_free_list(&list);
}

static inline struct hsi_msg *hsc_msg_alloc(unsigned int alloc_size)
{
	struct hsi_msg *msg;
	void *buf;

	msg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!msg)
		goto out;
	buf = kmalloc(alloc_size, GFP_KERNEL);
	if (!buf) {
		hsi_free_msg(msg);
		goto out;
	}
	sg_init_one(msg->sgt.sgl, buf, alloc_size);
	/* Ignore false positive, due to sg pointer handling */
	kmemleak_ignore(buf);

	return msg;
out:
	return NULL;
}

static inline int hsc_msgs_alloc(struct hsc_channel *channel)
{
	struct hsi_msg *msg;
	int i;

	for (i = 0; i < HSC_MSGS; i++) {
		msg = hsc_msg_alloc(max_data_size);
		if (!msg)
			goto out;
		msg->channel = channel->ch;
		list_add_tail(&msg->link, &channel->free_msgs_list);
	}

	return 0;
out:
	hsc_free_list(&channel->free_msgs_list);

	return -ENOMEM;
}

static inline unsigned int hsc_msg_len_get(struct hsi_msg *msg)
{
	return msg->sgt.sgl->length;
}

static inline void hsc_msg_len_set(struct hsi_msg *msg, unsigned int len)
{
	msg->sgt.sgl->length = len;
}

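/*
 * Completion callback for RX transfers: hand the message over to a
 * pending reader, or put it back on the free list if the read has been
 * cancelled in the meantime.
 */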
static void hsc_rx_completed(struct hsi_msg *msg)
{
	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
	struct hsc_channel *channel = cl_data->channels + msg->channel;

	if (test_bit(HSC_CH_READ, &channel->flags)) {
		hsc_add_tail(channel, msg, &channel->rx_msgs_queue);
		wake_up(&channel->rx_wait);
	} else {
		hsc_add_tail(channel, msg, &channel->free_msgs_list);
	}
}

static void hsc_rx_msg_destructor(struct hsi_msg *msg)
{
	msg->status = HSI_STATUS_ERROR;
	hsc_msg_len_set(msg, 0);
	hsc_rx_completed(msg);
}

static void hsc_tx_completed(struct hsi_msg *msg)
{
	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
	struct hsc_channel *channel = cl_data->channels + msg->channel;

	if (test_bit(HSC_CH_WRITE, &channel->flags)) {
		hsc_add_tail(channel, msg, &channel->tx_msgs_queue);
		wake_up(&channel->tx_wait);
	} else {
		hsc_add_tail(channel, msg, &channel->free_msgs_list);
	}
}

static void hsc_tx_msg_destructor(struct hsi_msg *msg)
{
	msg->status = HSI_STATUS_ERROR;
	hsc_msg_len_set(msg, 0);
	hsc_tx_completed(msg);
}

static void hsc_break_req_destructor(struct hsi_msg *msg)
{
	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);

	hsi_free_msg(msg);
	clear_bit(HSC_RXBREAK, &cl_data->flags);
}

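/*
 * HWBREAK reception: wake up every channel with a pending read by
 * completing it with a zero-length message, then re-arm the break
 * request so further breaks are caught as well.
 */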
static void hsc_break_received(struct hsi_msg *msg)
{
	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
	struct hsc_channel *channel = cl_data->channels;
	int i, ret;

	/* Broadcast HWBREAK on all channels */
	for (i = 0; i < HSC_DEVS; i++, channel++) {
		struct hsi_msg *msg2;

		if (!test_bit(HSC_CH_READ, &channel->flags))
			continue;
		msg2 = hsc_get_first_msg(channel, &channel->free_msgs_list);
		if (!msg2)
			continue;
		clear_bit(HSC_CH_READ, &channel->flags);
		hsc_msg_len_set(msg2, 0);
		msg2->status = HSI_STATUS_COMPLETED;
		hsc_add_tail(channel, msg2, &channel->rx_msgs_queue);
		wake_up(&channel->rx_wait);
	}
	hsi_flush(msg->cl);
	ret = hsi_async_read(msg->cl, msg);
	if (ret < 0)
		hsc_break_req_destructor(msg);
}

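/*
 * Arm a break-frame read on the port; the HSC_RXBREAK bit ensures only
 * one such request is outstanding at a time.
 */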
static int hsc_break_request(struct hsi_client *cl)
{
	struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
	struct hsi_msg *msg;
	int ret;

	if (test_and_set_bit(HSC_RXBREAK, &cl_data->flags))
		return -EBUSY;

	msg = hsi_alloc_msg(0, GFP_KERNEL);
	if (!msg) {
		clear_bit(HSC_RXBREAK, &cl_data->flags);
		return -ENOMEM;
	}
	msg->break_frame = 1;
	msg->complete = hsc_break_received;
	msg->destructor = hsc_break_req_destructor;
	ret = hsi_async_read(cl, msg);
	if (ret < 0)
		hsc_break_req_destructor(msg);

	return ret;
}

static int hsc_break_send(struct hsi_client *cl)
{
	struct hsi_msg *msg;
	int ret;

	msg = hsi_alloc_msg(0, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	msg->break_frame = 1;
	msg->complete = hsi_free_msg;
	msg->destructor = hsi_free_msg;
	ret = hsi_async_write(cl, msg);
	if (ret < 0)
		hsi_free_msg(msg);

	return ret;
}

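/*
 * Validate and apply a new RX configuration: mode must be STREAM or
 * FRAME, the channel count a power of two no larger than HSC_DEVS, and
 * flow either SYNC or PIPE. On hsi_setup() failure the previous
 * configuration is restored; FRAME mode additionally arms a HWBREAK
 * request.
 */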
static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc)
{
	struct hsi_config tmp;
	int ret;

	if ((rxc->mode != HSI_MODE_STREAM) && (rxc->mode != HSI_MODE_FRAME))
		return -EINVAL;
	if ((rxc->channels == 0) || (rxc->channels > HSC_DEVS))
		return -EINVAL;
	if (rxc->channels & (rxc->channels - 1))
		return -EINVAL;
	if ((rxc->flow != HSI_FLOW_SYNC) && (rxc->flow != HSI_FLOW_PIPE))
		return -EINVAL;
	tmp = cl->rx_cfg;
	cl->rx_cfg.mode = rxc->mode;
	cl->rx_cfg.channels = rxc->channels;
	cl->rx_cfg.flow = rxc->flow;
	ret = hsi_setup(cl);
	if (ret < 0) {
		cl->rx_cfg = tmp;
		return ret;
	}
	if (rxc->mode == HSI_MODE_FRAME)
		hsc_break_request(cl);

	return ret;
}

static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc)
{
	rxc->mode = cl->rx_cfg.mode;
	rxc->channels = cl->rx_cfg.channels;
	rxc->flow = cl->rx_cfg.flow;
}

static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc)
{
	struct hsi_config tmp;
	int ret;

	if ((txc->mode != HSI_MODE_STREAM) && (txc->mode != HSI_MODE_FRAME))
		return -EINVAL;
	if ((txc->channels == 0) || (txc->channels > HSC_DEVS))
		return -EINVAL;
	if (txc->channels & (txc->channels - 1))
		return -EINVAL;
	if ((txc->arb_mode != HSI_ARB_RR) && (txc->arb_mode != HSI_ARB_PRIO))
		return -EINVAL;
	tmp = cl->tx_cfg;
	cl->tx_cfg.mode = txc->mode;
	cl->tx_cfg.channels = txc->channels;
	cl->tx_cfg.speed = txc->speed;
	cl->tx_cfg.arb_mode = txc->arb_mode;
	ret = hsi_setup(cl);
	if (ret < 0) {
		cl->tx_cfg = tmp;
		return ret;
	}

	return ret;
}

static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc)
{
	txc->mode = cl->tx_cfg.mode;
	txc->channels = cl->tx_cfg.channels;
	txc->speed = cl->tx_cfg.speed;
	txc->arb_mode = cl->tx_cfg.arb_mode;
}

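/*
 * Blocking read: the requested length must be a multiple of four bytes
 * and is capped at max_data_size; a zero-length read returns 0
 * immediately. Only one read may be in flight per channel (-EBUSY
 * otherwise); the call sleeps until the transfer completes or a signal
 * interrupts it.
 */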
static ssize_t hsc_read(struct file *file, char __user *buf, size_t len,
						loff_t *ppos __maybe_unused)
{
	struct hsc_channel *channel = file->private_data;
	struct hsi_msg *msg;
	ssize_t ret;

	if (len == 0)
		return 0;
	if (!IS_ALIGNED(len, sizeof(u32)))
		return -EINVAL;
	if (len > max_data_size)
		len = max_data_size;
	if (channel->ch >= channel->cl->rx_cfg.channels)
		return -ECHRNG;
	if (test_and_set_bit(HSC_CH_READ, &channel->flags))
		return -EBUSY;
	msg = hsc_get_first_msg(channel, &channel->free_msgs_list);
	if (!msg) {
		ret = -ENOSPC;
		goto out;
	}
	hsc_msg_len_set(msg, len);
	msg->complete = hsc_rx_completed;
	msg->destructor = hsc_rx_msg_destructor;
	ret = hsi_async_read(channel->cl, msg);
	if (ret < 0) {
		hsc_add_tail(channel, msg, &channel->free_msgs_list);
		goto out;
	}

	ret = wait_event_interruptible(channel->rx_wait,
				!list_empty(&channel->rx_msgs_queue));
	if (ret < 0) {
		clear_bit(HSC_CH_READ, &channel->flags);
		hsi_flush(channel->cl);
		return -EINTR;
	}

	msg = hsc_get_first_msg(channel, &channel->rx_msgs_queue);
	if (msg) {
		if (msg->status != HSI_STATUS_ERROR) {
			ret = copy_to_user((void __user *)buf,
				sg_virt(msg->sgt.sgl), hsc_msg_len_get(msg));
			if (ret)
				ret = -EFAULT;
			else
				ret = hsc_msg_len_get(msg);
		} else {
			ret = -EIO;
		}
		hsc_add_tail(channel, msg, &channel->free_msgs_list);
	}
out:
	clear_bit(HSC_CH_READ, &channel->flags);

	return ret;
}

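/*
 * Blocking write: same length constraints as hsc_read(), except that a
 * zero-length write is rejected with -EINVAL. Only one write may be in
 * flight per channel at a time.
 */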
static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len,
						loff_t *ppos __maybe_unused)
{
	struct hsc_channel *channel = file->private_data;
	struct hsi_msg *msg;
	ssize_t ret;

	if ((len == 0) || !IS_ALIGNED(len, sizeof(u32)))
		return -EINVAL;
	if (len > max_data_size)
		len = max_data_size;
	if (channel->ch >= channel->cl->tx_cfg.channels)
		return -ECHRNG;
	if (test_and_set_bit(HSC_CH_WRITE, &channel->flags))
		return -EBUSY;
	msg = hsc_get_first_msg(channel, &channel->free_msgs_list);
	if (!msg) {
		clear_bit(HSC_CH_WRITE, &channel->flags);
		return -ENOSPC;
	}
	if (copy_from_user(sg_virt(msg->sgt.sgl), (void __user *)buf, len)) {
		ret = -EFAULT;
		goto out;
	}
	hsc_msg_len_set(msg, len);
	msg->complete = hsc_tx_completed;
	msg->destructor = hsc_tx_msg_destructor;
	ret = hsi_async_write(channel->cl, msg);
	if (ret < 0)
		goto out;

	ret = wait_event_interruptible(channel->tx_wait,
				!list_empty(&channel->tx_msgs_queue));
	if (ret < 0) {
		clear_bit(HSC_CH_WRITE, &channel->flags);
		hsi_flush(channel->cl);
		return -EINTR;
	}

	msg = hsc_get_first_msg(channel, &channel->tx_msgs_queue);
	if (msg) {
		if (msg->status == HSI_STATUS_ERROR)
			ret = -EIO;
		else
			ret = hsc_msg_len_get(msg);

		hsc_add_tail(channel, msg, &channel->free_msgs_list);
	}
out:
	clear_bit(HSC_CH_WRITE, &channel->flags);

	return ret;
}

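/*
 * ioctl interface: HSC_RESET flushes the port, HSC_SET_PM raises or
 * drops the TX wake line via hsi_start_tx()/hsi_stop_tx(),
 * HSC_SEND_BREAK sends a break frame, and HSC_SET_RX/HSC_GET_RX and
 * HSC_SET_TX/HSC_GET_TX access the RX/TX configuration.
 */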
static long hsc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hsc_channel *channel = file->private_data;
	unsigned int state;
	struct hsc_rx_config rxc;
	struct hsc_tx_config txc;
	long ret = 0;

	switch (cmd) {
	case HSC_RESET:
		hsi_flush(channel->cl);
		break;
	case HSC_SET_PM:
		if (copy_from_user(&state, (void __user *)arg, sizeof(state)))
			return -EFAULT;
		if (state == HSC_PM_DISABLE) {
			if (test_and_set_bit(HSC_CH_WLINE, &channel->flags))
				return -EINVAL;
			ret = hsi_start_tx(channel->cl);
		} else if (state == HSC_PM_ENABLE) {
			if (!test_and_clear_bit(HSC_CH_WLINE, &channel->flags))
				return -EINVAL;
			ret = hsi_stop_tx(channel->cl);
		} else {
			ret = -EINVAL;
		}
		break;
	case HSC_SEND_BREAK:
		return hsc_break_send(channel->cl);
	case HSC_SET_RX:
		if (copy_from_user(&rxc, (void __user *)arg, sizeof(rxc)))
			return -EFAULT;
		return hsc_rx_set(channel->cl, &rxc);
	case HSC_GET_RX:
		hsc_rx_get(channel->cl, &rxc);
		if (copy_to_user((void __user *)arg, &rxc, sizeof(rxc)))
			return -EFAULT;
		break;
	case HSC_SET_TX:
		if (copy_from_user(&txc, (void __user *)arg, sizeof(txc)))
			return -EFAULT;
		return hsc_tx_set(channel->cl, &txc);
	case HSC_GET_TX:
		hsc_tx_get(channel->cl, &txc);
		if (copy_to_user((void __user *)arg, &txc, sizeof(txc)))
			return -EFAULT;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return ret;
}

static inline void __hsc_port_release(struct hsc_client_data *cl_data)
{
	BUG_ON(cl_data->usecnt == 0);

	if (--cl_data->usecnt == 0) {
		hsi_flush(cl_data->cl);
		hsi_release_port(cl_data->cl);
	}
}

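/*
 * The first open on a port claims and configures the underlying HSI
 * port; later opens only bump the use count. Every opened channel gets
 * HSC_MSGS preallocated message buffers of max_data_size bytes.
 */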
static int hsc_open(struct inode *inode, struct file *file)
{
	struct hsc_client_data *cl_data;
	struct hsc_channel *channel;
	int ret = 0;

	pr_debug("open, minor = %d\n", iminor(inode));

	cl_data = container_of(inode->i_cdev, struct hsc_client_data, cdev);
	mutex_lock(&cl_data->lock);
	channel = cl_data->channels + (iminor(inode) & HSC_CH_MASK);

	if (test_and_set_bit(HSC_CH_OPEN, &channel->flags)) {
		ret = -EBUSY;
		goto out;
	}
	/*
	 * Check if we have already claimed the port associated to the HSI
	 * client. If not then try to claim it, else increase its refcount
	 */
	if (cl_data->usecnt == 0) {
		ret = hsi_claim_port(cl_data->cl, 0);
		if (ret < 0)
			goto out;
		hsi_setup(cl_data->cl);
	}
	cl_data->usecnt++;

	ret = hsc_msgs_alloc(channel);
	if (ret < 0) {
		__hsc_port_release(cl_data);
		goto out;
	}

	file->private_data = channel;
	mutex_unlock(&cl_data->lock);

	return ret;
out:
	mutex_unlock(&cl_data->lock);

	return ret;
}

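/*
 * Closing a channel frees its preallocated message buffers and drops the
 * port reference; the HSI port itself is released once the last channel
 * of the port is closed.
 */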
static int hsc_release(struct inode *inode __maybe_unused, struct file *file)
{
	struct hsc_channel *channel = file->private_data;
	struct hsc_client_data *cl_data = channel->cl_data;

	mutex_lock(&cl_data->lock);
	file->private_data = NULL;
	if (test_and_clear_bit(HSC_CH_WLINE, &channel->flags))
		hsi_stop_tx(channel->cl);
	__hsc_port_release(cl_data);
	hsc_reset_list(channel, &channel->rx_msgs_queue);
	hsc_reset_list(channel, &channel->tx_msgs_queue);
	hsc_reset_list(channel, &channel->free_msgs_list);
	clear_bit(HSC_CH_READ, &channel->flags);
	clear_bit(HSC_CH_WRITE, &channel->flags);
	clear_bit(HSC_CH_OPEN, &channel->flags);
	wake_up(&channel->rx_wait);
	wake_up(&channel->tx_wait);
	mutex_unlock(&cl_data->lock);

	return 0;
}

static const struct file_operations hsc_fops = {
	.owner		= THIS_MODULE,
	.read		= hsc_read,
	.write		= hsc_write,
	.unlocked_ioctl	= hsc_ioctl,
	.open		= hsc_open,
	.release	= hsc_release,
};

static void __devinit hsc_channel_init(struct hsc_channel *channel)
{
	init_waitqueue_head(&channel->rx_wait);
	init_waitqueue_head(&channel->tx_wait);
	spin_lock_init(&channel->lock);
	INIT_LIST_HEAD(&channel->free_msgs_list);
	INIT_LIST_HEAD(&channel->rx_msgs_queue);
	INIT_LIST_HEAD(&channel->tx_msgs_queue);
}

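/*
 * One hsc_client_data instance is created per hsi_char client (one per
 * HSI port); it reserves a block of HSC_DEVS minor numbers and registers
 * a single cdev covering all of them.
 */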
static int __devinit hsc_probe(struct device *dev)
{
	const char devname[] = "hsi_char";
	struct hsc_client_data *cl_data;
	struct hsc_channel *channel;
	struct hsi_client *cl = to_hsi_client(dev);
	unsigned int hsc_baseminor;
	dev_t hsc_dev;
	int ret;
	int i;

	cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
	if (!cl_data) {
		dev_err(dev, "Could not allocate hsc_client_data\n");
		return -ENOMEM;
	}
	hsc_baseminor = HSC_BASEMINOR(hsi_id(cl), hsi_port_id(cl));
	if (!hsc_major) {
		ret = alloc_chrdev_region(&hsc_dev, hsc_baseminor,
						HSC_DEVS, devname);
		if (ret == 0) /* alloc_chrdev_region() returns 0 on success */
			hsc_major = MAJOR(hsc_dev);
	} else {
		hsc_dev = MKDEV(hsc_major, hsc_baseminor);
		ret = register_chrdev_region(hsc_dev, HSC_DEVS, devname);
	}
	if (ret < 0) {
		dev_err(dev, "Device %s allocation failed %d\n",
			hsc_major ? "minor" : "major", ret);
		goto out1;
	}
	mutex_init(&cl_data->lock);
	hsi_client_set_drvdata(cl, cl_data);
	cdev_init(&cl_data->cdev, &hsc_fops);
	cl_data->cdev.owner = THIS_MODULE;
	cl_data->cl = cl;
	for (i = 0, channel = cl_data->channels; i < HSC_DEVS; i++, channel++) {
		hsc_channel_init(channel);
		channel->ch = i;
		channel->cl = cl;
		channel->cl_data = cl_data;
	}

	/* 1 hsi client -> N char devices (one for each channel) */
	ret = cdev_add(&cl_data->cdev, hsc_dev, HSC_DEVS);
	if (ret) {
		dev_err(dev, "Could not add char device %d\n", ret);
		goto out2;
	}

	return 0;
out2:
	unregister_chrdev_region(hsc_dev, HSC_DEVS);
out1:
	kfree(cl_data);

	return ret;
}

static int __devexit hsc_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
	dev_t hsc_dev = cl_data->cdev.dev;

	cdev_del(&cl_data->cdev);
	unregister_chrdev_region(hsc_dev, HSC_DEVS);
	hsi_client_set_drvdata(cl, NULL);
	kfree(cl_data);

	return 0;
}

static struct hsi_client_driver hsc_driver = {
	.driver = {
		.name	= "hsi_char",
		.owner	= THIS_MODULE,
		.probe	= hsc_probe,
		.remove	= __devexit_p(hsc_remove),
	},
};

static int __init hsc_init(void)
{
	int ret;

	if ((max_data_size < 4) || (max_data_size > 0x10000) ||
		(max_data_size & (max_data_size - 1))) {
		pr_err("Invalid max read/write data size");
		return -EINVAL;
	}

	ret = hsi_register_client_driver(&hsc_driver);
	if (ret) {
		pr_err("Error while registering HSI/SSI driver %d", ret);
		return ret;
	}

	pr_info("HSI/SSI char device loaded\n");

	return 0;
}
module_init(hsc_init);

static void __exit hsc_exit(void)
{
	hsi_unregister_client_driver(&hsc_driver);
	pr_info("HSI char device removed\n");
}
module_exit(hsc_exit);

MODULE_AUTHOR("Andras Domokos <andras.domokos@nokia.com>");
MODULE_ALIAS("hsi:hsi_char");
MODULE_DESCRIPTION("HSI character device");
MODULE_LICENSE("GPL v2");