blob: dbbf4b03e2588f8eeef273c1f4979f85916cd5a4 [file] [log] [blame]
Aparna Das29924c12013-06-07 17:31:51 -07001/*
2 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/types.h>
16#include <linux/cdev.h>
17#include <linux/gfp.h>
18#include <linux/device.h>
19#include <linux/fs.h>
20#include <linux/slab.h>
21#include <linux/module.h>
22#include <linux/completion.h>
23#include <linux/of_gpio.h>
24#include <linux/mutex.h>
25#include <mach/msm_smsm.h>
26#include <linux/uaccess.h>
27#include <asm/system.h>
28
/* Number of SMP2P processor slots; sizes proc_info[] and the minor range. */
#define SMP2P_NUM_PROCS 8

/* Shared-memory queue protocol version and fixed payload block size. */
#define SM_VERSION 1
#define SM_BLOCKSIZE 128

/* Magic values each side writes once its control block is initialized. */
#define SMQ_MAGIC_INIT 0xFF00FF00
#define SMQ_MAGIC_PRODUCER (SMQ_MAGIC_INIT | 0x1)
#define SMQ_MAGIC_CONSUMER (SMQ_MAGIC_INIT | 0x2)
37
/* Internal status codes returned by the smq_* helpers (not errno values). */
enum SMQ_STATUS {
	SMQ_SUCCESS = 0,
	SMQ_ENOMEMORY = -1,
	SMQ_EBADPARM = -2,
	SMQ_UNDERFLOW = -3,
	SMQ_OVERFLOW = -4
};
45
/* Role of an smq endpoint as seen from the local processor. */
enum smq_type {
	PRODUCER = 1,
	CONSUMER = 2,
	INVALID = 3
};
51
/* Producer-private free/used map over the shared data blocks. */
struct smq_block_map {
	uint32_t index_read;	/* next-fit scan position */
	uint32_t num_blocks;	/* usable blocks (one less than allocated) */
	uint8_t *map;		/* one byte per block; 0 = free */
};

/* Control-ring entry describing one message in the block area. */
struct smq_node {
	uint16_t index_block;	/* first data block of the message */
	uint16_t num_blocks;	/* blocks the message occupies */
} __attribute__ ((__packed__));

/* Shared header carrying the protocol version of each endpoint. */
struct smq_hdr {
	uint8_t producer_version;
	uint8_t consumer_version;
} __attribute__ ((__packed__));

/* Producer-owned control state (read-only to the consumer). */
struct smq_out_state {
	uint32_t init;				/* SMQ_MAGIC_PRODUCER once ready */
	uint32_t index_check_queue_for_reset;	/* reset generation counter */
	uint32_t index_sent_write;		/* sent-ring write index */
	uint32_t index_free_read;		/* free-ring read index */
} __attribute__ ((__packed__));

/*
 * Producer control block followed by the sent-node ring.  The ring is
 * really num_blocks long; [1] is the pre-C99 variable-length idiom.
 */
struct smq_out {
	struct smq_out_state s;
	struct smq_node sent[1];
};

/* Consumer-owned control state (read-only to the producer). */
struct smq_in_state {
	uint32_t init;				/* SMQ_MAGIC_CONSUMER once ready */
	uint32_t index_check_queue_for_reset_ack; /* acked reset generation */
	uint32_t index_sent_read;		/* sent-ring read index */
	uint32_t index_free_write;		/* free-ring write index */
} __attribute__ ((__packed__));

/* Consumer control block followed by the free-node ring (see smq_out). */
struct smq_in {
	struct smq_in_state s;
	struct smq_node free[1];
};

/* One directional queue endpoint mapped over half the SMEM region. */
struct smq {
	struct smq_hdr *hdr;	/* shared version header */
	struct smq_out *out;	/* producer control + sent ring */
	struct smq_in *in;	/* consumer control + free ring */
	uint8_t *blocks;	/* payload block area */
	uint32_t num_blocks;
	struct mutex *lock;	/* producer serialization; NULL for consumer */
	uint32_t initialized;	/* SMQ_MAGIC_INIT once constructed */
	struct smq_block_map block_map;	/* producer-only allocation map */
	enum smq_type type;
};
103
/* Base GPIO number and its mapped IRQ for one smp2p direction. */
struct gpio_info {
	int gpio_base_id;
	int irq_base_id;
};

/* Per-subsystem (per-minor) runtime state. */
struct rdbg_data {
	struct device *device;		/* node created in rdbg_init() */
	struct completion work;		/* signalled by the incoming IRQ */
	struct gpio_info in;		/* subsystem -> AP interrupt line */
	struct gpio_info out;		/* AP -> subsystem interrupt line */
	bool device_initialized;	/* /dev node successfully created */
	int gpio_out_offset;		/* rotating offset for outgoing toggles */
	bool device_opened;		/* single-open guard */
	void *smem_addr;		/* shared-memory region base */
	size_t smem_size;		/* total region size (split into halves) */
	struct smq producer_smrb;	/* AP -> subsystem queue */
	struct smq consumer_smrb;	/* subsystem -> AP queue */
	struct mutex write_mutex;	/* serializes rdbg_write() producers */
};

/* Driver-wide state: one char-device region covering all minors. */
struct rdbg_device {
	struct cdev cdev;
	struct class *class;
	dev_t dev_no;
	int num_devices;
	struct rdbg_data *rdbg_data;	/* array of num_devices entries */
};
131
132static struct rdbg_device g_rdbg_instance = {
133 { {0} },
134 NULL,
135 0,
136 SMP2P_NUM_PROCS,
137 NULL
138};
139
/* Static per-subsystem configuration, indexed by SMP2P processor id. */
struct processor_specific_info {
	char *name;			/* device node name; NULL = unsupported */
	unsigned int smem_buffer_addr;	/* SMEM item id passed to smem_alloc() */
	size_t smem_buffer_size;	/* 0 = no shared buffer provisioned */
};

static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
	{0},	/*APPS*/
	{"rdbg_modem", 0, 0},	/*MODEM*/
	{"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024},	/*ADSP*/
	{0},	/*SMP2P_RESERVED_PROC_1*/
	{"rdbg_wcnss", 0, 0},	/*WCNSS*/
	{0},	/*SMP2P_RESERVED_PROC_2*/
	{0},	/*SMP2P_POWER_PROC*/
	{0}	/*SMP2P_REMOTE_MOCK_PROC*/
};
156
/*
 * Allocate @n contiguous free blocks from the producer's block map.
 *
 * @block_map:   allocation map (one byte per block; non-zero = in use)
 * @block_index: out - index of the first block of the allocation
 * @n:           number of contiguous blocks required
 *
 * Scans circularly starting at index_read (next-fit).  Returns
 * SMQ_SUCCESS with *block_index set, or SMQ_ENOMEMORY after a full wrap
 * without finding a run of @n free blocks.
 */
static int smq_blockmap_get(struct smq_block_map *block_map,
		uint32_t *block_index, uint32_t n)
{
	uint32_t start;
	uint32_t mark = 0;
	uint32_t found = 0;
	uint32_t i = 0;

	start = block_map->index_read;

	if (n == 1) {
		/* Fast path: claim the first free single block. */
		do {
			if (!block_map->map[block_map->index_read]) {
				*block_index = block_map->index_read;
				block_map->map[block_map->index_read] = 1;
				block_map->index_read++;
				block_map->index_read %= block_map->num_blocks;
				return SMQ_SUCCESS;
			}
			block_map->index_read++;
		} while (start != (block_map->index_read %=
			block_map->num_blocks));
	} else {
		mark = block_map->num_blocks;

		do {
			if (!block_map->map[block_map->index_read]) {
				if (mark > block_map->index_read) {
					/* First free block of a new candidate run. */
					mark = block_map->index_read;
					start = block_map->index_read;
					found = 0;
				}

				found++;
				if (found == n) {
					*block_index = mark;
					/*
					 * Tag each block with its remaining run
					 * length so smq_blockmap_put() can free
					 * the whole run from the first entry.
					 */
					for (i = 0; i < n; i++)
						block_map->map[mark + i] =
							(uint8_t)(n - i);
					block_map->index_read += block_map->map
						[block_map->index_read] - 1;
					return SMQ_SUCCESS;
				}
			} else {
				/* Run broken by an in-use block: skip past it. */
				found = 0;
				block_map->index_read += block_map->map
					[block_map->index_read] - 1;
				mark = block_map->num_blocks;
			}
			block_map->index_read++;
		} while (start != (block_map->index_read %=
			block_map->num_blocks));
	}

	return SMQ_ENOMEMORY;
}
213
214static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i)
215{
216 uint32_t num_blocks = block_map->map[i];
217
218 while (num_blocks--) {
219 block_map->map[i] = 0;
220 i++;
221 }
222}
223
224static int smq_blockmap_reset(struct smq_block_map *block_map)
225{
226 if (!block_map->map)
227 return SMQ_ENOMEMORY;
228 memset(block_map->map, 0 , block_map->num_blocks + 1);
229 block_map->index_read = 0;
230
231 return SMQ_SUCCESS;
232}
233
/*
 * Allocate and zero the producer's private block map.
 *
 * @num_blocks entries are allocated but the stored count is
 * num_blocks - 1, keeping one spare entry; smq_blockmap_reset() then
 * clears num_blocks bytes, i.e. exactly the kcalloc() size made here.
 *
 * Returns SMQ_SUCCESS or SMQ_ENOMEMORY.
 */
static int smq_blockmap_ctor(struct smq_block_map *block_map,
	uint32_t num_blocks)
{
	if (num_blocks <= 1)
		return SMQ_ENOMEMORY;

	block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL);
	if (!block_map->map)
		return SMQ_ENOMEMORY;

	block_map->num_blocks = num_blocks - 1;
	smq_blockmap_reset(block_map);

	return SMQ_SUCCESS;
}
249
/* Free the block map; NULLing the pointer makes repeat calls harmless. */
static void smq_blockmap_dtor(struct smq_block_map *block_map)
{
	kfree(block_map->map);
	block_map->map = NULL;
}
255
/*
 * Return a received message buffer to the remote producer.
 *
 * Called by the consumer after copying the payload out: publishes the
 * block index on the free ring (smq_in) so the producer can reclaim the
 * blocks in smq_alloc_send().
 *
 * Returns SMQ_SUCCESS, SMQ_UNDERFLOW if the producer side looks
 * uninitialized, or SMQ_EBADPARM if @data is outside the block area.
 */
static int smq_free(struct smq *smq, void *data)
{
	struct smq_node node;
	uint32_t index_block;
	int err = SMQ_SUCCESS;

	if (smq->lock)
		mutex_lock(smq->lock);

	/*
	 * NOTE(review): '&&' rejects only when BOTH the version and the
	 * magic are wrong; smq_receive() uses the same pattern -- confirm
	 * '||' was not intended.
	 */
	if ((SM_VERSION != smq->hdr->producer_version) &&
		(SMQ_MAGIC_PRODUCER != smq->out->s.init)) {
		err = SMQ_UNDERFLOW;
		goto bail;
	}

	index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE;
	if (index_block >= smq->num_blocks) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	node.index_block = (uint16_t)index_block;
	node.num_blocks = 0;
	/* Publish the freed node, then advance the free-ring write index. */
	*((struct smq_node *)(smq->in->free + smq->in->
		s.index_free_write)) = node;

	smq->in->s.index_free_write = (smq->in->s.index_free_write + 1)
		% smq->num_blocks;

bail:
	if (smq->lock)
		mutex_unlock(smq->lock);
	return err;
}
290
/*
 * Dequeue the next message descriptor from the remote producer.
 *
 * @pp:     out - payload address inside the shared block area
 * @pnsize: out - payload length in bytes (whole blocks)
 * @pbmore: out - non-zero when further messages are already queued
 *
 * The caller must hand the buffer back via smq_free() when done.
 * Returns SMQ_SUCCESS, SMQ_UNDERFLOW (empty queue or uninitialized
 * producer), or SMQ_EBADPARM (corrupt node).
 */
static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore)
{
	struct smq_node *node;
	int err = SMQ_SUCCESS;
	int more = 0;

	/*
	 * NOTE(review): '&&' rejects only when both checks fail -- same
	 * pattern as smq_free(); confirm '||' was not intended.
	 */
	if ((SM_VERSION != smq->hdr->producer_version) &&
		(SMQ_MAGIC_PRODUCER != smq->out->s.init))
		return SMQ_UNDERFLOW;

	if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) {
		err = SMQ_UNDERFLOW;
		goto bail;
	}

	node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read);
	if (node->index_block >= smq->num_blocks) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1)
		% smq->num_blocks;

	*pp = smq->blocks + (node->index_block * SM_BLOCKSIZE);
	*pnsize = SM_BLOCKSIZE * node->num_blocks;
	/* Read barrier: order node reads before re-sampling the write index. */
	rmb();
	if (smq->in->s.index_sent_read != smq->out->s.index_sent_write)
		more = 1;

bail:
	*pbmore = more;
	return err;
}
325
/*
 * Copy @nsize bytes from user space (@pcb) into shared blocks and queue
 * them on the sent ring.  Runs under the producer mutex; first drains
 * the consumer's free ring so returned blocks become reusable.
 *
 * Returns SMQ_SUCCESS, a negative SMQ_* code, or the positive
 * copy_from_user() remainder if the user copy faulted.
 */
static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize)
{
	void *pv = 0;
	int num_blocks;
	uint32_t index_block = 0;
	int err = SMQ_SUCCESS;
	struct smq_node *node = NULL;

	mutex_lock(smq->lock);

	if ((SMQ_MAGIC_CONSUMER == smq->in->s.init) &&
		(SM_VERSION == smq->hdr->consumer_version)) {
		/* Reclaim only once the consumer acked the current reset. */
		if (smq->out->s.index_check_queue_for_reset ==
			smq->in->s.index_check_queue_for_reset_ack) {
			while (smq->out->s.index_free_read !=
				smq->in->s.index_free_write) {
				node = (struct smq_node *)(
					smq->in->free +
					smq->out->s.index_free_read);
				if (node->index_block >= smq->num_blocks) {
					err = SMQ_EBADPARM;
					goto bail;
				}

				smq->out->s.index_free_read =
					(smq->out->s.index_free_read + 1)
						% smq->num_blocks;

				smq_blockmap_put(&smq->block_map,
					node->index_block);
				/* Order node read before the next index check. */
				rmb();
			}
		}
	}

	/* Round the payload up to whole SM_BLOCKSIZE blocks. */
	num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE;
	err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks);
	if (SMQ_SUCCESS != err)
		goto bail;

	pv = smq->blocks + (SM_BLOCKSIZE * index_block);

	err = copy_from_user((void *)pv, (void *)pcb, nsize);
	if (0 != err)
		goto bail;

	/* Publish the node, then advance the sent-ring write index. */
	((struct smq_node *)(smq->out->sent +
		smq->out->s.index_sent_write))->index_block
		= (uint16_t)index_block;
	((struct smq_node *)(smq->out->sent +
		smq->out->s.index_sent_write))->num_blocks
		= (uint16_t)num_blocks;

	smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1)
		% smq->num_blocks;

bail:
	if (SMQ_SUCCESS != err) {
		/* pv non-NULL means blocks were allocated; give them back. */
		if (pv)
			smq_blockmap_put(&smq->block_map, index_block);
	}
	mutex_unlock(smq->lock);
	return err;
}
390
/*
 * Producer half of the reset handshake: when the peer's reset generation
 * (@reset_num) differs from ours, adopt it, invalidate all sent nodes,
 * clear the block map and rewind the producer indices.
 *
 * Returns 1 if a reset was performed, 0 otherwise.
 */
static int smq_reset_producer_queue_internal(struct smq *smq,
	uint32_t reset_num)
{
	int retval = 0;
	uint32_t i;

	if (PRODUCER != smq->type)
		goto bail;

	mutex_lock(smq->lock);
	if (smq->out->s.index_check_queue_for_reset != reset_num) {
		smq->out->s.index_check_queue_for_reset = reset_num;
		/* 0xFFFF marks a sent node as invalid/unused. */
		for (i = 0; i < smq->num_blocks; i++)
			(smq->out->sent + i)->index_block = 0xFFFF;

		smq_blockmap_reset(&smq->block_map);
		smq->out->s.index_sent_write = 0;
		smq->out->s.index_free_read = 0;
		retval = 1;
	}
	mutex_unlock(smq->lock);

bail:
	return retval;
}
416
/*
 * Consumer half of the reset handshake: if the remote producer published
 * a new reset generation, ack it, invalidate the free ring, rewind the
 * consumer indices, and reset the paired local producer queue (@p_prod)
 * to the same generation.
 *
 * Returns the producer-reset result (1 if reset), or 0 when idle.
 */
static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod)
{
	int retval = 0;
	uint32_t reset_num, i;

	if ((CONSUMER != p_cons->type) ||
		(SMQ_MAGIC_PRODUCER != p_cons->out->s.init) ||
		(SM_VERSION != p_cons->hdr->producer_version))
		goto bail;

	reset_num = p_cons->out->s.index_check_queue_for_reset;
	if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) {
		p_cons->in->s.index_check_queue_for_reset_ack = reset_num;
		/* 0xFFFF marks a free node as invalid/unused. */
		for (i = 0; i < p_cons->num_blocks; i++)
			(p_cons->in->free + i)->index_block = 0xFFFF;

		p_cons->in->s.index_sent_read = 0;
		p_cons->in->s.index_free_write = 0;

		retval = smq_reset_producer_queue_internal(p_prod, reset_num);
	}

bail:
	return retval;
}
442
/*
 * Probe one half of the SMEM region (read-only) to see whether the
 * remote debug agent has initialized its consumer control block.  The
 * pointer arithmetic mirrors the layout computed in smq_ctor().
 *
 * Returns 0 when the subsystem is debug-enabled, SMQ_EBADPARM if the
 * region is too small, or -ECOMM when the consumer magic is missing.
 */
static int check_subsystem_debug_enabled(void *base_addr, int size)
{
	int num_blocks;
	uint8_t *pb_orig;
	uint8_t *pb;
	struct smq smq;
	int err = 0;

	pb = pb_orig = (uint8_t *)base_addr;
	pb += sizeof(struct smq_hdr);
	pb = PTR_ALIGN(pb, 8);
	size -= pb - (uint8_t *)pb_orig;
	num_blocks = (int)((size - sizeof(struct smq_out_state) -
		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
		sizeof(struct smq_node) * 2));
	if (0 >= num_blocks) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	/* smq.out is only a stepping stone used to locate smq.in. */
	pb += num_blocks * SM_BLOCKSIZE;
	smq.out = (struct smq_out *)pb;
	pb += sizeof(struct smq_out_state) + (num_blocks *
		sizeof(struct smq_node));
	smq.in = (struct smq_in *)pb;

	if (SMQ_MAGIC_CONSUMER != smq.in->s.init) {
		pr_err("%s, smq in consumer not initialized", __func__);
		err = -ECOMM;
	}

bail:
	return err;
}
477
478static void smq_dtor(struct smq *smq)
479{
480 if (SMQ_MAGIC_INIT == smq->initialized) {
481 switch (smq->type) {
482 case PRODUCER:
483 smq->out->s.init = 0;
484 smq_blockmap_dtor(&smq->block_map);
485 break;
486 case CONSUMER:
487 smq->in->s.init = 0;
488 break;
489 default:
490 case INVALID:
491 break;
492 }
493
494 smq->initialized = 0;
495 }
496}
497
498/*
499 * The shared memory is used as a circular ring buffer in each direction.
500 * Thus we have a bi-directional shared memory channel between the AP
501 * and a subsystem. We call this SMQ. Each memory channel contains a header,
502 * data and a control mechanism that is used to synchronize read and write
503 * of data between the AP and the remote subsystem.
504 *
505 * Overall SMQ memory view:
506 *
507 * +------------------------------------------------+
508 * | SMEM buffer |
509 * |-----------------------+------------------------|
510 * |Producer: LA | Producer: Remote |
511 * |Consumer: Remote | subsystem |
512 * | subsystem | Consumer: LA |
513 * | | |
514 * | Producer| Consumer|
515 * +-----------------------+------------------------+
516 * | |
517 * | |
518 * | +--------------------------------------+
519 * | |
520 * | |
521 * v v
522 * +--------------------------------------------------------------+
523 * | Header | Data | Control |
524 * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
525 * | | b | b | b | | S |n |n | | S |n |n | |
526 * | Producer | l | l | l | | M |o |o | | M |o |o | |
527 * | Ver | o | o | o | | Q |d |d | | Q |d |d | |
528 * |-----------| c | c | c | ... | |e |e | ... | |e |e | ... |
529 * | | k | k | k | | O | | | | I | | | |
530 * | Consumer | | | | | u |0 |1 | | n |0 |1 | |
531 * | Ver | 0 | 1 | 2 | | t | | | | | | | |
532 * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
533 * | |
534 * + |
535 * |
536 * +------------------------+
537 * |
538 * v
539 * +----+----+----+----+
540 * | SMQ Nodes |
541 * |----|----|----|----|
542 * Node # | 0 | 1 | 2 | ...|
543 * |----|----|----|----|
544 * Starting Block Index # | 0 | 3 | 8 | ...|
545 * |----|----|----|----|
546 * # of blocks | 3 | 5 | 1 | ...|
547 * +----+----+----+----+
548 *
549 * Header: Contains version numbers for software compatibility to ensure
550 * that both producers and consumers on the AP and subsystems know how to
551 * read from and write to the queue.
552 * Both the producer and consumer versions are 1.
553 * +---------+-------------------+
554 * | Size | Field |
555 * +---------+-------------------+
556 * | 1 byte | Producer Version |
557 * +---------+-------------------+
558 * | 1 byte | Consumer Version |
559 * +---------+-------------------+
560 *
561 * Data: The data portion contains multiple blocks [0..N] of a fixed size.
562 * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
563 * Payload sent from the debug agent app is split (if necessary) and placed
564 * in these blocks. The first data block is placed at the next 8 byte aligned
565 * address after the header.
566 *
567 * The number of blocks for a given SMEM allocation is derived as follows:
568 * Number of Blocks = ((Total Size - Alignment - Size of Header
569 * - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE))
570 *
571 * The producer maintains a private block map of each of these blocks to
572 * determine which of these blocks in the queue is available and which are free.
573 *
574 * Control:
575 * The control portion contains a list of nodes [0..N] where N is number
576 * of available data blocks. Each node identifies the data
 * block indexes that contain a particular debug message to be transferred,
578 * and the number of blocks it took to hold the contents of the message.
579 *
580 * Each node has the following structure:
581 * +---------+-------------------+
582 * | Size | Field |
583 * +---------+-------------------+
 * | 2 bytes |Starting Block Index|
585 * +---------+-------------------+
586 * | 2 bytes |Number of Blocks |
587 * +---------+-------------------+
588 *
589 * The producer and the consumer update different parts of the control channel
590 * (SMQOut / SMQIn) respectively. Each of these control data structures contains
591 * information about the last node that was written / read, and the actual nodes
592 * that were written/read.
593 *
594 * SMQOut Structure (R/W by producer, R by consumer):
595 * +---------+-------------------+
596 * | Size | Field |
597 * +---------+-------------------+
598 * | 4 bytes | Magic Init Number |
599 * +---------+-------------------+
600 * | 4 bytes | Reset |
601 * +---------+-------------------+
602 * | 4 bytes | Last Sent Index |
603 * +---------+-------------------+
604 * | 4 bytes | Index Free Read |
605 * +---------+-------------------+
606 *
607 * SMQIn Structure (R/W by consumer, R by producer):
608 * +---------+-------------------+
609 * | Size | Field |
610 * +---------+-------------------+
611 * | 4 bytes | Magic Init Number |
612 * +---------+-------------------+
613 * | 4 bytes | Reset ACK |
614 * +---------+-------------------+
615 * | 4 bytes | Last Read Index |
616 * +---------+-------------------+
617 * | 4 bytes | Index Free Write |
618 * +---------+-------------------+
619 *
620 * Magic Init Number:
621 * Both SMQ Out and SMQ In initialize this field with a predefined magic
622 * number so as to make sure that both the consumer and producer blocks
623 * have fully initialized and have valid data in the shared memory control area.
624 * Producer Magic #: 0xFF00FF01
625 * Consumer Magic #: 0xFF00FF02
626 */
/*
 * Construct one directional queue over the @base_addr/@size region.
 *
 * Lays out: header, 8-byte-aligned block area, SMQOut (state + sent
 * nodes), SMQIn (state + free nodes) -- see the layout comment above.
 * The producer also builds its private block map and bumps the reset
 * generation when a previous producer instance left its magic behind.
 *
 * Returns SMQ_SUCCESS or a negative SMQ_* code.
 */
static int smq_ctor(struct smq *smq, void *base_addr, int size,
	enum smq_type type, struct mutex *lock_ptr)
{
	int num_blocks;
	uint8_t *pb_orig;
	uint8_t *pb;
	uint32_t i;
	int err;

	if (SMQ_MAGIC_INIT == smq->initialized) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	if (!base_addr || !size) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	/* Only the producer takes a lock; the consumer side passes NULL. */
	if (type == PRODUCER)
		smq->lock = lock_ptr;

	pb_orig = (uint8_t *)base_addr;
	smq->hdr = (struct smq_hdr *)pb_orig;
	pb = pb_orig;
	pb += sizeof(struct smq_hdr);
	pb = PTR_ALIGN(pb, 8);
	size -= pb - (uint8_t *)pb_orig;
	/* Each block costs SM_BLOCKSIZE payload + one sent + one free node. */
	num_blocks = (int)((size - sizeof(struct smq_out_state) -
		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
		sizeof(struct smq_node) * 2));
	if (0 >= num_blocks) {
		err = SMQ_ENOMEMORY;
		goto bail;
	}

	smq->blocks = pb;
	smq->num_blocks = num_blocks;
	pb += num_blocks * SM_BLOCKSIZE;
	smq->out = (struct smq_out *)pb;
	pb += sizeof(struct smq_out_state) + (num_blocks *
		sizeof(struct smq_node));
	smq->in = (struct smq_in *)pb;
	smq->type = type;
	if (PRODUCER == type) {
		smq->hdr->producer_version = SM_VERSION;
		/* 0xFFFF marks every sent node invalid until first use. */
		for (i = 0; i < smq->num_blocks; i++)
			(smq->out->sent + i)->index_block = 0xFFFF;

		err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks);
		if (SMQ_SUCCESS != err)
			goto bail;

		smq->out->s.index_sent_write = 0;
		smq->out->s.index_free_read = 0;
		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
			/* Producer restart: ask the consumer to reset. */
			smq->out->s.index_check_queue_for_reset += 1;
		} else {
			smq->out->s.index_check_queue_for_reset = 1;
			smq->out->s.init = SMQ_MAGIC_PRODUCER;
		}
	} else {
		smq->hdr->consumer_version = SM_VERSION;
		for (i = 0; i < smq->num_blocks; i++)
			(smq->in->free + i)->index_block = 0xFFFF;

		smq->in->s.index_sent_read = 0;
		smq->in->s.index_free_write = 0;
		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
			/* Producer already up: ack its current generation. */
			smq->in->s.index_check_queue_for_reset_ack =
				smq->out->s.index_check_queue_for_reset;
		} else {
			smq->in->s.index_check_queue_for_reset_ack = 0;
		}

		smq->in->s.init = SMQ_MAGIC_CONSUMER;
	}
	smq->initialized = SMQ_MAGIC_INIT;
	err = SMQ_SUCCESS;

bail:
	return err;
}
710
711static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata)
712{
713 int offset = rdbgdata->gpio_out_offset;
714 int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset);
715 gpio_set_value(rdbgdata->out.gpio_base_id + offset, val);
716 rdbgdata->gpio_out_offset = (offset + 1) % 32;
717
718 dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem",
719 __func__, val);
720}
721
722static irqreturn_t on_interrupt_from(int irq, void *ptr)
723{
724 struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr;
725
726 dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem",
727 __func__, irq);
728
729 complete(&(rdbgdata->work));
730 return IRQ_HANDLED;
731}
732
733static int initialize_smq(struct rdbg_data *rdbgdata)
734{
735 int err = 0;
736
737 if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr),
738 ((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) {
739 dev_err(rdbgdata->device, "%s: smq producer allocation failed",
740 __func__);
741 err = -ENOMEM;
742 goto bail;
743 }
744
745 if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)((uint32_t)
746 (rdbgdata->smem_addr) + ((rdbgdata->smem_size)/2)),
747 ((rdbgdata->smem_size)/2), CONSUMER, NULL)) {
748 dev_err(rdbgdata->device, "%s: smq conmsumer allocation failed",
749 __func__);
750 err = -ENOMEM;
751 }
752
753bail:
754 return err;
755
756}
757
/*
 * open() handler: bring up the channel for one subsystem (minor).
 *
 * Allocates the SMEM buffer, verifies the remote debug agent is running,
 * registers the incoming smp2p interrupt, and constructs both queues.
 * Fails with -EEXIST on a second open, -ENOMEM/-ECOMM on setup errors.
 */
static int rdbg_open(struct inode *inode, struct file *filp)
{
	int device_id = -1;
	struct rdbg_device *device = &g_rdbg_instance;
	struct rdbg_data *rdbgdata = NULL;
	int err = 0;

	if (!inode || !device->rdbg_data) {
		pr_err("Memory not allocated yet");
		err = -ENODEV;
		goto bail;
	}

	device_id = MINOR(inode->i_rdev);
	rdbgdata = &device->rdbg_data[device_id];

	if (rdbgdata->device_opened) {
		dev_err(rdbgdata->device, "%s: Device already opened",
			__func__);
		err = -EEXIST;
		goto bail;
	}

	rdbgdata->smem_size = proc_info[device_id].smem_buffer_size;
	if (!rdbgdata->smem_size) {
		dev_err(rdbgdata->device, "%s: smem not initialized", __func__);
		err = -ENOMEM;
		goto bail;
	}

	rdbgdata->smem_addr = smem_alloc(proc_info[device_id].smem_buffer_addr,
		rdbgdata->smem_size);
	if (!rdbgdata->smem_addr) {
		dev_err(rdbgdata->device, "%s: Could not allocate smem memory",
			__func__);
		err = -ENOMEM;
		goto bail;
	}
	dev_dbg(rdbgdata->device, "%s: SMEM address=0x%x smem_size=%d",
		__func__, (unsigned int)rdbgdata->smem_addr,
		rdbgdata->smem_size);

	/* The remote consumer control block lives in the first half. */
	if (check_subsystem_debug_enabled(rdbgdata->smem_addr,
		rdbgdata->smem_size/2)) {
		dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled",
			__func__, proc_info[device_id].name);
		err = -ECOMM;
		goto bail;
	}

	init_completion(&rdbgdata->work);

	err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			proc_info[device_id].name,
			(void *)&device->rdbg_data[device_id]);
	if (err) {
		dev_err(rdbgdata->device,
			"%s: Failed to register interrupt.Err=%d,irqid=%d.",
			__func__, err, rdbgdata->in.irq_base_id);
		goto irq_bail;
	}

	/* Wake capability is best-effort; failure is not fatal. */
	err = enable_irq_wake(rdbgdata->in.irq_base_id);
	if (err < 0) {
		dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d",
			err);
		err = 0;
	}

	mutex_init(&rdbgdata->write_mutex);

	err = initialize_smq(rdbgdata);
	if (err) {
		dev_err(rdbgdata->device, "Error initializing smq. Err=%d",
			err);
		goto smq_bail;
	}

	rdbgdata->device_opened = 1;

	filp->private_data = (void *)rdbgdata;

	return 0;

smq_bail:
	/* smq_dtor() is a no-op for a queue whose ctor never completed. */
	smq_dtor(&(rdbgdata->producer_smrb));
	smq_dtor(&(rdbgdata->consumer_smrb));
	mutex_destroy(&rdbgdata->write_mutex);
irq_bail:
	free_irq(rdbgdata->in.irq_base_id, (void *)
		&device->rdbg_data[device_id]);
bail:
	return err;
}
853
/*
 * release() handler: undo rdbg_open() for this minor -- wake any blocked
 * reader, free the IRQ, destroy both queues (if constructed) and the
 * write mutex.  The SMEM buffer itself is not released here.
 */
static int rdbg_release(struct inode *inode, struct file *filp)
{
	int device_id = -1;
	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
	struct rdbg_data *rdbgdata = NULL;
	int err = 0;

	if (!inode || !rdbgdevice->rdbg_data) {
		pr_err("Memory not allocated yet");
		err = -ENODEV;
		goto bail;
	}

	device_id = MINOR(inode->i_rdev);
	rdbgdata = &rdbgdevice->rdbg_data[device_id];

	if (rdbgdata->device_opened == 1) {
		dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__,
			proc_info[device_id].name);
		rdbgdata->device_opened = 0;
		/* Unblock a reader waiting in rdbg_read(). */
		complete(&(rdbgdata->work));
		free_irq(rdbgdata->in.irq_base_id, (void *)
			&rdbgdevice->rdbg_data[device_id]);
		if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized)
			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
				producer_smrb));
		if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized)
			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
				consumer_smrb));
		mutex_destroy(&rdbgdata->write_mutex);
	}

	filp->private_data = NULL;

bail:
	return err;
}
891
892static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size,
893 loff_t *offset)
894{
895 int err = 0;
896 struct rdbg_data *rdbgdata = filp->private_data;
897 void *p_sent_buffer = NULL;
898 int nsize = 0;
899 int more = 0;
900
901 if (!rdbgdata) {
902 pr_err("Invalid argument");
903 err = -EINVAL;
904 goto bail;
905 }
906
907 dev_dbg(rdbgdata->device, "%s: In receive", __func__);
908 err = wait_for_completion_interruptible(&(rdbgdata->work));
909 if (err) {
910 dev_err(rdbgdata->device, "%s: Error in wait", __func__);
911 goto bail;
912 }
913
914 smq_check_queue_reset(&(rdbgdata->consumer_smrb),
915 &(rdbgdata->producer_smrb));
916 if (SMQ_SUCCESS != smq_receive(&(rdbgdata->consumer_smrb),
917 &p_sent_buffer, &nsize, &more)) {
918 dev_err(rdbgdata->device, "%s: Error in smq_recv(). Err code = %d",
919 __func__, err);
920 err = -ENODATA;
921 goto bail;
922 }
923
924 size = ((size < nsize) ? size : nsize);
925 err = copy_to_user(buf, p_sent_buffer, size);
926 if (err != 0) {
927 dev_err(rdbgdata->device, "%s: Error in copy_to_user(). Err code = %d",
928 __func__, err);
929 err = -ENODATA;
930 goto bail;
931 }
932
933 smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer);
934 err = size;
935 dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%x",
936 __func__, (unsigned int) buf);
937
938bail:
939 dev_dbg(rdbgdata->device, "%s: Returning from receive", __func__);
940 return err;
941}
942
943static ssize_t rdbg_write(struct file *filp, const char __user *buf,
944 size_t size, loff_t *offset)
945{
946 int err = 0;
947 struct rdbg_data *rdbgdata = filp->private_data;
948
949 if (!rdbgdata) {
950 pr_err("Invalid argument");
951 err = -EINVAL;
952 goto bail;
953 }
954
955 if (smq_alloc_send(&(rdbgdata->producer_smrb), buf, size)) {
956 dev_err(rdbgdata->device, "%s, Error sending", __func__);
957 err = -ECOMM;
958 goto bail;
959 }
960 send_interrupt_to_subsystem(rdbgdata);
961
962 err = size;
963
964bail:
965 return err;
966}
967
968
/* Character-device entry points for every /dev/rdbg_* node. */
static const struct file_operations rdbg_fops = {
	.open = rdbg_open,
	.read  = rdbg_read,
	.write  = rdbg_write,
	.release = rdbg_release,
};
975
976static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr)
977{
978 struct device_node *node = NULL;
979 int cnt = 0;
980 int id = 0;
981
982 node = of_find_compatible_node(NULL, NULL, node_name);
983 if (node) {
984 cnt = of_gpio_count(node);
985 if (cnt && gpio_info_ptr) {
986 id = of_get_gpio(node, 0);
987 gpio_info_ptr->gpio_base_id = id;
988 gpio_info_ptr->irq_base_id = gpio_to_irq(id);
989 return 0;
990 }
991 }
992 return -EINVAL;
993}
994
/*
 * Module init: allocate per-device state, register the char-device
 * region, and create a /dev node for every subsystem that has both "in"
 * and "out" smp2p device-tree entries.
 *
 * Note the deliberate "goto name_bail" on success: node_name is a
 * scratch buffer and is always freed; the labels below it unwind only
 * on failure.
 */
static int __init rdbg_init(void)
{
	int err = 0;
	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
	int minor = 0;
	int major = 0;
	int minor_nodes_created = 0;

	char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_";
	/*
	 * "xx_out" sizes the buffer for the longest suffix.  Minors stay
	 * single-digit (num_devices <= SMP2P_NUM_PROCS == 8), so the
	 * terminating NUL also fits -- revisit if more minors are added.
	 */
	int max_len = strlen(rdbg_compatible_string) + strlen("xx_out");

	char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL);

	if (!node_name) {
		pr_err("Not enough memory");
		err = -ENOMEM;
		goto bail;
	}

	if (rdbgdevice->num_devices < 1 ||
		rdbgdevice->num_devices > SMP2P_NUM_PROCS) {
		pr_err("rgdb: invalid num_devices");
		err = -EDOM;
		goto name_bail;
	}

	rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices,
		sizeof(struct rdbg_data), GFP_KERNEL);
	if (!rdbgdevice->rdbg_data) {
		pr_err("Not enough memory for rdbg devices");
		err = -ENOMEM;
		goto name_bail;
	}

	err = alloc_chrdev_region(&rdbgdevice->dev_no, 0,
		rdbgdevice->num_devices, "rdbgctl");
	if (err) {
		pr_err("Error in alloc_chrdev_region.");
		goto data_bail;
	}
	major = MAJOR(rdbgdevice->dev_no);

	cdev_init(&rdbgdevice->cdev, &rdbg_fops);
	rdbgdevice->cdev.owner = THIS_MODULE;
	err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0),
		rdbgdevice->num_devices);
	if (err) {
		pr_err("Error in cdev_add");
		goto chrdev_bail;
	}

	rdbgdevice->class = class_create(THIS_MODULE, "rdbg");
	if (IS_ERR(rdbgdevice->class)) {
		err = PTR_ERR(rdbgdevice->class);
		pr_err("Error in class_create");
		goto cdev_bail;
	}

	for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
		/* Slots without a name (reserved processors) are skipped. */
		if (!proc_info[minor].name)
			continue;

		if (snprintf(node_name, max_len, "%s%d_in",
			rdbg_compatible_string, minor) <= 0) {
			pr_err("Error in snprintf");
			err = -ENOMEM;
			goto device_bail;
		}

		/* A missing "in" entry is not fatal: skip this subsystem. */
		if (register_smp2p(node_name,
			&rdbgdevice->rdbg_data[minor].in)) {
			pr_debug("No incoming device tree entry found for %s",
				proc_info[minor].name);
			continue;
		}

		if (snprintf(node_name, max_len, "%s%d_out",
			rdbg_compatible_string, minor) <= 0) {
			pr_err("Error in snprintf");
			err = -ENOMEM;
			goto device_bail;
		}

		/* An "in" without a matching "out" is a broken DT: abort. */
		if (register_smp2p(node_name,
			&rdbgdevice->rdbg_data[minor].out)) {
			pr_err("No outgoing device tree entry found for %s",
				proc_info[minor].name);
			err = -EINVAL;
			goto device_bail;
		}

		rdbgdevice->rdbg_data[minor].device = device_create(
			rdbgdevice->class, NULL, MKDEV(major, minor),
			NULL, "%s", proc_info[minor].name);
		if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) {
			err = PTR_ERR(rdbgdevice->rdbg_data[minor].device);
			pr_err("Error in device_create");
			goto device_bail;
		}
		rdbgdevice->rdbg_data[minor].device_initialized = 1;
		minor_nodes_created++;
		dev_dbg(rdbgdevice->rdbg_data[minor].device,
			"%s: created /dev/%s c %d %d'", __func__,
			proc_info[minor].name, major, minor);
	}

	if (!minor_nodes_created) {
		pr_err("No device tree entries found");
		err = -EINVAL;
		goto class_bail;
	}

	goto name_bail;

device_bail:
	/* Destroy only the nodes created before the failing minor. */
	for (--minor; minor >= 0; minor--) {
		if (rdbgdevice->rdbg_data[minor].device_initialized)
			device_destroy(rdbgdevice->class,
				MKDEV(MAJOR(rdbgdevice->dev_no), minor));
	}
class_bail:
	class_destroy(rdbgdevice->class);
cdev_bail:
	cdev_del(&rdbgdevice->cdev);
chrdev_bail:
	unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
data_bail:
	kfree(rdbgdevice->rdbg_data);
name_bail:
	kfree(node_name);
bail:
	return err;
}
1128
1129static void __exit rdbg_exit(void)
1130{
1131 struct rdbg_device *rdbgdevice = &g_rdbg_instance;
1132 int minor;
1133
1134 for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
1135 if (rdbgdevice->rdbg_data[minor].device_initialized) {
1136 device_destroy(rdbgdevice->class,
1137 MKDEV(MAJOR(rdbgdevice->dev_no), minor));
1138 }
1139 }
1140 class_destroy(rdbgdevice->class);
1141 cdev_del(&rdbgdevice->cdev);
1142 unregister_chrdev_region(rdbgdevice->dev_no, 1);
1143 kfree(rdbgdevice->rdbg_data);
1144}
1145
1146module_init(rdbg_init);
1147module_exit(rdbg_exit);
1148
1149MODULE_DESCRIPTION("rdbg module");
1150MODULE_LICENSE("GPL v2");