/*
 * dmxdev.c - DVB demultiplexer device
 *
 * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
 *		      for convergence integrated media GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include "dmxdev.h"

static int overflow_auto_flush = 1;
module_param(overflow_auto_flush, int, 0644);
MODULE_PARM_DESC(overflow_auto_flush,
	"Automatically flush buffer on overflow (default: on)");

#define DMX_DEFAULT_DECODER_BUFFER_SIZE (32768)

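/*
 * Check that a requested buffer size does not exceed the demux-reported
 * maximum and, when an alignment is advertised, is a multiple of it.
 */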
static inline int dvb_dmxdev_verify_buffer_size(u32 size, u32 max_size,
	u32 size_align)
{
	if (size_align)
		return size <= max_size && !(size % size_align);
	else
		return size <= max_size;
}

static int dvb_filter_verify_buffer_size(struct dmxdev_filter *filter)
{
	struct dmx_caps caps;
	size_t size = filter->buffer.size;

	/*
	 * For backward compatibility, if no demux capabilities can
	 * be retrieved assume size is ok.
	 * Decoder filter buffer size is verified when decoder buffer is set.
	 */
	if (filter->dev->demux->get_caps) {
		filter->dev->demux->get_caps(filter->dev->demux, &caps);

		if (filter->type == DMXDEV_TYPE_SEC)
			return dvb_dmxdev_verify_buffer_size(
				size,
				caps.section.max_size,
				caps.section.size_alignment);

		if (filter->params.pes.output == DMX_OUT_TAP)
			return dvb_dmxdev_verify_buffer_size(
				size,
				caps.pes.max_size,
				caps.pes.size_alignment);

		size = (filter->params.pes.output == DMX_OUT_TS_TAP) ?
			filter->dev->dvr_buffer.size : size;

		if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP ||
			filter->params.pes.output == DMX_OUT_TS_TAP) {
			if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
				return dvb_dmxdev_verify_buffer_size(
					size,
					caps.recording_188_tsp.max_size,
					caps.recording_188_tsp.size_alignment);

			return dvb_dmxdev_verify_buffer_size(
				size,
				caps.recording_192_tsp.max_size,
				caps.recording_192_tsp.size_alignment);
		}
	}

	return 1;
}

static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
				   const u8 *src, size_t len)
{
	ssize_t free;

	if (!len)
		return 0;
	if (!buf->data)
		return 0;

	free = dvb_ringbuffer_free(buf);
	if (len > free) {
		pr_debug("dmxdev: buffer overflow\n");
		return -EOVERFLOW;
	}

	return dvb_ringbuffer_write(buf, src, len);
}

static inline void dvb_dmxdev_notify_data_read(struct dmxdev_filter *filter,
					int bytes_read)
{
	if (!filter)
		return;

	if (filter->type == DMXDEV_TYPE_SEC) {
		if (filter->feed.sec.feed->notify_data_read)
			filter->feed.sec.feed->notify_data_read(
						filter->filter.sec,
						bytes_read);
	} else {
		struct dmxdev_feed *feed;

		/*
		 * All feeds of same demux-handle share the same output
		 * buffer, it is enough to notify on the buffer status
		 * on one of the feeds
		 */
		feed = list_first_entry(&filter->feed.ts,
					struct dmxdev_feed, next);

		if (feed->ts->notify_data_read)
			feed->ts->notify_data_read(
					feed->ts,
					bytes_read);
	}
}

static inline u32 dvb_dmxdev_advance_event_idx(u32 index)
{
	index++;
	if (index >= DMX_EVENT_QUEUE_SIZE)
		index = 0;

	return index;
}

static inline int dvb_dmxdev_events_is_full(struct dmxdev_events_queue *events)
{
	int new_write_index;

	new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
	if (new_write_index == events->read_index)
		return 1;

	return 0;
}

static inline void dvb_dmxdev_flush_events(struct dmxdev_events_queue *events)
{
	events->read_index = 0;
	events->write_index = 0;
	events->notified_index = 0;
	events->bytes_read_no_event = 0;
	events->current_event_data_size = 0;
	events->wakeup_events_counter = 0;
}

static inline void dvb_dmxdev_flush_output(struct dvb_ringbuffer *buffer,
					struct dmxdev_events_queue *events)
{
	dvb_dmxdev_flush_events(events);
	dvb_ringbuffer_flush(buffer);
}

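/*
 * The dvb_dmxdev_update_*_event() helpers shrink a pending data event after
 * 'bytes_read' bytes were consumed from the output buffer. They return the
 * event's remaining length if the event was consumed entirely (so the caller
 * can drop it), or 0 if it was only partially consumed and its offsets were
 * adjusted in place.
 */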
static int dvb_dmxdev_update_pes_event(struct dmx_filter_event *event,
					int bytes_read)
{
	int start_delta;

	if (event->params.pes.total_length <= bytes_read)
		return event->params.pes.total_length;

	/*
	 * only part of the data relevant to this event was read.
	 * Update the event's information to reflect the new state.
	 */
	event->params.pes.total_length -= bytes_read;

	start_delta = event->params.pes.start_offset -
		event->params.pes.base_offset;

	if (bytes_read <= start_delta) {
		event->params.pes.base_offset +=
			bytes_read;
	} else {
		start_delta =
			bytes_read - start_delta;

		event->params.pes.start_offset += start_delta;
		event->params.pes.actual_length -= start_delta;

		event->params.pes.base_offset =
			event->params.pes.start_offset;
	}

	return 0;
}

static int dvb_dmxdev_update_section_event(struct dmx_filter_event *event,
					int bytes_read)
{
	int start_delta;

	if (event->params.section.total_length <= bytes_read)
		return event->params.section.total_length;

	/*
	 * only part of the data relevant to this event was read.
	 * Update the event's information to reflect the new state.
	 */
	event->params.section.total_length -= bytes_read;

	start_delta = event->params.section.start_offset -
		event->params.section.base_offset;

	if (bytes_read <= start_delta) {
		event->params.section.base_offset +=
			bytes_read;
	} else {
		start_delta =
			bytes_read - start_delta;

		event->params.section.start_offset += start_delta;
		event->params.section.actual_length -= start_delta;

		event->params.section.base_offset =
			event->params.section.start_offset;
	}

	return 0;
}

static int dvb_dmxdev_update_rec_event(struct dmx_filter_event *event,
					int bytes_read)
{
	if (event->params.recording_chunk.size <= bytes_read)
		return event->params.recording_chunk.size;

	/*
	 * only part of the data relevant to this event was read.
	 * Update the event's information to reflect the new state.
	 */
	event->params.recording_chunk.size -= bytes_read;
	event->params.recording_chunk.offset += bytes_read;

	return 0;
}

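/*
 * Queue a new event for the filter. If data was already read from the buffer
 * before this event arrived (bytes_read_no_event), the event is first
 * adjusted or dropped accordingly. Returns -EOVERFLOW when the event queue
 * is full.
 */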
static int dvb_dmxdev_add_event(struct dmxdev_events_queue *events,
					struct dmx_filter_event *event)
{
	int res;
	int new_write_index;
	int data_event;

	/* Check if the event is disabled */
	if (events->event_mask.disable_mask & event->type)
		return 0;

	/* Check if we are adding an event whose data the user already read */
	if (events->bytes_read_no_event) {
		data_event = 1;

		if (event->type == DMX_EVENT_NEW_PES)
			res = dvb_dmxdev_update_pes_event(event,
				events->bytes_read_no_event);
		else if (event->type == DMX_EVENT_NEW_SECTION)
			res = dvb_dmxdev_update_section_event(event,
				events->bytes_read_no_event);
		else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
			res = dvb_dmxdev_update_rec_event(event,
				events->bytes_read_no_event);
		else
			data_event = 0;

		if (data_event) {
			if (res) {
				/*
				 * Data relevant to this event was fully
				 * consumed already, discard event.
				 */
				events->bytes_read_no_event -= res;
				return 0;
			}
			events->bytes_read_no_event = 0;
		} else {
			/*
			 * data was read beyond the non-data event,
			 * making it not relevant anymore
			 */
			return 0;
		}
	}

	new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
	if (new_write_index == events->read_index) {
		pr_err("dmxdev: events overflow\n");
		return -EOVERFLOW;
	}

	events->queue[events->write_index] = *event;
	events->write_index = new_write_index;

	if (!(events->event_mask.no_wakeup_mask & event->type))
		events->wakeup_events_counter++;

	return 0;
}

static int dvb_dmxdev_remove_event(struct dmxdev_events_queue *events,
					struct dmx_filter_event *event)
{
	if (events->notified_index == events->write_index)
		return -ENODATA;

	*event = events->queue[events->notified_index];

	events->notified_index =
		dvb_dmxdev_advance_event_idx(events->notified_index);

	if (!(events->event_mask.no_wakeup_mask & event->type))
		events->wakeup_events_counter--;

	return 0;
}

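/*
 * Account for 'bytes_read' bytes consumed from the output buffer: shrink or
 * retire already-notified events, then pending ones, and remember any excess
 * in bytes_read_no_event for events that have not been queued yet.
 */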
static int dvb_dmxdev_update_events(struct dmxdev_events_queue *events,
					int bytes_read)
{
	struct dmx_filter_event *event;
	int res;
	int data_event;

	/*
	 * If data events are not enabled on this filter,
	 * there's nothing to update.
	 */
	if (events->data_read_event_masked)
		return 0;

	/*
	 * Go through all events that were notified and
	 * remove them from the events queue if their respective
	 * data was read.
	 */
	while ((events->read_index != events->notified_index) &&
	       (bytes_read)) {
		event = events->queue + events->read_index;

		data_event = 1;

		if (event->type == DMX_EVENT_NEW_PES)
			res = dvb_dmxdev_update_pes_event(event, bytes_read);
		else if (event->type == DMX_EVENT_NEW_SECTION)
			res = dvb_dmxdev_update_section_event(event,
								bytes_read);
		else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
			res = dvb_dmxdev_update_rec_event(event, bytes_read);
		else
			data_event = 0;

		if (data_event) {
			if (res) {
				/*
				 * Data relevant to this event was
				 * fully consumed, remove it from the queue.
				 */
				bytes_read -= res;
				events->read_index =
					dvb_dmxdev_advance_event_idx(
						events->read_index);
			} else {
				bytes_read = 0;
			}
		} else {
			/*
			 * non-data event was already notified,
			 * no need to keep it
			 */
			events->read_index = dvb_dmxdev_advance_event_idx(
						events->read_index);
		}
	}

	if (!bytes_read)
		return 0;

	/*
	 * If we reached here it means:
	 * bytes_read != 0
	 * events->read_index == events->notified_index
	 * Check if there are pending events in the queue
	 * which the user didn't read while their relevant data
	 * was read.
	 */
	while ((events->notified_index != events->write_index) &&
	       (bytes_read)) {
		event = events->queue + events->notified_index;

		data_event = 1;

		if (event->type == DMX_EVENT_NEW_PES)
			res = dvb_dmxdev_update_pes_event(event, bytes_read);
		else if (event->type == DMX_EVENT_NEW_SECTION)
			res = dvb_dmxdev_update_section_event(event,
								bytes_read);
		else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
			res = dvb_dmxdev_update_rec_event(event, bytes_read);
		else
			data_event = 0;

		if (data_event) {
			if (res) {
				/*
				 * Data relevant to this event was
				 * fully consumed, remove it from the queue.
				 */
				bytes_read -= res;
				events->notified_index =
					dvb_dmxdev_advance_event_idx(
						events->notified_index);
				if (!(events->event_mask.no_wakeup_mask &
				      event->type))
					events->wakeup_events_counter--;
			} else {
				bytes_read = 0;
			}
		} else {
			if (bytes_read)
				/*
				 * data was read beyond the non-data event,
				 * making it not relevant anymore
				 */
				events->notified_index =
					dvb_dmxdev_advance_event_idx(
						events->notified_index);
			if (!(events->event_mask.no_wakeup_mask &
			      event->type))
				events->wakeup_events_counter--;
		}

		events->read_index = events->notified_index;
	}

	/*
	 * Check if data was read without having a respective
	 * event in the events-queue
	 */
	if (bytes_read)
		events->bytes_read_no_event += bytes_read;

	return 0;
}

static inline int dvb_dmxdev_check_data(struct dmxdev_filter *filter,
					struct dvb_ringbuffer *src)
{
	int data_status_change;

	if (filter)
		if (mutex_lock_interruptible(&filter->mutex))
			return -ERESTARTSYS;

	if (!src->data ||
	    !dvb_ringbuffer_empty(src) ||
	    src->error ||
	    (filter &&
	     (filter->state != DMXDEV_STATE_GO) &&
	     (filter->state != DMXDEV_STATE_DONE)))
		data_status_change = 1;
	else
		data_status_change = 0;

	if (filter)
		mutex_unlock(&filter->mutex);

	return data_status_change;
}

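/*
 * Copy up to 'count' bytes from the ring buffer to user space, blocking until
 * data arrives unless non_blocking is set. When a filter is given, its mutex
 * is dropped while waiting and re-acquired afterwards.
 */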
static ssize_t dvb_dmxdev_buffer_read(struct dmxdev_filter *filter,
					struct dvb_ringbuffer *src,
					int non_blocking, char __user *buf,
					size_t count, loff_t *ppos)
{
	size_t todo;
	ssize_t avail;
	ssize_t ret = 0;

	if (!src->data)
		return 0;

	if (src->error) {
		ret = src->error;
		src->error = 0;
		return ret;
	}

	for (todo = count; todo > 0; todo -= ret) {
		if (non_blocking && dvb_ringbuffer_empty(src)) {
			ret = -EWOULDBLOCK;
			break;
		}

		if (filter) {
			if ((filter->state == DMXDEV_STATE_DONE) &&
			    dvb_ringbuffer_empty(src))
				break;

			mutex_unlock(&filter->mutex);
		}

		ret = wait_event_interruptible(src->queue,
				dvb_dmxdev_check_data(filter, src));

		if (filter) {
			if (mutex_lock_interruptible(&filter->mutex))
				return -ERESTARTSYS;

			if ((filter->state != DMXDEV_STATE_GO) &&
			    (filter->state != DMXDEV_STATE_DONE))
				return -ENODEV;
		}

		if (ret < 0)
			break;

		if (!src->data)
			return 0;

		if (src->error) {
			ret = src->error;
			src->error = 0;
			break;
		}

		avail = dvb_ringbuffer_avail(src);
		if (avail > todo)
			avail = todo;

		ret = dvb_ringbuffer_read_user(src, buf, avail);
		if (ret < 0)
			break;

		buf += ret;
	}

	if (count - todo) /* some data was read? */
		wake_up_all(&src->queue);

	return (count - todo) ? (count - todo) : ret;
}

static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type)
{
	struct list_head *head, *pos;

	head = demux->get_frontends(demux);
	if (!head)
		return NULL;
	list_for_each(pos, head)
		if (DMX_FE_ENTRY(pos)->source == type)
			return DMX_FE_ENTRY(pos);

	return NULL;
}

static void dvb_dvr_oob_cmd(struct dmxdev *dmxdev, struct dmx_oob_command *cmd)
{
	int i;
	struct dmxdev_filter *filter;
	struct dmxdev_feed *feed;

	for (i = 0; i < dmxdev->filternum; i++) {
		filter = &dmxdev->filter[i];
		if (!filter || filter->state != DMXDEV_STATE_GO)
			continue;

		switch (filter->type) {
		case DMXDEV_TYPE_SEC:
			filter->feed.sec.feed->oob_command(
				filter->feed.sec.feed, cmd);
			break;
		case DMXDEV_TYPE_PES:
			feed = list_first_entry(&filter->feed.ts,
						struct dmxdev_feed, next);
			feed->ts->oob_command(feed->ts, cmd);
			break;
		case DMXDEV_TYPE_NONE:
			break;
		default:
			break;
		}
	}
}

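/*
 * Push the data described by a DVR_DATA_FEED_CMD from the DVR input ring
 * buffer into the demux, one TS-packet-aligned chunk at a time, handling
 * ring-buffer wrap-around and release of the device while waiting.
 */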
static int dvb_dvr_feed_cmd(struct dmxdev *dmxdev, struct dvr_command *dvr_cmd)
{
	int ret = 0;
	size_t todo;
	int bytes_written = 0;
	size_t split;
	size_t tsp_size;
	u8 *data_start;
	struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;

	todo = dvr_cmd->cmd.data_feed_count;

	if (dmxdev->demux->get_tsp_size)
		tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
	else
		tsp_size = 188;

	while (todo >= tsp_size) {
		/* wait for input */
		ret = wait_event_interruptible(
			src->queue,
			(dvb_ringbuffer_avail(src) >= tsp_size) ||
			dmxdev->dvr_in_exit || src->error);

		if (ret < 0)
			break;

		spin_lock(&dmxdev->dvr_in_lock);

		if (dmxdev->exit || dmxdev->dvr_in_exit) {
			spin_unlock(&dmxdev->dvr_in_lock);
			ret = -ENODEV;
			break;
		}

		if (src->error) {
			spin_unlock(&dmxdev->dvr_in_lock);
			wake_up_all(&src->queue);
			ret = -EINVAL;
			break;
		}

		dmxdev->dvr_processing_input = 1;

		split = (src->pread + todo > src->size) ?
			src->size - src->pread : 0;

		/*
		 * In DVR PULL mode, write might block.
		 * Lock on DVR buffer is released before calling to
		 * write, if DVR was released meanwhile, dvr_in_exit is
		 * prompted. Lock is acquired when updating the read pointer
		 * again to preserve read/write pointers consistency.
		 *
		 * In protected input mode, DVR input buffer is not mapped
		 * to kernel memory. Underlying demux implementation
		 * should trigger HW to read from DVR input buffer
		 * based on current read offset.
		 */
		if (split > 0) {
			data_start = (dmxdev->demux->dvr_input_protected) ?
				NULL : (src->data + src->pread);

			spin_unlock(&dmxdev->dvr_in_lock);
			ret = dmxdev->demux->write(dmxdev->demux,
						data_start,
						split);

			if (ret < 0) {
				pr_err("dmxdev: dvr write error %d\n", ret);
				continue;
			}

			if (dmxdev->dvr_in_exit) {
				ret = -ENODEV;
				break;
			}

			spin_lock(&dmxdev->dvr_in_lock);

			todo -= ret;
			bytes_written += ret;
			DVB_RINGBUFFER_SKIP(src, ret);
			if (ret < split) {
				dmxdev->dvr_processing_input = 0;
				spin_unlock(&dmxdev->dvr_in_lock);
				wake_up_all(&src->queue);
				continue;
			}
		}

		data_start = (dmxdev->demux->dvr_input_protected) ?
			NULL : (src->data + src->pread);

		spin_unlock(&dmxdev->dvr_in_lock);
		ret = dmxdev->demux->write(dmxdev->demux,
					data_start, todo);

		if (ret < 0) {
			pr_err("dmxdev: dvr write error %d\n", ret);
			continue;
		}

		if (dmxdev->dvr_in_exit) {
			ret = -ENODEV;
			break;
		}

		spin_lock(&dmxdev->dvr_in_lock);

		todo -= ret;
		bytes_written += ret;
		DVB_RINGBUFFER_SKIP(src, ret);
		dmxdev->dvr_processing_input = 0;
		spin_unlock(&dmxdev->dvr_in_lock);

		wake_up_all(&src->queue);
	}

	if (ret < 0)
		return ret;

	return bytes_written;
}

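/*
 * Kernel thread that drains the DVR command ring buffer: data-feed commands
 * are forwarded to dvb_dvr_feed_cmd() (carrying over leftover bytes that did
 * not form a whole TS packet), out-of-band commands are dispatched to the
 * active filters.
 */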
static int dvr_input_thread_entry(void *arg)
{
	struct dmxdev *dmxdev = arg;
	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
	struct dvr_command dvr_cmd;
	int leftover = 0;
	int ret;

	while (1) {
		/* wait for input */
		ret = wait_event_interruptible(
			cmdbuf->queue,
			(!cmdbuf->data) ||
			(dvb_ringbuffer_avail(cmdbuf) >= sizeof(dvr_cmd)) ||
			(dmxdev->dvr_in_exit));

		if (ret < 0)
			break;

		spin_lock(&dmxdev->dvr_in_lock);

		if (!cmdbuf->data || dmxdev->exit || dmxdev->dvr_in_exit) {
			spin_unlock(&dmxdev->dvr_in_lock);
			break;
		}

		dvb_ringbuffer_read(cmdbuf, (u8 *)&dvr_cmd, sizeof(dvr_cmd));

		spin_unlock(&dmxdev->dvr_in_lock);

		if (dvr_cmd.type == DVR_DATA_FEED_CMD) {
			dvr_cmd.cmd.data_feed_count += leftover;

			ret = dvb_dvr_feed_cmd(dmxdev, &dvr_cmd);
			if (ret < 0) {
				pr_debug("%s: DVR data feed failed, ret=%d\n",
					__func__, ret);
				continue;
			}

			leftover = dvr_cmd.cmd.data_feed_count - ret;
		} else {
			/*
			 * For EOS, try to process leftover data in the input
			 * buffer.
			 */
			if (dvr_cmd.cmd.oobcmd.type == DMX_OOB_CMD_EOS) {
				struct dvr_command feed_cmd;

				feed_cmd.type = DVR_DATA_FEED_CMD;
				feed_cmd.cmd.data_feed_count =
					dvb_ringbuffer_avail(
						&dmxdev->dvr_input_buffer);
				dvb_dvr_feed_cmd(dmxdev, &feed_cmd);
			}

			dvb_dvr_oob_cmd(dmxdev, &dvr_cmd.cmd.oobcmd);
		}
	}

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);

	return 0;
}

static int dvb_dvr_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	struct dmx_frontend *front;
	void *mem;

	pr_debug("function : %s(%X)\n", __func__, (file->f_flags & O_ACCMODE));

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (dmxdev->exit) {
		mutex_unlock(&dmxdev->mutex);
		return -ENODEV;
	}

	if ((file->f_flags & O_ACCMODE) == O_RDWR) {
		if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
			mutex_unlock(&dmxdev->mutex);
			return -EOPNOTSUPP;
		}
	}

	if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
		if (!dvbdev->readers) {
			mutex_unlock(&dmxdev->mutex);
			return -EBUSY;
		}
		mem = vmalloc_user(DVR_BUFFER_SIZE);
		if (!mem) {
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}
		dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
		dvb_dmxdev_flush_events(&dmxdev->dvr_output_events);
		dmxdev->dvr_output_events.event_mask.disable_mask = 0;
		dmxdev->dvr_output_events.event_mask.no_wakeup_mask = 0;
		dmxdev->dvr_output_events.event_mask.wakeup_threshold = 1;
		dmxdev->dvr_feeds_count = 0;
		dmxdev->dvr_buffer_mode = DMX_BUFFER_MODE_INTERNAL;
		dmxdev->dvr_priv_buff_handle = NULL;

		dvbdev->readers--;
	} else if (!dvbdev->writers) {
		dmxdev->dvr_in_exit = 0;
		dmxdev->dvr_processing_input = 0;
		dmxdev->dvr_orig_fe = dmxdev->demux->frontend;

		if (!dmxdev->demux->write) {
			mutex_unlock(&dmxdev->mutex);
			return -EOPNOTSUPP;
		}

		front = get_fe(dmxdev->demux, DMX_MEMORY_FE);

		if (!front) {
			mutex_unlock(&dmxdev->mutex);
			return -EINVAL;
		}

		mem = vmalloc_user(DVR_BUFFER_SIZE);
		if (!mem) {
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}

		dmxdev->demux->disconnect_frontend(dmxdev->demux);
		dmxdev->demux->connect_frontend(dmxdev->demux, front);
		dmxdev->dvr_input_buffer_mode = DMX_BUFFER_MODE_INTERNAL;

		dvb_ringbuffer_init(&dmxdev->dvr_input_buffer,
					mem,
					DVR_BUFFER_SIZE);

		dmxdev->demux->dvr_input.priv_handle = NULL;
		dmxdev->demux->dvr_input.ringbuff = &dmxdev->dvr_input_buffer;
		dmxdev->demux->dvr_input_protected = 0;
		mem = vmalloc(DVR_CMDS_BUFFER_SIZE);
		if (!mem) {
			vfree(dmxdev->dvr_input_buffer.data);
			dmxdev->dvr_input_buffer.data = NULL;
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}
		dvb_ringbuffer_init(&dmxdev->dvr_cmd_buffer, mem,
					DVR_CMDS_BUFFER_SIZE);
		dvbdev->writers--;

		dmxdev->dvr_input_thread =
			kthread_run(
				dvr_input_thread_entry,
				(void *)dmxdev,
				"dvr_input");

		if (IS_ERR(dmxdev->dvr_input_thread)) {
			vfree(dmxdev->dvr_input_buffer.data);
			vfree(dmxdev->dvr_cmd_buffer.data);
			dmxdev->dvr_input_buffer.data = NULL;
			dmxdev->dvr_cmd_buffer.data = NULL;
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}
	}

	dvbdev->users++;
	mutex_unlock(&dmxdev->mutex);
	return 0;
}

static int dvb_dvr_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;

	mutex_lock(&dmxdev->mutex);

	if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
		dvbdev->readers++;
		if (dmxdev->dvr_buffer.data) {
			void *mem = dmxdev->dvr_buffer.data;

			mb();
			spin_lock_irq(&dmxdev->lock);
			dmxdev->dvr_buffer.data = NULL;
			spin_unlock_irq(&dmxdev->lock);
			wake_up_all(&dmxdev->dvr_buffer.queue);

			if (dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_INTERNAL)
				vfree(mem);
		}

		if ((dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
		    dmxdev->dvr_priv_buff_handle) {
			dmxdev->demux->unmap_buffer(dmxdev->demux,
					dmxdev->dvr_priv_buff_handle);
			dmxdev->dvr_priv_buff_handle = NULL;
		}
	} else {
		int i;

		spin_lock(&dmxdev->dvr_in_lock);
		dmxdev->dvr_in_exit = 1;
		spin_unlock(&dmxdev->dvr_in_lock);

		wake_up_all(&dmxdev->dvr_cmd_buffer.queue);

		/*
		 * There might be dmx filters reading now from DVR
		 * device, in PULL mode, they might be also stalled
		 * on output, signal to them that DVR is exiting.
		 */
		if (dmxdev->playback_mode == DMX_PB_MODE_PULL) {
			wake_up_all(&dmxdev->dvr_buffer.queue);

			for (i = 0; i < dmxdev->filternum; i++)
				if (dmxdev->filter[i].state == DMXDEV_STATE_GO)
					wake_up_all(
					&dmxdev->filter[i].buffer.queue);
		}

		/* notify kernel demux that we are canceling */
		if (dmxdev->demux->write_cancel)
			dmxdev->demux->write_cancel(dmxdev->demux);

		/*
		 * Now stop dvr-input thread so that no one
		 * would process data from dvr input buffer any more
		 * before it gets freed.
		 */
		kthread_stop(dmxdev->dvr_input_thread);

		dvbdev->writers++;
		dmxdev->demux->disconnect_frontend(dmxdev->demux);
		dmxdev->demux->connect_frontend(dmxdev->demux,
						dmxdev->dvr_orig_fe);

		if (dmxdev->dvr_input_buffer.data) {
			void *mem = dmxdev->dvr_input_buffer.data;
			/*
			 * Ensure all the operations on the DVR input buffer
			 * are completed before it gets freed.
			 */
			mb();
			spin_lock_irq(&dmxdev->dvr_in_lock);
			dmxdev->dvr_input_buffer.data = NULL;
			spin_unlock_irq(&dmxdev->dvr_in_lock);

			if (dmxdev->dvr_input_buffer_mode ==
				DMX_BUFFER_MODE_INTERNAL)
				vfree(mem);
		}

		if ((dmxdev->dvr_input_buffer_mode ==
			DMX_BUFFER_MODE_EXTERNAL) &&
			(dmxdev->demux->dvr_input.priv_handle)) {
			if (!dmxdev->demux->dvr_input_protected)
				dmxdev->demux->unmap_buffer(dmxdev->demux,
					dmxdev->demux->dvr_input.priv_handle);
			dmxdev->demux->dvr_input.priv_handle = NULL;
		}

		if (dmxdev->dvr_cmd_buffer.data) {
			void *mem = dmxdev->dvr_cmd_buffer.data;
			/*
			 * Ensure all the operations on the DVR command buffer
			 * are completed before it gets freed.
			 */
			mb();
			spin_lock_irq(&dmxdev->dvr_in_lock);
			dmxdev->dvr_cmd_buffer.data = NULL;
			spin_unlock_irq(&dmxdev->dvr_in_lock);
			vfree(mem);
		}
	}
	/* TODO */
	dvbdev->users--;
	if (dvbdev->users == 1 && dmxdev->exit == 1) {
		fops_put(file->f_op);
		file->f_op = NULL;
		mutex_unlock(&dmxdev->mutex);
		wake_up(&dvbdev->wait_queue);
	} else
		mutex_unlock(&dmxdev->mutex);

	return 0;
}

static int dvb_dvr_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dvb_device *dvbdev = filp->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	struct dvb_ringbuffer *buffer;
	enum dmx_buffer_mode buffer_mode;
	int vma_size;
	int buffer_size;
	int ret;

	if (((filp->f_flags & O_ACCMODE) == O_RDONLY) &&
	    (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (dmxdev->exit) {
		mutex_unlock(&dmxdev->mutex);
		return -ENODEV;
	}

	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
		buffer = &dmxdev->dvr_buffer;
		buffer_mode = dmxdev->dvr_buffer_mode;
	} else {
		buffer = &dmxdev->dvr_input_buffer;
		buffer_mode = dmxdev->dvr_input_buffer_mode;
	}

	if (buffer_mode == DMX_BUFFER_MODE_EXTERNAL) {
		mutex_unlock(&dmxdev->mutex);
		return -EINVAL;
	}

	vma_size = vma->vm_end - vma->vm_start;

	/* Make sure requested mapping is not larger than buffer size */
	buffer_size = buffer->size + (PAGE_SIZE-1);
	buffer_size = buffer_size & ~(PAGE_SIZE-1);

	if (vma_size != buffer_size) {
		mutex_unlock(&dmxdev->mutex);
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buffer->data, 0);
	if (ret) {
		mutex_unlock(&dmxdev->mutex);
		return ret;
	}

	vma->vm_flags |= VM_DONTDUMP;
	vma->vm_flags |= VM_DONTEXPAND;

	mutex_unlock(&dmxdev->mutex);
	return ret;
}

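/*
 * Queue a DVR_DATA_FEED_CMD for the DVR input thread, coalescing it with the
 * previously queued command when that one is also a data-feed command.
 */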
static void dvb_dvr_queue_data_feed(struct dmxdev *dmxdev, size_t count)
{
	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
	struct dvr_command *dvr_cmd;
	int last_dvr_cmd;

	spin_lock(&dmxdev->dvr_in_lock);

	/* Peek at the last DVR command queued, try to coalesce FEED commands */
	if (dvb_ringbuffer_avail(cmdbuf) >= sizeof(*dvr_cmd)) {
		last_dvr_cmd = cmdbuf->pwrite - sizeof(*dvr_cmd);
		if (last_dvr_cmd < 0)
			last_dvr_cmd += cmdbuf->size;

		dvr_cmd = (struct dvr_command *)&cmdbuf->data[last_dvr_cmd];
		if (dvr_cmd->type == DVR_DATA_FEED_CMD) {
			dvr_cmd->cmd.data_feed_count += count;
			spin_unlock(&dmxdev->dvr_in_lock);
			return;
		}
	}

	/*
	 * We assume command buffer is large enough so that overflow should not
	 * happen. Overflow to the command buffer means data previously written
	 * to the input buffer is 'orphan' - does not have a matching FEED
	 * command. Issue a warning if this ever happens.
	 * Orphan data might still be processed if EOS is issued.
	 */
	if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd)) {
		pr_err("%s: DVR command buffer overflow\n", __func__);
		spin_unlock(&dmxdev->dvr_in_lock);
		return;
	}

	dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
	dvr_cmd->type = DVR_DATA_FEED_CMD;
	dvr_cmd->cmd.data_feed_count = count;
	DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
	spin_unlock(&dmxdev->dvr_in_lock);

	wake_up_all(&cmdbuf->queue);
}

static int dvb_dvr_external_input_only(struct dmxdev *dmxdev)
{
	struct dmx_caps caps;
	int is_external_only;
	int flags;
	size_t tsp_size;

	if (dmxdev->demux->get_tsp_size)
		tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
	else
		tsp_size = 188;

	/*
	 * For backward compatibility, default assumes that
	 * external only buffers are not supported.
	 */
	flags = 0;
	if (dmxdev->demux->get_caps) {
		dmxdev->demux->get_caps(dmxdev->demux, &caps);

		if (tsp_size == 188)
			flags = caps.playback_188_tsp.flags;
		else
			flags = caps.playback_192_tsp.flags;
	}

	if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) &&
	    (flags & DMX_BUFFER_EXTERNAL_SUPPORT))
		is_external_only = 1;
	else
		is_external_only = 0;

	return is_external_only;
}

static int dvb_dvr_verify_buffer_size(struct dmxdev *dmxdev,
					unsigned int f_flags,
					unsigned long size)
{
	struct dmx_caps caps;
	int tsp_size;

	if (!dmxdev->demux->get_caps)
		return 1;

	if (dmxdev->demux->get_tsp_size)
		tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
	else
		tsp_size = 188;

	dmxdev->demux->get_caps(dmxdev->demux, &caps);
	if ((f_flags & O_ACCMODE) == O_RDONLY)
		return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size,
				caps.recording_188_tsp.max_size,
				caps.recording_188_tsp.size_alignment)) ||
			(tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size,
				caps.recording_192_tsp.max_size,
				caps.recording_192_tsp.size_alignment));

	return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size,
			caps.playback_188_tsp.max_size,
			caps.playback_188_tsp.size_alignment)) ||
		(tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size,
			caps.playback_192_tsp.max_size,
			caps.playback_192_tsp.size_alignment));
}

static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
	int ret;
	size_t todo;
	ssize_t free_space;

	if (!dmxdev->demux->write)
		return -EOPNOTSUPP;

	if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags, src->size) ||
	    ((file->f_flags & O_ACCMODE) == O_RDONLY) ||
	    !src->data || !cmdbuf->data ||
	    (dvb_dvr_external_input_only(dmxdev) &&
	     (dmxdev->dvr_input_buffer_mode == DMX_BUFFER_MODE_INTERNAL)))
		return -EINVAL;

	if ((file->f_flags & O_NONBLOCK) &&
	    (dvb_ringbuffer_free(src) == 0))
		return -EWOULDBLOCK;

	ret = 0;
	for (todo = count; todo > 0; todo -= ret) {
		ret = wait_event_interruptible(src->queue,
				(dvb_ringbuffer_free(src)) ||
				!src->data || !cmdbuf->data ||
				(src->error != 0) || dmxdev->dvr_in_exit);

		if (ret < 0)
			return ret;

		if (mutex_lock_interruptible(&dmxdev->mutex))
			return -ERESTARTSYS;

		if ((!src->data) || (!cmdbuf->data)) {
			mutex_unlock(&dmxdev->mutex);
			return 0;
		}

		if (dmxdev->exit || dmxdev->dvr_in_exit) {
			mutex_unlock(&dmxdev->mutex);
			return -ENODEV;
		}

		if (src->error) {
			ret = src->error;
			dvb_ringbuffer_flush(src);
			mutex_unlock(&dmxdev->mutex);
			wake_up_all(&src->queue);
			return ret;
		}

		free_space = dvb_ringbuffer_free(src);

		if (free_space > todo)
			free_space = todo;

		ret = dvb_ringbuffer_write_user(src, buf, free_space);

		if (ret < 0) {
			mutex_unlock(&dmxdev->mutex);
			return ret;
		}

		buf += ret;

		dvb_dvr_queue_data_feed(dmxdev, ret);

		mutex_unlock(&dmxdev->mutex);
	}

	return (count - todo) ? (count - todo) : ret;
}

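/*
 * Discard 'length' pending output bytes of a filter (or of the DVR output
 * buffer for DMX_OUT_TS_TAP), clear the error state and queued events, and
 * let the underlying feed flush its own buffer if it supports that.
 */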
static int dvb_dmxdev_flush_data(struct dmxdev_filter *filter, size_t length)
{
	int ret = 0;
	unsigned long flags;

	struct dvb_ringbuffer *buffer = &filter->buffer;
	struct dmxdev_events_queue *events = &filter->events;

	if (filter->type == DMXDEV_TYPE_PES &&
		filter->params.pes.output == DMX_OUT_TS_TAP) {
		buffer = &filter->dev->dvr_buffer;
		events = &filter->dev->dvr_output_events;
	}

	/*
	 * Drop 'length' pending data bytes from the ringbuffer and update
	 * event queue accordingly, similarly to dvb_dmxdev_release_data().
	 */
	spin_lock_irqsave(&filter->dev->lock, flags);
	DVB_RINGBUFFER_SKIP(buffer, length);
	buffer->error = 0;
	dvb_dmxdev_flush_events(events);
	events->current_event_start_offset = buffer->pwrite;
	spin_unlock_irqrestore(&filter->dev->lock, flags);

	if (filter->type == DMXDEV_TYPE_PES) {
		struct dmxdev_feed *feed;

		feed = list_first_entry(&filter->feed.ts,
					struct dmxdev_feed, next);

		if (feed->ts->flush_buffer)
			return feed->ts->flush_buffer(feed->ts, length);
	} else if (filter->type == DMXDEV_TYPE_SEC &&
		filter->feed.sec.feed->flush_buffer) {
		return filter->feed.sec.feed->flush_buffer(
			filter->feed.sec.feed, length);
	}

	return ret;
}

static inline void dvb_dmxdev_auto_flush_buffer(struct dmxdev_filter *filter,
	struct dvb_ringbuffer *buf)
{
	size_t flush_len;

	/*
	 * When buffer overflowed, demux-dev marked the buffer in
	 * error state. If auto-flush is enabled discard current
	 * pending data in buffer.
	 */
	if (overflow_auto_flush) {
		flush_len = dvb_ringbuffer_avail(buf);
		dvb_dmxdev_flush_data(filter, flush_len);
	}
}

static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	ssize_t res;
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	unsigned long flags;

	if (dmxdev->exit)
		return -ENODEV;

	if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags,
		dmxdev->dvr_buffer.size))
		return -EINVAL;

	res = dvb_dmxdev_buffer_read(NULL, &dmxdev->dvr_buffer,
				file->f_flags & O_NONBLOCK,
				buf, count, ppos);

	if (res > 0) {
		dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, res);
		spin_lock_irqsave(&dmxdev->lock, flags);
		dvb_dmxdev_update_events(&dmxdev->dvr_output_events, res);
		spin_unlock_irqrestore(&dmxdev->lock, flags);

		/*
		 * in PULL mode, we might be stalling on
		 * event queue, so need to wake-up waiters
		 */
		if (dmxdev->playback_mode == DMX_PB_MODE_PULL)
			wake_up_all(&dmxdev->dvr_buffer.queue);
	} else if (res == -EOVERFLOW) {
		dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed,
			&dmxdev->dvr_buffer);
	}

	return res;
}

/*
 * dvb_dvr_push_oob_cmd
 *
 * Note: this function assumes dmxdev->mutex was taken, so the command buffer
 * cannot be released during its operation.
 */
static int dvb_dvr_push_oob_cmd(struct dmxdev *dmxdev, unsigned int f_flags,
	struct dmx_oob_command *cmd)
{
	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
	struct dvr_command *dvr_cmd;

	if ((f_flags & O_ACCMODE) == O_RDONLY ||
		dmxdev->source < DMX_SOURCE_DVR0)
		return -EPERM;

	if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd))
		return -ENOMEM;

	dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
	dvr_cmd->type = DVR_OOB_CMD;
	dvr_cmd->cmd.oobcmd = *cmd;
	DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
	wake_up_all(&cmdbuf->queue);

	return 0;
}

static int dvb_dvr_flush_buffer(struct dmxdev *dmxdev, unsigned int f_flags)
{
	size_t flush_len;
	int ret;

	if ((f_flags & O_ACCMODE) != O_RDONLY)
		return -EINVAL;

	flush_len = dvb_ringbuffer_avail(&dmxdev->dvr_buffer);
	ret = dvb_dmxdev_flush_data(dmxdev->dvr_feed, flush_len);

	return ret;
}

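/*
 * Resize the DVR output or input ring buffer, depending on the open mode.
 * Only valid for internally allocated buffers; the ring buffer is reset,
 * not flushed, so any pending data is dropped.
 */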
static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
					unsigned int f_flags,
					unsigned long size)
{
	struct dvb_ringbuffer *buf;
	void *newmem;
	void *oldmem;
	spinlock_t *lock;
	enum dmx_buffer_mode buffer_mode;

	pr_debug("function : %s\n", __func__);

	if ((f_flags & O_ACCMODE) == O_RDONLY) {
		buf = &dmxdev->dvr_buffer;
		lock = &dmxdev->lock;
		buffer_mode = dmxdev->dvr_buffer_mode;
	} else {
		buf = &dmxdev->dvr_input_buffer;
		lock = &dmxdev->dvr_in_lock;
		buffer_mode = dmxdev->dvr_input_buffer_mode;
	}

	if (buf->size == size)
		return 0;
	if (!size || (buffer_mode == DMX_BUFFER_MODE_EXTERNAL))
		return -EINVAL;

	newmem = vmalloc_user(size);
	if (!newmem)
		return -ENOMEM;

	oldmem = buf->data;

	spin_lock_irq(lock);

	if (((f_flags & O_ACCMODE) != O_RDONLY) &&
	    (dmxdev->dvr_processing_input)) {
		spin_unlock_irq(lock);
		/* input is being consumed; discard the new allocation and
		 * keep the current buffer in place
		 */
		vfree(newmem);
		return -EBUSY;
	}

	buf->data = newmem;
	buf->size = size;

	/* reset and not flush in case the buffer shrinks */
	dvb_ringbuffer_reset(buf);

	spin_unlock_irq(lock);

	vfree(oldmem);

	return 0;
}

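/*
 * Switch the DVR output or input buffer between an internally vmalloc'ed
 * buffer and an externally supplied one mapped through the demux driver.
 */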
static int dvb_dvr_set_buffer_mode(struct dmxdev *dmxdev,
			unsigned int f_flags, enum dmx_buffer_mode mode)
{
	struct dvb_ringbuffer *buf;
	spinlock_t *lock;
	enum dmx_buffer_mode *buffer_mode;
	void **buff_handle;
	void *oldmem;
	int *is_protected;

	if ((mode != DMX_BUFFER_MODE_INTERNAL) &&
	    (mode != DMX_BUFFER_MODE_EXTERNAL))
		return -EINVAL;

	if ((mode == DMX_BUFFER_MODE_EXTERNAL) &&
	    (!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer))
		return -EINVAL;

	if ((f_flags & O_ACCMODE) == O_RDONLY) {
		buf = &dmxdev->dvr_buffer;
		lock = &dmxdev->lock;
		buffer_mode = &dmxdev->dvr_buffer_mode;
		buff_handle = &dmxdev->dvr_priv_buff_handle;
		is_protected = NULL;
	} else {
		buf = &dmxdev->dvr_input_buffer;
		lock = &dmxdev->dvr_in_lock;
		buffer_mode = &dmxdev->dvr_input_buffer_mode;
		buff_handle = &dmxdev->demux->dvr_input.priv_handle;
		is_protected = &dmxdev->demux->dvr_input_protected;
	}

	if (mode == *buffer_mode)
		return 0;

	oldmem = buf->data;
	spin_lock_irq(lock);
	buf->data = NULL;
	spin_unlock_irq(lock);

	*buffer_mode = mode;

	if (mode == DMX_BUFFER_MODE_INTERNAL) {
		/* switched from external to internal */
		if (*buff_handle) {
			dmxdev->demux->unmap_buffer(dmxdev->demux,
				*buff_handle);
			*buff_handle = NULL;
		}

		if (is_protected)
			*is_protected = 0;

		/* set default internal buffer */
		dvb_dvr_set_buffer_size(dmxdev, f_flags, DVR_BUFFER_SIZE);
	} else if (oldmem) {
		/* switched from internal to external */
		vfree(oldmem);
	}

	return 0;
}

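/*
 * Install a user-supplied (external mode) buffer as the DVR output or input
 * buffer. A protected input buffer is accepted only if the demux reports
 * DMX_CAP_SECURED_INPUT_PLAYBACK and is then left unmapped in the kernel.
 */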
static int dvb_dvr_set_buffer(struct dmxdev *dmxdev,
			unsigned int f_flags, struct dmx_buffer *dmx_buffer)
{
	struct dvb_ringbuffer *buf;
	spinlock_t *lock;
	enum dmx_buffer_mode buffer_mode;
	void **buff_handle;
	void *newmem;
	void *oldmem;
	int *is_protected;
	struct dmx_caps caps;

	if (dmxdev->demux->get_caps)
		dmxdev->demux->get_caps(dmxdev->demux, &caps);
	else
		caps.caps = 0;

	if ((f_flags & O_ACCMODE) == O_RDONLY) {
		buf = &dmxdev->dvr_buffer;
		lock = &dmxdev->lock;
		buffer_mode = dmxdev->dvr_buffer_mode;
		buff_handle = &dmxdev->dvr_priv_buff_handle;
		is_protected = NULL;
	} else {
		buf = &dmxdev->dvr_input_buffer;
		lock = &dmxdev->dvr_in_lock;
		buffer_mode = dmxdev->dvr_input_buffer_mode;
		buff_handle = &dmxdev->demux->dvr_input.priv_handle;
		is_protected = &dmxdev->demux->dvr_input_protected;
		if (!(caps.caps & DMX_CAP_SECURED_INPUT_PLAYBACK) &&
		    dmx_buffer->is_protected)
			return -EINVAL;
	}

	if (!dmx_buffer->size ||
	    (buffer_mode == DMX_BUFFER_MODE_INTERNAL))
		return -EINVAL;

	oldmem = *buff_handle;

	/*
	 * Protected buffer is relevant only for DVR input buffer
	 * when DVR device is opened for write. In such case,
	 * buffer is mapped only if the buffer is not protected one.
	 */
	if (!is_protected || !dmx_buffer->is_protected) {
		if (dmxdev->demux->map_buffer(dmxdev->demux, dmx_buffer,
					buff_handle, &newmem))
			return -ENOMEM;
	} else {
		newmem = NULL;
		*buff_handle = NULL;
	}

	spin_lock_irq(lock);
	buf->data = newmem;
	buf->size = dmx_buffer->size;
	if (is_protected)
		*is_protected = dmx_buffer->is_protected;
	dvb_ringbuffer_reset(buf);
	spin_unlock_irq(lock);

	if (oldmem)
		dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem);

	return 0;
}

static int dvb_dvr_get_event(struct dmxdev *dmxdev,
				unsigned int f_flags,
				struct dmx_filter_event *event)
{
	int res = 0;

	if (!((f_flags & O_ACCMODE) == O_RDONLY))
		return -EINVAL;

	spin_lock_irq(&dmxdev->lock);

	if (dmxdev->dvr_buffer.error == -EOVERFLOW) {
		event->type = DMX_EVENT_BUFFER_OVERFLOW;
		dmxdev->dvr_buffer.error = 0;
	} else {
		res = dvb_dmxdev_remove_event(&dmxdev->dvr_output_events,
						event);
		if (res) {
			spin_unlock_irq(&dmxdev->lock);
			return res;
		}
	}

	spin_unlock_irq(&dmxdev->lock);

	if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
		dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed,
					&dmxdev->dvr_buffer);

	/*
	 * in PULL mode, we might be stalling on
	 * event queue, so need to wake-up waiters
	 */
	if (dmxdev->playback_mode == DMX_PB_MODE_PULL)
		wake_up_all(&dmxdev->dvr_buffer.queue);

	return res;
}

static int dvb_dvr_get_buffer_status(struct dmxdev *dmxdev,
				unsigned int f_flags,
				struct dmx_buffer_status *dmx_buffer_status)
{
	struct dvb_ringbuffer *buf;
	spinlock_t *lock;

	if ((f_flags & O_ACCMODE) == O_RDONLY) {
		buf = &dmxdev->dvr_buffer;
		lock = &dmxdev->lock;
	} else {
		buf = &dmxdev->dvr_input_buffer;
		lock = &dmxdev->dvr_in_lock;
	}

	spin_lock_irq(lock);

	dmx_buffer_status->error = buf->error;
	dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
	dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
	dmx_buffer_status->read_offset = buf->pread;
	dmx_buffer_status->write_offset = buf->pwrite;
	dmx_buffer_status->size = buf->size;
	buf->error = 0;

	spin_unlock_irq(lock);

	if (dmx_buffer_status->error == -EOVERFLOW)
		dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed, buf);

	return 0;
}

static int dvb_dvr_release_data(struct dmxdev *dmxdev,
					unsigned int f_flags,
					u32 bytes_count)
{
	ssize_t buff_fullness;

	if (!((f_flags & O_ACCMODE) == O_RDONLY))
		return -EINVAL;

	if (!bytes_count)
		return 0;

	buff_fullness = dvb_ringbuffer_avail(&dmxdev->dvr_buffer);

	if (bytes_count > buff_fullness)
		return -EINVAL;

	DVB_RINGBUFFER_SKIP(&dmxdev->dvr_buffer, bytes_count);

	dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, bytes_count);
	spin_lock_irq(&dmxdev->lock);
	dvb_dmxdev_update_events(&dmxdev->dvr_output_events, bytes_count);
	spin_unlock_irq(&dmxdev->lock);

	wake_up_all(&dmxdev->dvr_buffer.queue);
	return 0;
}

/*
 * dvb_dvr_feed_data - Notify new data in DVR input buffer
 *
 * @dmxdev - demux device instance
 * @f_flags - demux device file flag (access mode)
 * @bytes_count - how many bytes were written to the input buffer
 *
 * Note: this function assumes dmxdev->mutex was taken, so the buffer cannot
 * be released during its operation.
 */
static int dvb_dvr_feed_data(struct dmxdev *dmxdev,
			unsigned int f_flags,
			u32 bytes_count)
{
	ssize_t free_space;
	struct dvb_ringbuffer *buffer = &dmxdev->dvr_input_buffer;

	if ((f_flags & O_ACCMODE) == O_RDONLY)
		return -EINVAL;

	if (!bytes_count)
		return 0;

	free_space = dvb_ringbuffer_free(buffer);

	if (bytes_count > free_space)
		return -EINVAL;

	DVB_RINGBUFFER_PUSH(buffer, bytes_count);

	dvb_dvr_queue_data_feed(dmxdev, bytes_count);

	return 0;
}

Andreas Oberritterf705e6e2006-03-10 15:22:31 -03001749static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
1750 *dmxdevfilter, int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751{
1752 spin_lock_irq(&dmxdevfilter->dev->lock);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03001753 dmxdevfilter->state = state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 spin_unlock_irq(&dmxdevfilter->dev->lock);
1755}
1756
1757static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
1758 unsigned long size)
1759{
1760 struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
1761 void *newmem;
1762 void *oldmem;
1763
1764 if (buf->size == size)
1765 return 0;
1766 if (!size ||
1767 (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL))
1768 return -EINVAL;
1769 if (dmxdevfilter->state >= DMXDEV_STATE_GO)
1770 return -EBUSY;
1771
1772 newmem = vmalloc_user(size);
1773 if (!newmem)
1774 return -ENOMEM;
1775
1776 oldmem = buf->data;
1777
1778 spin_lock_irq(&dmxdevfilter->dev->lock);
1779 buf->data = newmem;
1780 buf->size = size;
1781
1782 /* reset and not flush in case the buffer shrinks */
1783 dvb_ringbuffer_reset(buf);
1784 spin_unlock_irq(&dmxdevfilter->dev->lock);
1785
1786 vfree(oldmem);
1787
1788 return 0;
1789}
1790
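/*
 * dvb_dmxdev_set_buffer_mode - switch a filter between internal (vmalloc'ed)
 * and external (user mapped) output buffers.
 *
 * The previous buffer is detached under the spinlock and released afterwards:
 * an internal buffer is vfree'd, an external one is unmapped through the
 * demux driver. The mode cannot be changed while the filter is running, and
 * external mode requires map_buffer/unmap_buffer support from the demux.
 */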
1791static int dvb_dmxdev_set_buffer_mode(struct dmxdev_filter *dmxdevfilter,
1792 enum dmx_buffer_mode mode)
1793{
1794 struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
1795 struct dmxdev *dmxdev = dmxdevfilter->dev;
1796 void *oldmem;
1797
1798 if (dmxdevfilter->state >= DMXDEV_STATE_GO)
1799 return -EBUSY;
1800
1801 if ((mode != DMX_BUFFER_MODE_INTERNAL) &&
1802 (mode != DMX_BUFFER_MODE_EXTERNAL))
1803 return -EINVAL;
1804
1805 if ((mode == DMX_BUFFER_MODE_EXTERNAL) &&
1806 (!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer))
1807 return -EINVAL;
1808
1809 if (mode == dmxdevfilter->buffer_mode)
1810 return 0;
1811
1812 oldmem = buf->data;
1813 spin_lock_irq(&dmxdevfilter->dev->lock);
1814 buf->data = NULL;
1815 spin_unlock_irq(&dmxdevfilter->dev->lock);
1816
1817 dmxdevfilter->buffer_mode = mode;
1818
1819 if (mode == DMX_BUFFER_MODE_INTERNAL) {
1820 /* switched from external to internal */
1821 if (dmxdevfilter->priv_buff_handle) {
1822 dmxdev->demux->unmap_buffer(dmxdev->demux,
1823 dmxdevfilter->priv_buff_handle);
1824 dmxdevfilter->priv_buff_handle = NULL;
1825 }
1826 } else if (oldmem) {
1827 /* switched from internal to external */
1828 vfree(oldmem);
1829 }
1830
1831 return 0;
1832}
1833
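/*
 * dvb_dmxdev_set_buffer - attach a user supplied (external) buffer as the
 * filter's output ring buffer.
 *
 * The buffer is mapped through the demux driver and installed under the
 * device spinlock; any previously mapped buffer is unmapped afterwards.
 * Valid only in external buffer mode and while the filter is not running.
 */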
1834static int dvb_dmxdev_set_buffer(struct dmxdev_filter *dmxdevfilter,
1835 struct dmx_buffer *buffer)
1836{
1837 struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
1838 struct dmxdev *dmxdev = dmxdevfilter->dev;
1839 void *newmem;
1840 void *oldmem;
1841
1842 if (dmxdevfilter->state >= DMXDEV_STATE_GO)
1843 return -EBUSY;
1844
1845 if ((!buffer->size) ||
1846 (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL))
1847 return -EINVAL;
1848
1849 oldmem = dmxdevfilter->priv_buff_handle;
1850 if (dmxdev->demux->map_buffer(dmxdev->demux, buffer,
1851 &dmxdevfilter->priv_buff_handle, &newmem))
1852 return -ENOMEM;
1853
1854 spin_lock_irq(&dmxdevfilter->dev->lock);
1855 buf->data = newmem;
1856 buf->size = buffer->size;
1857 dvb_ringbuffer_reset(buf);
1858 spin_unlock_irq(&dmxdevfilter->dev->lock);
1859
1860 if (oldmem)
1861 dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem);
1862
1863 return 0;
1864}
1865
1866static int dvb_dmxdev_set_tsp_out_format(struct dmxdev_filter *dmxdevfilter,
1867 enum dmx_tsp_format_t dmx_tsp_format)
1868{
1869 if (dmxdevfilter->state >= DMXDEV_STATE_GO)
1870 return -EBUSY;
1871
1872 if ((dmx_tsp_format > DMX_TSP_FORMAT_192_HEAD) ||
1873 (dmx_tsp_format < DMX_TSP_FORMAT_188))
1874 return -EINVAL;
1875
1876 dmxdevfilter->dmx_tsp_format = dmx_tsp_format;
1877
1878 return 0;
1879}
1880
1881static int dvb_dmxdev_set_decoder_buffer_size(
1882 struct dmxdev_filter *dmxdevfilter,
1883 unsigned long size)
1884{
1885 struct dmx_caps caps;
1886 struct dmx_demux *demux = dmxdevfilter->dev->demux;
1887
1888 if (demux->get_caps) {
1889 demux->get_caps(demux, &caps);
1890 if (!dvb_dmxdev_verify_buffer_size(size, caps.decoder.max_size,
1891 caps.decoder.size_alignment))
1892 return -EINVAL;
1893 }
1894
1895 if (size == 0)
1896 return -EINVAL;
1897
1898 if (dmxdevfilter->decoder_buffers.buffers_size == size)
1899 return 0;
1900
1901 if (dmxdevfilter->state >= DMXDEV_STATE_GO)
1902 return -EBUSY;
1903
1904 /*
1905 * In case decoder buffers were already set before to some external
1906 * buffers, setting the decoder buffer size alone implies transition
1907 * to internal buffer mode.
1908 */
1909 dmxdevfilter->decoder_buffers.buffers_size = size;
1910 dmxdevfilter->decoder_buffers.buffers_num = 0;
1911 dmxdevfilter->decoder_buffers.is_linear = 0;
1912 return 0;
1913}
1914
1915static int dvb_dmxdev_set_source(struct dmxdev_filter *dmxdevfilter,
1916 dmx_source_t *source)
1917{
1918 int ret = 0;
1919 struct dmxdev *dev;
1920
1921 if (dmxdevfilter->state == DMXDEV_STATE_GO)
1922 return -EBUSY;
1923
1924 dev = dmxdevfilter->dev;
1925 if (dev->demux->set_source)
1926 ret = dev->demux->set_source(dev->demux, source);
1927
1928 if (!ret)
1929 dev->source = *source;
1930
1931 return ret;
1932}
1933
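/*
 * dvb_dmxdev_reuse_decoder_buf - hand a decoder (ES) buffer, identified by
 * its cookie, back to the demux so it can be reused.
 *
 * Allowed only for running decoder filters that have the
 * DMX_EVENT_NEW_ES_DATA event enabled, since the cookie is delivered to
 * userspace through that event.
 */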
1934static int dvb_dmxdev_reuse_decoder_buf(struct dmxdev_filter *dmxdevfilter,
1935 int cookie)
1936{
1937 struct dmxdev_feed *feed;
1938
1939 if (dmxdevfilter->state != DMXDEV_STATE_GO ||
1940 (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
1941 (dmxdevfilter->params.pes.output != DMX_OUT_DECODER) ||
1942 (dmxdevfilter->events.event_mask.disable_mask &
1943 DMX_EVENT_NEW_ES_DATA))
1944 return -EPERM;
1945
1946 /* Only one feed should be in the list in case of decoder */
1947 feed = list_first_entry(&dmxdevfilter->feed.ts,
1948 struct dmxdev_feed, next);
1949 if (feed && feed->ts && feed->ts->reuse_decoder_buffer)
1950 return feed->ts->reuse_decoder_buffer(feed->ts, cookie);
1951
1952 return -ENODEV;
1953}
1954
1955static int dvb_dmxdev_set_event_mask(struct dmxdev_filter *dmxdevfilter,
1956 struct dmx_events_mask *event_mask)
1957{
1958 if (!event_mask ||
1959 (event_mask->wakeup_threshold >= DMX_EVENT_QUEUE_SIZE))
1960 return -EINVAL;
1961
1962 if (dmxdevfilter->state == DMXDEV_STATE_GO)
1963 return -EBUSY;
1964
1965 /*
1966 * Overflow event is not allowed to be masked.
1967 * This is because if overflow occurs, demux stops outputting data
1968 * until user is notified. If user is using events to read the data,
1969 * the overflow event must be always enabled or otherwise we would
1970 * never recover from overflow state.
1971 */
1972 event_mask->disable_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
1973 event_mask->no_wakeup_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
1974
1975 dmxdevfilter->events.event_mask = *event_mask;
1976
1977 return 0;
1978}
1979
1980static int dvb_dmxdev_get_event_mask(struct dmxdev_filter *dmxdevfilter,
1981 struct dmx_events_mask *event_mask)
1982{
1983 if (!event_mask)
1984 return -EINVAL;
1985
1986 *event_mask = dmxdevfilter->events.event_mask;
1987
1988 return 0;
1989}
1990
1991static int dvb_dmxdev_set_indexing_params(struct dmxdev_filter *dmxdevfilter,
1992 struct dmx_indexing_params *idx_params)
1993{
1994 int found_pid;
1995 struct dmxdev_feed *feed;
1996 struct dmxdev_feed *ts_feed = NULL;
1997 struct dmx_caps caps;
1998 int ret = 0;
1999
2000 if (!dmxdevfilter->dev->demux->get_caps)
2001 return -EINVAL;
2002
2003 dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
2004
2005 if (!idx_params ||
2006 !(caps.caps & DMX_CAP_VIDEO_INDEXING) ||
2007 (dmxdevfilter->state < DMXDEV_STATE_SET) ||
2008 (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
2009 ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
2010 (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
2011 return -EINVAL;
2012
2013 if (idx_params->enable && !idx_params->types)
2014 return -EINVAL;
2015
2016 found_pid = 0;
2017 list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
2018 if (feed->pid == idx_params->pid) {
2019 found_pid = 1;
2020 ts_feed = feed;
2021 ts_feed->idx_params = *idx_params;
2022 if ((dmxdevfilter->state == DMXDEV_STATE_GO) &&
2023 ts_feed->ts->set_idx_params)
2024 ret = ts_feed->ts->set_idx_params(
2025 ts_feed->ts, idx_params);
2026 break;
2027 }
2028 }
2029
2030 if (!found_pid)
2031 return -EINVAL;
2032
2033 return ret;
2034}
2035
2036static int dvb_dmxdev_get_scrambling_bits(struct dmxdev_filter *filter,
2037 struct dmx_scrambling_bits *scrambling_bits)
2038{
2039 struct dmxdev_feed *feed;
2040
2041 if (!scrambling_bits ||
2042 (filter->state != DMXDEV_STATE_GO))
2043 return -EINVAL;
2044
2045 if (filter->type == DMXDEV_TYPE_SEC) {
2046 if (filter->feed.sec.feed->get_scrambling_bits)
2047 return filter->feed.sec.feed->get_scrambling_bits(
2048 filter->feed.sec.feed,
2049 &scrambling_bits->value);
2050 return -EINVAL;
2051 }
2052
2053 list_for_each_entry(feed, &filter->feed.ts, next) {
2054 if (feed->pid == scrambling_bits->pid) {
2055 if (feed->ts->get_scrambling_bits)
2056 return feed->ts->get_scrambling_bits(feed->ts,
2057 &scrambling_bits->value);
2058 return -EINVAL;
2059 }
2060 }
2061
2062 return -EINVAL;
2063}
2064
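/*
 * dvb_dmxdev_ts_insertion_work - delayed work that pushes a user supplied
 * TS insertion buffer into the stream.
 *
 * The buffer is inserted only when the output ring buffer has room for it,
 * and the work re-arms itself according to the configured repetition time
 * until it is aborted or the filter stops.
 */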
2065static void dvb_dmxdev_ts_insertion_work(struct work_struct *worker)
2066{
2067 struct ts_insertion_buffer *ts_buffer =
2068 container_of(to_delayed_work(worker),
2069 struct ts_insertion_buffer, dwork);
2070 struct dmxdev_feed *feed;
2071 size_t free_bytes;
2072 struct dmx_ts_feed *ts;
2073
2074 mutex_lock(&ts_buffer->dmxdevfilter->mutex);
2075
2076 if (ts_buffer->abort ||
2077 (ts_buffer->dmxdevfilter->state != DMXDEV_STATE_GO)) {
2078 mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
2079 return;
2080 }
2081
2082 feed = list_first_entry(&ts_buffer->dmxdevfilter->feed.ts,
2083 struct dmxdev_feed, next);
2084 ts = feed->ts;
2085 free_bytes = dvb_ringbuffer_free(&ts_buffer->dmxdevfilter->buffer);
2086
2087 mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
2088
2089 if (ts_buffer->size < free_bytes)
2090 ts->ts_insertion_insert_buffer(ts,
2091 ts_buffer->buffer, ts_buffer->size);
2092
2093 if (ts_buffer->repetition_time && !ts_buffer->abort)
2094 schedule_delayed_work(&ts_buffer->dwork,
2095 msecs_to_jiffies(ts_buffer->repetition_time));
2096}
2097
2098static void dvb_dmxdev_queue_ts_insertion(
2099 struct ts_insertion_buffer *ts_buffer)
2100{
2101 size_t tsp_size;
2102
2103 if (ts_buffer->dmxdevfilter->dmx_tsp_format == DMX_TSP_FORMAT_188)
2104 tsp_size = 188;
2105 else
2106 tsp_size = 192;
2107
2108 if (ts_buffer->size % tsp_size) {
2109 pr_err("%s: Wrong buffer alignment, size=%zu, tsp_size=%zu\n",
2110 __func__, ts_buffer->size, tsp_size);
2111 return;
2112 }
2113
2114 ts_buffer->abort = 0;
2115 schedule_delayed_work(&ts_buffer->dwork, 0);
2116}
2117
2118static void dvb_dmxdev_cancel_ts_insertion(
2119 struct ts_insertion_buffer *ts_buffer)
2120{
2121 /*
2122 * This function assumes it is called with the demux
2123 * filter's mutex held. Since the work item running on the
2124 * workqueue also takes the filter's mutex, the mutex must
2125 * be released here before waiting for the work to finish;
2126 * otherwise the work on the workqueue would never
2127 * complete.
2128 */
2129 if (!mutex_is_locked(&ts_buffer->dmxdevfilter->mutex)) {
2130 pr_err("%s: mutex is not locked!\n", __func__);
2131 return;
2132 }
2133
2134 ts_buffer->abort = 1;
2135
2136 mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
2137 cancel_delayed_work_sync(&ts_buffer->dwork);
2138 mutex_lock(&ts_buffer->dmxdevfilter->mutex);
2139}
2140
2141static int dvb_dmxdev_set_ts_insertion(struct dmxdev_filter *dmxdevfilter,
2142 struct dmx_set_ts_insertion *params)
2143{
2144 int ret = 0;
2145 int first_buffer;
2146 struct dmxdev_feed *feed;
2147 struct ts_insertion_buffer *ts_buffer;
2148 struct dmx_caps caps;
2149
2150 if (!dmxdevfilter->dev->demux->get_caps)
2151 return -EINVAL;
2152
2153 dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
2154
2155 if (!params ||
2156 !params->size ||
2157 !(caps.caps & DMX_CAP_TS_INSERTION) ||
2158 (dmxdevfilter->state < DMXDEV_STATE_SET) ||
2159 (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
2160 ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
2161 (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
2162 return -EINVAL;
2163
2164 ts_buffer = vmalloc(sizeof(struct ts_insertion_buffer));
2165 if (!ts_buffer)
2166 return -ENOMEM;
2167
2168 ts_buffer->buffer = vmalloc(params->size);
2169 if (!ts_buffer->buffer) {
2170 vfree(ts_buffer);
2171 return -ENOMEM;
2172 }
2173
2174 if (copy_from_user(ts_buffer->buffer,
2175 params->ts_packets, params->size)) {
2176 vfree(ts_buffer->buffer);
2177 vfree(ts_buffer);
2178 return -EFAULT;
2179 }
2180
2181 if (params->repetition_time &&
2182 params->repetition_time < DMX_MIN_INSERTION_REPETITION_TIME)
2183 params->repetition_time = DMX_MIN_INSERTION_REPETITION_TIME;
2184
2185 ts_buffer->size = params->size;
2186 ts_buffer->identifier = params->identifier;
2187 ts_buffer->repetition_time = params->repetition_time;
2188 ts_buffer->dmxdevfilter = dmxdevfilter;
2189 INIT_DELAYED_WORK(&ts_buffer->dwork, dvb_dmxdev_ts_insertion_work);
2190
2191 first_buffer = list_empty(&dmxdevfilter->insertion_buffers);
2192 list_add_tail(&ts_buffer->next, &dmxdevfilter->insertion_buffers);
2193
2194 if (dmxdevfilter->state != DMXDEV_STATE_GO)
2195 return 0;
2196
2197 feed = list_first_entry(&dmxdevfilter->feed.ts,
2198 struct dmxdev_feed, next);
2199
2200 if (first_buffer && feed->ts->ts_insertion_init)
2201 ret = feed->ts->ts_insertion_init(feed->ts);
2202
2203 if (!ret) {
2204 dvb_dmxdev_queue_ts_insertion(ts_buffer);
2205 } else {
2206 list_del(&ts_buffer->next);
2207 vfree(ts_buffer->buffer);
2208 vfree(ts_buffer);
2209 }
2210
2211 return ret;
2212}
2213
2214static int dvb_dmxdev_abort_ts_insertion(struct dmxdev_filter *dmxdevfilter,
2215 struct dmx_abort_ts_insertion *params)
2216{
2217 int ret = 0;
2218 int found_buffer;
2219 struct dmxdev_feed *feed;
2220 struct ts_insertion_buffer *ts_buffer, *tmp;
2221 struct dmx_caps caps;
2222
2223 if (!dmxdevfilter->dev->demux->get_caps)
2224 return -EINVAL;
2225
2226 dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
2227
2228 if (!params ||
2229 !(caps.caps & DMX_CAP_TS_INSERTION) ||
2230 (dmxdevfilter->state < DMXDEV_STATE_SET) ||
2231 (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
2232 ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
2233 (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
2234 return -EINVAL;
2235
2236 found_buffer = 0;
2237 list_for_each_entry_safe(ts_buffer, tmp,
2238 &dmxdevfilter->insertion_buffers, next) {
2239 if (ts_buffer->identifier == params->identifier) {
2240 list_del(&ts_buffer->next);
2241 found_buffer = 1;
2242 break;
2243 }
2244 }
2245
2246 if (!found_buffer)
2247 return -EINVAL;
2248
2249 if (dmxdevfilter->state == DMXDEV_STATE_GO) {
2250 dvb_dmxdev_cancel_ts_insertion(ts_buffer);
2251 if (list_empty(&dmxdevfilter->insertion_buffers)) {
2252 feed = list_first_entry(&dmxdevfilter->feed.ts,
2253 struct dmxdev_feed, next);
2254 if (feed->ts->ts_insertion_terminate)
2255 ret = feed->ts->ts_insertion_terminate(
2256 feed->ts);
2257 }
2258 }
2259
2260 vfree(ts_buffer->buffer);
2261 vfree(ts_buffer);
2262
2263 return ret;
2264}
2265
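/*
 * dvb_dmxdev_ts_fullness_callback - demux callback used in pull mode before
 * TS data is written, to make sure @required_space bytes and a free event
 * slot are available in the relevant output buffer (filter buffer or DVR
 * buffer). When @wait is set the call blocks until space frees up, the
 * filter stops, a buffer error is pending or the DVR device is torn down.
 */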
2266static int dvb_dmxdev_ts_fullness_callback(struct dmx_ts_feed *filter,
2267 int required_space, int wait)
2268{
2269 struct dmxdev_filter *dmxdevfilter = filter->priv;
2270 struct dvb_ringbuffer *src;
2271 struct dmxdev_events_queue *events;
2272 int ret;
2273
2274 if (!dmxdevfilter) {
2275 pr_err("%s: NULL demux filter object!\n", __func__);
2276 return -ENODEV;
2277 }
2278
2279 if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
2280 src = &dmxdevfilter->buffer;
2281 events = &dmxdevfilter->events;
2282 } else {
2283 src = &dmxdevfilter->dev->dvr_buffer;
2284 events = &dmxdevfilter->dev->dvr_output_events;
2285 }
2286
2287 do {
2288 ret = 0;
2289
2290 if (dmxdevfilter->dev->dvr_in_exit)
2291 return -ENODEV;
2292
2293 spin_lock(&dmxdevfilter->dev->lock);
2294
2295 if ((!src->data) ||
2296 (dmxdevfilter->state != DMXDEV_STATE_GO))
2297 ret = -EINVAL;
2298 else if (src->error)
2299 ret = src->error;
2300
2301 if (ret) {
2302 spin_unlock(&dmxdevfilter->dev->lock);
2303 return ret;
2304 }
2305
2306 if ((required_space <= dvb_ringbuffer_free(src)) &&
2307 (!dvb_dmxdev_events_is_full(events))) {
2308 spin_unlock(&dmxdevfilter->dev->lock);
2309 return 0;
2310 }
2311
2312 spin_unlock(&dmxdevfilter->dev->lock);
2313
2314 if (!wait)
2315 return -ENOSPC;
2316
2317 ret = wait_event_interruptible(src->queue,
2318 (!src->data) ||
2319 ((dvb_ringbuffer_free(src) >= required_space) &&
2320 (!dvb_dmxdev_events_is_full(events))) ||
2321 (src->error != 0) ||
2322 (dmxdevfilter->state != DMXDEV_STATE_GO) ||
2323 dmxdevfilter->dev->dvr_in_exit);
2324
2325 if (ret < 0)
2326 return ret;
2327 } while (1);
2328}
2329
2330static int dvb_dmxdev_sec_fullness_callback(
2331 struct dmx_section_filter *filter,
2332 int required_space, int wait)
2333{
2334 struct dmxdev_filter *dmxdevfilter = filter->priv;
2335 struct dvb_ringbuffer *src;
2336 struct dmxdev_events_queue *events;
2337 int ret;
2338
2339 if (!dmxdevfilter) {
2340 pr_err("%s: NULL demux filter object!\n", __func__);
2341 return -ENODEV;
2342 }
2343
2344 src = &dmxdevfilter->buffer;
2345 events = &dmxdevfilter->events;
2346
2347 do {
2348 ret = 0;
2349
2350 if (dmxdevfilter->dev->dvr_in_exit)
2351 return -ENODEV;
2352
2353 spin_lock(&dmxdevfilter->dev->lock);
2354
2355 if ((!src->data) ||
2356 (dmxdevfilter->state != DMXDEV_STATE_GO))
2357 ret = -EINVAL;
2358 else if (src->error)
2359 ret = src->error;
2360
2361 if (ret) {
2362 spin_unlock(&dmxdevfilter->dev->lock);
2363 return ret;
2364 }
2365
2366 if ((required_space <= dvb_ringbuffer_free(src)) &&
2367 (!dvb_dmxdev_events_is_full(events))) {
2368 spin_unlock(&dmxdevfilter->dev->lock);
2369 return 0;
2370 }
2371
2372 spin_unlock(&dmxdevfilter->dev->lock);
2373
2374 if (!wait)
2375 return -ENOSPC;
2376
2377 ret = wait_event_interruptible(src->queue,
2378 (!src->data) ||
2379 ((dvb_ringbuffer_free(src) >= required_space) &&
2380 (!dvb_dmxdev_events_is_full(events))) ||
2381 (src->error != 0) ||
2382 (dmxdevfilter->state != DMXDEV_STATE_GO) ||
2383 dmxdevfilter->dev->dvr_in_exit);
2384
2385 if (ret < 0)
2386 return ret;
2387 } while (1);
2388}
2389
2390static int dvb_dmxdev_set_playback_mode(struct dmxdev_filter *dmxdevfilter,
2391 enum dmx_playback_mode_t playback_mode)
2392{
2393 struct dmxdev *dmxdev = dmxdevfilter->dev;
2394 struct dmx_caps caps;
2395
2396 if (dmxdev->demux->get_caps)
2397 dmxdev->demux->get_caps(dmxdev->demux, &caps);
2398 else
2399 caps.caps = 0;
2400
2401 if ((playback_mode != DMX_PB_MODE_PUSH) &&
2402 (playback_mode != DMX_PB_MODE_PULL))
2403 return -EINVAL;
2404
2405 if (dmxdev->demux->set_playback_mode == NULL)
2406 return -EINVAL;
2407
2408 if (((dmxdev->source < DMX_SOURCE_DVR0) ||
2409 !(caps.caps & DMX_CAP_PULL_MODE)) &&
2410 (playback_mode == DMX_PB_MODE_PULL))
2411 return -EPERM;
2412
2413 if (dmxdevfilter->state == DMXDEV_STATE_GO)
2414 return -EBUSY;
2415
2416 dmxdev->playback_mode = playback_mode;
2417
2418 return dmxdev->demux->set_playback_mode(
2419 dmxdev->demux,
2420 dmxdev->playback_mode,
2421 dvb_dmxdev_ts_fullness_callback,
2422 dvb_dmxdev_sec_fullness_callback);
2423}
2424
2425static int dvb_dmxdev_flush_buffer(struct dmxdev_filter *filter)
2426{
2427 size_t flush_len;
2428 int ret;
2429
2430 if (filter->state != DMXDEV_STATE_GO)
2431 return -EINVAL;
2432
2433 flush_len = dvb_ringbuffer_avail(&filter->buffer);
2434 ret = dvb_dmxdev_flush_data(filter, flush_len);
2435
2436 return ret;
2437}
2438
2439static int dvb_dmxdev_get_buffer_status(
2440 struct dmxdev_filter *dmxdevfilter,
2441 struct dmx_buffer_status *dmx_buffer_status)
2442{
2443 struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
2444
2445 /*
2446 * Note: Taking the dmxdevfilter->dev->lock spinlock is required only
2447 * when getting the status of the demux-to-userspace data ringbuffer.
2448 * When getting the status of a decoder buffer, taking this
2449 * spinlock is not required and in fact might lead to a deadlock.
2450 */
2451 if ((dmxdevfilter->type == DMXDEV_TYPE_PES) &&
2452 (dmxdevfilter->params.pes.output == DMX_OUT_DECODER)) {
2453 struct dmxdev_feed *feed;
2454 int ret;
2455
2456 /* Only one feed should be in the list in case of decoder */
2457 feed = list_first_entry(&dmxdevfilter->feed.ts,
2458 struct dmxdev_feed, next);
2459
2460 /* Ask for status of decoder's buffer from underlying HW */
2461 if (feed->ts->get_decoder_buff_status)
2462 ret = feed->ts->get_decoder_buff_status(
2463 feed->ts,
2464 dmx_buffer_status);
2465 else
2466 ret = -ENODEV;
2467
2468 return ret;
2469 }
2470
2471 spin_lock_irq(&dmxdevfilter->dev->lock);
2472
2473 if (!buf->data) {
2474 spin_unlock_irq(&dmxdevfilter->dev->lock);
2475 return -EINVAL;
2476 }
2477
2478 dmx_buffer_status->error = buf->error;
2479 dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
2480 dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
2481 dmx_buffer_status->read_offset = buf->pread;
2482 dmx_buffer_status->write_offset = buf->pwrite;
2483 dmx_buffer_status->size = buf->size;
2484 buf->error = 0;
2485
2486 spin_unlock_irq(&dmxdevfilter->dev->lock);
2487
2488 if (dmx_buffer_status->error == -EOVERFLOW)
2489 dvb_dmxdev_auto_flush_buffer(dmxdevfilter, buf);
2490
2491 return 0;
2492}
2493
2494static int dvb_dmxdev_release_data(struct dmxdev_filter *dmxdevfilter,
2495 u32 bytes_count)
2496{
2497 ssize_t buff_fullness;
2498
2499 if (!dmxdevfilter->buffer.data)
2500 return -EINVAL;
2501
2502 if (!bytes_count)
2503 return 0;
2504
2505 buff_fullness = dvb_ringbuffer_avail(&dmxdevfilter->buffer);
2506
2507 if (bytes_count > buff_fullness)
2508 return -EINVAL;
2509
2510 DVB_RINGBUFFER_SKIP(&dmxdevfilter->buffer, bytes_count);
2511
2512 dvb_dmxdev_notify_data_read(dmxdevfilter, bytes_count);
2513 spin_lock_irq(&dmxdevfilter->dev->lock);
2514 dvb_dmxdev_update_events(&dmxdevfilter->events, bytes_count);
2515 spin_unlock_irq(&dmxdevfilter->dev->lock);
2516
2517 wake_up_all(&dmxdevfilter->buffer.queue);
2518
2519 return 0;
2520}
2521
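/*
 * dvb_dmxdev_get_event - fetch the next pending filter event for userspace.
 *
 * A buffer overflow is reported ahead of any queued event; overflow and
 * section-timeout conditions clear the sticky buffer error once delivered,
 * and on overflow the buffer is also flushed via
 * dvb_dmxdev_auto_flush_buffer(). In pull mode waiters blocked on the event
 * queue are woken up after the event is consumed.
 */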
2522static int dvb_dmxdev_get_event(struct dmxdev_filter *dmxdevfilter,
2523 struct dmx_filter_event *event)
2524{
2525 int res = 0;
2526
2527 spin_lock_irq(&dmxdevfilter->dev->lock);
2528
2529 /* Check first for filter overflow */
2530 if (dmxdevfilter->buffer.error == -EOVERFLOW) {
2531 event->type = DMX_EVENT_BUFFER_OVERFLOW;
2532 } else {
2533 res = dvb_dmxdev_remove_event(&dmxdevfilter->events, event);
2534 if (res) {
2535 spin_unlock_irq(&dmxdevfilter->dev->lock);
2536 return res;
2537 }
2538 }
2539
2540 /* clear buffer error now that user was notified */
2541 if (event->type == DMX_EVENT_BUFFER_OVERFLOW ||
2542 event->type == DMX_EVENT_SECTION_TIMEOUT)
2543 dmxdevfilter->buffer.error = 0;
2544
2545 spin_unlock_irq(&dmxdevfilter->dev->lock);
2546
2547 if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
2548 dvb_dmxdev_auto_flush_buffer(dmxdevfilter,
2549 &dmxdevfilter->buffer);
2550
2551 spin_lock_irq(&dmxdevfilter->dev->lock);
2552
2553 /*
2554 * If no-data events are enabled on this filter,
2555 * the events can be removed from the queue when
2556 * the user gets them.
2557 * For filters with data events enabled, the event is removed
2558 * from the queue only when the respective data is read.
2559 */
2560 if (event->type != DMX_EVENT_BUFFER_OVERFLOW &&
2561 dmxdevfilter->events.data_read_event_masked)
2562 dmxdevfilter->events.read_index =
2563 dvb_dmxdev_advance_event_idx(
2564 dmxdevfilter->events.read_index);
2565
2566 spin_unlock_irq(&dmxdevfilter->dev->lock);
2567
2568 /*
2569 * In PULL mode, writers might be stalled on the
2570 * event queue, so wake up any waiters.
2571 */
2572 if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL)
2573 wake_up_all(&dmxdevfilter->buffer.queue);
2574
2575 return res;
2576}
2577
2578static void dvb_dmxdev_filter_timeout(unsigned long data)
2579{
2580 struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
2581 struct dmx_filter_event event;
2582
2583 dmxdevfilter->buffer.error = -ETIMEDOUT;
2584 spin_lock_irq(&dmxdevfilter->dev->lock);
2585 dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
2586 event.type = DMX_EVENT_SECTION_TIMEOUT;
2587 dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
2588 spin_unlock_irq(&dmxdevfilter->dev->lock);
2589 wake_up_all(&dmxdevfilter->buffer.queue);
2590}
2591
2592static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
2593{
2594 struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec;
2595
2596 del_timer(&dmxdevfilter->timer);
2597 if (para->timeout) {
2598 dmxdevfilter->timer.function = dvb_dmxdev_filter_timeout;
2599 dmxdevfilter->timer.data = (unsigned long)dmxdevfilter;
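		/* para->timeout is in ms; convert to jiffies, rounded, plus one jiffy of slack */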
2600 dmxdevfilter->timer.expires =
2601 jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
2602 add_timer(&dmxdevfilter->timer);
2603 }
2604}
2605
2606static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
2607 const u8 *buffer2, size_t buffer2_len,
2608 struct dmx_section_filter *filter)
2609{
2610 struct dmxdev_filter *dmxdevfilter = filter->priv;
2611 struct dmx_filter_event event;
2612 ssize_t free;
2613
2614
2615 if (!dmxdevfilter) {
2616 pr_err("%s: null filter.\n", __func__);
2617 return -EINVAL;
2618 }
2619
2620 spin_lock(&dmxdevfilter->dev->lock);
2621
2622 if (dmxdevfilter->buffer.error ||
2623 dmxdevfilter->state != DMXDEV_STATE_GO ||
2624 dmxdevfilter->eos_state) {
2625 spin_unlock(&dmxdevfilter->dev->lock);
2626 return 0;
2627 }
2628
2629 /* Discard section data if event cannot be notified */
2630 if (!(dmxdevfilter->events.event_mask.disable_mask &
2631 DMX_EVENT_NEW_SECTION) &&
2632 dvb_dmxdev_events_is_full(&dmxdevfilter->events)) {
2633 spin_unlock(&dmxdevfilter->dev->lock);
2634 return 0;
2635 }
2636
2637 if ((buffer1_len + buffer2_len) == 0) {
2638 if (buffer1 == NULL && buffer2 == NULL) {
2639 /* Section was dropped due to CRC error */
2640 event.type = DMX_EVENT_SECTION_CRC_ERROR;
2641 dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
2642
2643 spin_unlock(&dmxdevfilter->dev->lock);
2644 wake_up_all(&dmxdevfilter->buffer.queue);
2645 } else {
2646 spin_unlock(&dmxdevfilter->dev->lock);
2647 }
2648
2649 return 0;
2650 }
2651
2652 event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
2653 event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
2654
2655 del_timer(&dmxdevfilter->timer);
2656
2657 /* Verify output buffer has sufficient space, or report overflow */
2658 free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
2659 if (free < (buffer1_len + buffer2_len)) {
2660 pr_debug("%s: section filter overflow (pid=%u)\n",
2661 __func__, dmxdevfilter->params.sec.pid);
2662 dmxdevfilter->buffer.error = -EOVERFLOW;
2663 spin_unlock(&dmxdevfilter->dev->lock);
2664 wake_up_all(&dmxdevfilter->buffer.queue);
2665 return 0;
2666 }
2667
2668 dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1, buffer1_len);
2669 dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2, buffer2_len);
2670
2671 event.type = DMX_EVENT_NEW_SECTION;
2672 event.params.section.total_length = buffer1_len + buffer2_len;
2673 event.params.section.actual_length =
2674 event.params.section.total_length;
2675
2676 dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
2677
2678 if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
2679 dmxdevfilter->state = DMXDEV_STATE_DONE;
2680 spin_unlock(&dmxdevfilter->dev->lock);
2681 wake_up_all(&dmxdevfilter->buffer.queue);
2682 return 0;
2683}
2684
2685static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
2686 const u8 *buffer2, size_t buffer2_len,
2687 struct dmx_ts_feed *feed)
2688{
2689 struct dmxdev_filter *dmxdevfilter = feed->priv;
2690 struct dvb_ringbuffer *buffer;
2691 struct dmxdev_events_queue *events;
2692 struct dmx_filter_event event;
2693 ssize_t free;
2694
2695 if (!dmxdevfilter) {
2696 pr_err("%s: null filter (feed->is_filtering=%d)\n",
2697 __func__, feed->is_filtering);
2698 return -EINVAL;
2699 }
2700 spin_lock(&dmxdevfilter->dev->lock);
2701
2702 if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER ||
2703 dmxdevfilter->state != DMXDEV_STATE_GO ||
2704 dmxdevfilter->eos_state) {
2705 spin_unlock(&dmxdevfilter->dev->lock);
2706 return 0;
2707 }
2708
2709 if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
2710 buffer = &dmxdevfilter->buffer;
2711 events = &dmxdevfilter->events;
2712 } else {
2713 buffer = &dmxdevfilter->dev->dvr_buffer;
2714 events = &dmxdevfilter->dev->dvr_output_events;
2715 }
2716
2717 if (buffer->error) {
2718 spin_unlock(&dmxdevfilter->dev->lock);
2719 wake_up_all(&buffer->queue);
2720 return buffer->error;
2721 }
2722
2723 if (!events->current_event_data_size)
2724 events->current_event_start_offset = buffer->pwrite;
2725
2726 /* Verify output buffer has sufficient space, or report overflow */
2727 free = dvb_ringbuffer_free(buffer);
2728 if (free < (buffer1_len + buffer2_len)) {
2729 pr_debug("%s: buffer overflow error, pid=%u\n",
2730 __func__, dmxdevfilter->params.pes.pid);
2731 buffer->error = -EOVERFLOW;
2732 spin_unlock(&dmxdevfilter->dev->lock);
2733 wake_up_all(&buffer->queue);
2734
2735 return -EOVERFLOW;
2736 }
2737
2738 if (buffer1_len + buffer2_len) {
2739 dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
2740 dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
2741
2742 events->current_event_data_size += (buffer1_len + buffer2_len);
2743
2744 if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP ||
2745 dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
2746 && events->current_event_data_size >=
2747 dmxdevfilter->params.pes.rec_chunk_size) {
2748 event.type = DMX_EVENT_NEW_REC_CHUNK;
2749 event.params.recording_chunk.offset =
2750 events->current_event_start_offset;
2751 event.params.recording_chunk.size =
2752 events->current_event_data_size;
2753
2754 dvb_dmxdev_add_event(events, &event);
2755 events->current_event_data_size = 0;
2756 }
2757 }
2758
2759 spin_unlock(&dmxdevfilter->dev->lock);
2760 wake_up_all(&buffer->queue);
2761 return 0;
2762}
2763
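/*
 * dvb_dmxdev_section_event_cb - demux "data ready" callback for section
 * filters. Queues a DMX_EVENT_NEW_SECTION (or CRC-error, EOS, marker or
 * scrambling-status event) towards userspace, flags the buffer on overflow,
 * and advances the ring buffer write pointer over section data the demux
 * reported as written.
 */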
2764static int dvb_dmxdev_section_event_cb(struct dmx_section_filter *filter,
2765 struct dmx_data_ready *dmx_data_ready)
2766{
2767 int res = 0;
2768 struct dmxdev_filter *dmxdevfilter = filter->priv;
2769 struct dmx_filter_event event;
2770 ssize_t free;
2771
2772 if (!dmxdevfilter) {
2773 pr_err("%s: null filter. event type=%d (length=%d) will be discarded\n",
2774 __func__, dmx_data_ready->status,
2775 dmx_data_ready->data_length);
2776 return -EINVAL;
2777 }
2778
2779 spin_lock(&dmxdevfilter->dev->lock);
2780
2781 if (dmxdevfilter->buffer.error == -ETIMEDOUT ||
2782 dmxdevfilter->state != DMXDEV_STATE_GO ||
2783 dmxdevfilter->eos_state) {
2784 spin_unlock(&dmxdevfilter->dev->lock);
2785 return 0;
2786 }
2787
2788 if (dmx_data_ready->data_length == 0) {
2789 if (dmx_data_ready->status == DMX_CRC_ERROR) {
2790 /* Section was dropped due to CRC error */
2791 event.type = DMX_EVENT_SECTION_CRC_ERROR;
2792 dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
2793
2794 spin_unlock(&dmxdevfilter->dev->lock);
2795 wake_up_all(&dmxdevfilter->buffer.queue);
2796 } else if (dmx_data_ready->status == DMX_OK_EOS) {
2797 event.type = DMX_EVENT_EOS;
2798 dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
2799 spin_unlock(&dmxdevfilter->dev->lock);
2800 wake_up_all(&dmxdevfilter->buffer.queue);
2801 } else if (dmx_data_ready->status == DMX_OK_MARKER) {
2802 event.type = DMX_EVENT_MARKER;
2803 event.params.marker.id = dmx_data_ready->marker.id;
2804 dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
2805 spin_unlock(&dmxdevfilter->dev->lock);
2806 wake_up_all(&dmxdevfilter->buffer.queue);
2807 } else if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) {
2808 event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE;
2809 event.params.scrambling_status =
2810 dmx_data_ready->scrambling_bits;
2811 dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
2812 spin_unlock(&dmxdevfilter->dev->lock);
2813 wake_up_all(&dmxdevfilter->buffer.queue);
2814 } else if (dmx_data_ready->status == DMX_OVERRUN_ERROR) {
2815 pr_debug("dmxdev: section filter overflow (pid=%u)\n",
2816 dmxdevfilter->params.sec.pid);
2817 /* Set buffer error to notify user overflow occurred */
2818 dmxdevfilter->buffer.error = -EOVERFLOW;
2819 spin_unlock(&dmxdevfilter->dev->lock);
2820 wake_up_all(&dmxdevfilter->buffer.queue);
2821 } else {
2822 spin_unlock(&dmxdevfilter->dev->lock);
2823 }
2824 return 0;
2825 }
2826
2827 event.type = DMX_EVENT_NEW_SECTION;
2828 event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
2829 event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
2830 event.params.section.total_length = dmx_data_ready->data_length;
2831 event.params.section.actual_length = dmx_data_ready->data_length;
2832
2833 if (dmx_data_ready->status == DMX_MISSED_ERROR)
2834 event.params.section.flags = DMX_FILTER_CC_ERROR;
2835 else
2836 event.params.section.flags = 0;
2837
2838 free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
2839 if (free < dmx_data_ready->data_length) {
2840 pr_err("%s: invalid data length: data_length=%d > free=%zd\n",
2841 __func__, dmx_data_ready->data_length, free);
2842 } else {
2843 res = dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
2844 DVB_RINGBUFFER_PUSH(&dmxdevfilter->buffer,
2845 dmx_data_ready->data_length);
2846 }
2847
2848 spin_unlock(&dmxdevfilter->dev->lock);
2849 wake_up_all(&dmxdevfilter->buffer.queue);
2850
2851 return res;
2852}
2853
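/*
 * dvb_dmxdev_ts_event_cb - demux "data ready" callback for TS feeds.
 *
 * Translates the low level dmx_data_ready notification (overflow, EOS,
 * markers, PCR, index entries, scrambling changes, decoder/ES buffers,
 * PES end, plain data) into dmx_filter_event entries queued towards
 * userspace, and accounts the data in the filter or DVR ring buffer.
 */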
2854static int dvb_dmxdev_ts_event_cb(struct dmx_ts_feed *feed,
2855 struct dmx_data_ready *dmx_data_ready)
2856{
2857 struct dmxdev_filter *dmxdevfilter = feed->priv;
2858 struct dvb_ringbuffer *buffer;
2859 struct dmxdev_events_queue *events;
2860 struct dmx_filter_event event;
2861 ssize_t free;
2862
2863 if (!dmxdevfilter) {
2864 pr_err("%s: null filter (feed->is_filtering=%d) event type=%d (length=%d) will be discarded\n",
2865 __func__, feed->is_filtering,
2866 dmx_data_ready->status,
2867 dmx_data_ready->data_length);
2868 return -EINVAL;
2869 }
2870
2871 spin_lock(&dmxdevfilter->dev->lock);
2872
2873 if (dmxdevfilter->state != DMXDEV_STATE_GO ||
2874 dmxdevfilter->eos_state) {
2875 spin_unlock(&dmxdevfilter->dev->lock);
2876 return 0;
2877 }
2878
2879 if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
2880 buffer = &dmxdevfilter->buffer;
2881 events = &dmxdevfilter->events;
2882 } else {
2883 buffer = &dmxdevfilter->dev->dvr_buffer;
2884 events = &dmxdevfilter->dev->dvr_output_events;
2885 }
2886
2887 if (!buffer->error && dmx_data_ready->status == DMX_OVERRUN_ERROR) {
2888 pr_debug("dmxdev: %s filter buffer overflow (pid=%u)\n",
2889 dmxdevfilter->params.pes.output == DMX_OUT_DECODER ?
2890 "decoder" : "",
2891 dmxdevfilter->params.pes.pid);
2892 /* Set buffer error to notify user overflow occurred */
2893 buffer->error = -EOVERFLOW;
2894 spin_unlock(&dmxdevfilter->dev->lock);
2895 wake_up_all(&buffer->queue);
2896 return 0;
2897 }
2898
2899 if (dmx_data_ready->status == DMX_OK_EOS) {
2900 /* Report partial recording chunk */
2901 if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP ||
2902 dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
2903 && events->current_event_data_size) {
2904 event.type = DMX_EVENT_NEW_REC_CHUNK;
2905 event.params.recording_chunk.offset =
2906 events->current_event_start_offset;
2907 event.params.recording_chunk.size =
2908 events->current_event_data_size;
2909 events->current_event_start_offset =
2910 (events->current_event_start_offset +
2911 events->current_event_data_size) %
2912 buffer->size;
2913 events->current_event_data_size = 0;
2914 dvb_dmxdev_add_event(events, &event);
2915 }
2916
2917 dmxdevfilter->eos_state = 1;
2918 pr_debug("dmxdev: DMX_OK_EOS - entering EOS state\n");
2919 event.type = DMX_EVENT_EOS;
2920 dvb_dmxdev_add_event(events, &event);
2921 spin_unlock(&dmxdevfilter->dev->lock);
2922 wake_up_all(&buffer->queue);
2923 return 0;
2924 }
2925
2926 if (dmx_data_ready->status == DMX_OK_MARKER) {
2927 pr_debug("dmxdev: DMX_OK_MARKER - id=%llu\n",
2928 dmx_data_ready->marker.id);
2929 event.type = DMX_EVENT_MARKER;
2930 event.params.marker.id = dmx_data_ready->marker.id;
2931 dvb_dmxdev_add_event(events, &event);
2932 spin_unlock(&dmxdevfilter->dev->lock);
2933 wake_up_all(&buffer->queue);
2934 return 0;
2935 }
2936
2937 if (dmx_data_ready->status == DMX_OK_PCR) {
2938 pr_debug("dmxdev: event callback DMX_OK_PCR\n");
2939 event.type = DMX_EVENT_NEW_PCR;
2940 event.params.pcr.pcr = dmx_data_ready->pcr.pcr;
2941 event.params.pcr.stc = dmx_data_ready->pcr.stc;
2942 if (dmx_data_ready->pcr.disc_indicator_set)
2943 event.params.pcr.flags =
2944 DMX_FILTER_DISCONTINUITY_INDICATOR;
2945 else
2946 event.params.pcr.flags = 0;
2947
2948 dvb_dmxdev_add_event(events, &event);
2949 spin_unlock(&dmxdevfilter->dev->lock);
2950 wake_up_all(&buffer->queue);
2951 return 0;
2952 }
2953
2954 if (dmx_data_ready->status == DMX_OK_IDX) {
2955 pr_debug("dmxdev: event callback DMX_OK_IDX\n");
2956 event.type = DMX_EVENT_NEW_INDEX_ENTRY;
2957 event.params.index = dmx_data_ready->idx_event;
2958
2959 dvb_dmxdev_add_event(events, &event);
2960 spin_unlock(&dmxdevfilter->dev->lock);
2961 wake_up_all(&buffer->queue);
2962 return 0;
2963 }
2964
2965 if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) {
2966 event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE;
2967 event.params.scrambling_status =
2968 dmx_data_ready->scrambling_bits;
2969 dvb_dmxdev_add_event(events, &event);
2970 spin_unlock(&dmxdevfilter->dev->lock);
2971 wake_up_all(&buffer->queue);
2972 return 0;
2973 }
2974
2975 if (dmx_data_ready->status == DMX_OK_DECODER_BUF) {
2976 event.type = DMX_EVENT_NEW_ES_DATA;
2977 event.params.es_data.buf_handle = dmx_data_ready->buf.handle;
2978 event.params.es_data.cookie = dmx_data_ready->buf.cookie;
2979 event.params.es_data.offset = dmx_data_ready->buf.offset;
2980 event.params.es_data.data_len = dmx_data_ready->buf.len;
2981 event.params.es_data.pts_valid = dmx_data_ready->buf.pts_exists;
2982 event.params.es_data.pts = dmx_data_ready->buf.pts;
2983 event.params.es_data.dts_valid = dmx_data_ready->buf.dts_exists;
2984 event.params.es_data.dts = dmx_data_ready->buf.dts;
2985 event.params.es_data.stc = dmx_data_ready->buf.stc;
2986 event.params.es_data.transport_error_indicator_counter =
2987 dmx_data_ready->buf.tei_counter;
2988 event.params.es_data.continuity_error_counter =
2989 dmx_data_ready->buf.cont_err_counter;
2990 event.params.es_data.ts_packets_num =
2991 dmx_data_ready->buf.ts_packets_num;
2992 event.params.es_data.ts_dropped_bytes =
2993 dmx_data_ready->buf.ts_dropped_bytes;
2994 dvb_dmxdev_add_event(events, &event);
2995 spin_unlock(&dmxdevfilter->dev->lock);
2996 wake_up_all(&buffer->queue);
2997 return 0;
2998 }
2999
3000 if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
3001 spin_unlock(&dmxdevfilter->dev->lock);
3002 wake_up_all(&buffer->queue);
3003 return 0;
3004 }
3005
3006 free = dvb_ringbuffer_free(buffer);
3007 if (free < dmx_data_ready->data_length) {
3008 pr_err("%s: invalid data length: data_length=%d > free=%zd\n",
3009 __func__, dmx_data_ready->data_length, free);
3010
3011 spin_unlock(&dmxdevfilter->dev->lock);
3012 wake_up_all(&buffer->queue);
3013 return 0;
3014 }
3015
3016 if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
3017 if (dmx_data_ready->status == DMX_OK &&
3018 !events->current_event_data_size) {
3019 events->current_event_start_offset = buffer->pwrite;
3020 } else if (dmx_data_ready->status == DMX_OK_PES_END) {
3021 event.type = DMX_EVENT_NEW_PES;
3022
3023 event.params.pes.base_offset =
3024 events->current_event_start_offset;
3025 event.params.pes.start_offset =
3026 (events->current_event_start_offset +
3027 dmx_data_ready->pes_end.start_gap) %
3028 buffer->size;
3029
3030 event.params.pes.actual_length =
3031 dmx_data_ready->pes_end.actual_length;
3032 event.params.pes.total_length =
3033 events->current_event_data_size;
3034
3035 event.params.pes.flags = 0;
3036 if (dmx_data_ready->pes_end.disc_indicator_set)
3037 event.params.pes.flags |=
3038 DMX_FILTER_DISCONTINUITY_INDICATOR;
3039 if (dmx_data_ready->pes_end.pes_length_mismatch)
3040 event.params.pes.flags |=
3041 DMX_FILTER_PES_LENGTH_ERROR;
3042
3043 event.params.pes.stc = dmx_data_ready->pes_end.stc;
3044 event.params.pes.transport_error_indicator_counter =
3045 dmx_data_ready->pes_end.tei_counter;
3046 event.params.pes.continuity_error_counter =
3047 dmx_data_ready->pes_end.cont_err_counter;
3048 event.params.pes.ts_packets_num =
3049 dmx_data_ready->pes_end.ts_packets_num;
3050
3051 /* Do not report zero length PES */
3052 if (event.params.pes.total_length)
3053 dvb_dmxdev_add_event(events, &event);
3054
3055 events->current_event_data_size = 0;
3056 }
3057 } else if (!events->current_event_data_size) {
3058 events->current_event_start_offset = buffer->pwrite;
3059 }
3060
3061 events->current_event_data_size += dmx_data_ready->data_length;
3062 DVB_RINGBUFFER_PUSH(buffer, dmx_data_ready->data_length);
3063
3064 if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) ||
3065 (dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)) {
3066 while (events->current_event_data_size >=
3067 dmxdevfilter->params.pes.rec_chunk_size) {
3068 event.type = DMX_EVENT_NEW_REC_CHUNK;
3069 event.params.recording_chunk.offset =
3070 events->current_event_start_offset;
3071 event.params.recording_chunk.size =
3072 dmxdevfilter->params.pes.rec_chunk_size;
3073 events->current_event_data_size =
3074 events->current_event_data_size -
3075 dmxdevfilter->params.pes.rec_chunk_size;
3076 events->current_event_start_offset =
3077 (events->current_event_start_offset +
3078 dmxdevfilter->params.pes.rec_chunk_size) %
3079 buffer->size;
3080
3081 dvb_dmxdev_add_event(events, &event);
3082 }
3083 }
3084 spin_unlock(&dmxdevfilter->dev->lock);
3085 wake_up_all(&buffer->queue);
3086 return 0;
3087}
3088
3089/* stop feed but only mark the specified filter as stopped (state set) */
3090static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter)
3091{
3092 struct dmxdev_feed *feed;
3093
3094 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
3095
3096 switch (dmxdevfilter->type) {
3097 case DMXDEV_TYPE_SEC:
3098 del_timer(&dmxdevfilter->timer);
3099 dmxdevfilter->feed.sec.feed->stop_filtering(
3100 dmxdevfilter->feed.sec.feed);
3101 break;
3102 case DMXDEV_TYPE_PES:
3103 list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
3104 if (dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) {
3105 dmxdevfilter->dev->dvr_feeds_count--;
3106 if (!dmxdevfilter->dev->dvr_feeds_count)
3107 dmxdevfilter->dev->dvr_feed = NULL;
3108 }
3109 feed->ts->stop_filtering(feed->ts);
3110 }
3111 break;
3112 default:
3113 return -EINVAL;
3114 }
3115 return 0;
3116}
3117
3118/* start feed associated with the specified filter */
3119static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter)
3120{
3121 struct dmxdev_feed *feed;
3122 int ret;
3123
3124 dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
3125
3126 switch (filter->type) {
3127 case DMXDEV_TYPE_SEC:
3128 return filter->feed.sec.feed->start_filtering(
3129 filter->feed.sec.feed);
3130 case DMXDEV_TYPE_PES:
3131 list_for_each_entry(feed, &filter->feed.ts, next) {
3132 ret = feed->ts->start_filtering(feed->ts);
3133 if (ret < 0) {
3134 dvb_dmxdev_feed_stop(filter);
3135 return ret;
3136 }
3137 }
3138 break;
3139 default:
3140 return -EINVAL;
3141 }
3142
3143 return 0;
3144}
3145
3146/* restart section feed if it has filters left associated with it,
3147 otherwise release the feed */
3148static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter)
3149{
3150 int i;
3151 struct dmxdev *dmxdev = filter->dev;
3152 u16 pid = filter->params.sec.pid;
3153
3154 for (i = 0; i < dmxdev->filternum; i++)
3155 if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
3156 dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
3157 dmxdev->filter[i].params.sec.pid == pid) {
3158 dvb_dmxdev_feed_start(&dmxdev->filter[i]);
3159 return 0;
3160 }
3161
3162 filter->dev->demux->release_section_feed(dmxdev->demux,
3163 filter->feed.sec.feed);
3164
3165 return 0;
3166}
3167
3168static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
3169{
3170 struct dmxdev_feed *feed;
3171 struct dmx_demux *demux;
3172 struct ts_insertion_buffer *ts_buffer;
3173
3174 if (dmxdevfilter->state < DMXDEV_STATE_GO)
3175 return 0;
3176
3177 switch (dmxdevfilter->type) {
3178 case DMXDEV_TYPE_SEC:
3179 if (!dmxdevfilter->feed.sec.feed)
3180 break;
3181 dvb_dmxdev_feed_stop(dmxdevfilter);
3182 if (dmxdevfilter->filter.sec)
3183 dmxdevfilter->feed.sec.feed->
3184 release_filter(dmxdevfilter->feed.sec.feed,
3185 dmxdevfilter->filter.sec);
3186 dvb_dmxdev_feed_restart(dmxdevfilter);
3187 dmxdevfilter->feed.sec.feed = NULL;
3188 break;
3189 case DMXDEV_TYPE_PES:
3190 dvb_dmxdev_feed_stop(dmxdevfilter);
3191 demux = dmxdevfilter->dev->demux;
3192
3193 if (!list_empty(&dmxdevfilter->insertion_buffers)) {
3194 feed = list_first_entry(&dmxdevfilter->feed.ts,
3195 struct dmxdev_feed, next);
3196
3197 list_for_each_entry(ts_buffer,
3198 &dmxdevfilter->insertion_buffers, next)
3199 dvb_dmxdev_cancel_ts_insertion(ts_buffer);
3200 if (feed->ts->ts_insertion_terminate)
3201 feed->ts->ts_insertion_terminate(feed->ts);
3202 }
3203
3204 list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
3205 demux->release_ts_feed(demux, feed->ts);
3206 feed->ts = NULL;
3207 }
3208 break;
3209 default:
3210 if (dmxdevfilter->state == DMXDEV_STATE_ALLOCATED)
3211 return 0;
3212 return -EINVAL;
3213 }
3214
3215 spin_lock_irq(&dmxdevfilter->dev->lock);
3216 dvb_dmxdev_flush_output(&dmxdevfilter->buffer, &dmxdevfilter->events);
3217 dvb_ringbuffer_reset(&dmxdevfilter->buffer);
3218 spin_unlock_irq(&dmxdevfilter->dev->lock);
3219
3220 wake_up_all(&dmxdevfilter->buffer.queue);
3221
3222 return 0;
3223}
3224
3225static void dvb_dmxdev_delete_pids(struct dmxdev_filter *dmxdevfilter)
3226{
3227 struct dmxdev_feed *feed, *tmp;
3228
3229 /* delete all PIDs */
3230 list_for_each_entry_safe(feed, tmp, &dmxdevfilter->feed.ts, next) {
3231 list_del(&feed->next);
3232 kfree(feed);
3233 }
3234
3235 BUG_ON(!list_empty(&dmxdevfilter->feed.ts));
3236}
3237
3238static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter)
3239{
3240 if (dmxdevfilter->state < DMXDEV_STATE_SET)
3241 return 0;
3242
3243 if (dmxdevfilter->type == DMXDEV_TYPE_PES)
3244 dvb_dmxdev_delete_pids(dmxdevfilter);
3245
3246 dmxdevfilter->type = DMXDEV_TYPE_NONE;
3247 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
3248 return 0;
3249}
3250
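/*
 * dvb_dmxdev_start_feed - allocate and configure a TS feed for one PID of a
 * PES-type filter and start filtering on it.
 *
 * The feed is pointed at the proper output ring buffer (filter buffer, DVR
 * buffer or decoder buffers), gets its optional driver hooks configured
 * (event callback, TS packet format, secure mode, cipher, video codec,
 * indexing) and is released again on any failure.
 */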
3251static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
3252 struct dmxdev_filter *filter,
3253 struct dmxdev_feed *feed)
3254{
3255 ktime_t timeout = ktime_set(0, 0);
3256 struct dmx_pes_filter_params *para = &filter->params.pes;
3257 dmx_output_t otype;
3258 int ret;
3259 int ts_type;
3260 enum dmx_ts_pes ts_pes;
3261 struct dmx_ts_feed *tsfeed;
3262
3263 feed->ts = NULL;
3264 otype = para->output;
3265
3266 ts_pes = para->pes_type;
3267
3268 if (ts_pes < DMX_PES_OTHER)
3269 ts_type = TS_DECODER;
3270 else
3271 ts_type = 0;
3272
3273 if (otype == DMX_OUT_TS_TAP)
3274 ts_type |= TS_PACKET;
3275 else if (otype == DMX_OUT_TSDEMUX_TAP)
3276 ts_type |= TS_PACKET | TS_DEMUX;
3277 else if (otype == DMX_OUT_TAP)
3278 ts_type |= TS_PACKET | TS_DEMUX | TS_PAYLOAD_ONLY;
3279
3280 ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux, &feed->ts,
3281 dvb_dmxdev_ts_callback);
3282 if (ret < 0)
3283 return ret;
3284
3285 tsfeed = feed->ts;
3286 tsfeed->priv = filter;
3287
3288 if (filter->params.pes.output == DMX_OUT_TS_TAP) {
3289 tsfeed->buffer.ringbuff = &dmxdev->dvr_buffer;
3290 tsfeed->buffer.priv_handle = dmxdev->dvr_priv_buff_handle;
3291 if (!dmxdev->dvr_feeds_count)
3292 dmxdev->dvr_feed = filter;
3293 dmxdev->dvr_feeds_count++;
3294 } else if (filter->params.pes.output == DMX_OUT_DECODER) {
3295 tsfeed->buffer.ringbuff = &filter->buffer;
3296 tsfeed->decoder_buffers = &filter->decoder_buffers;
3297 tsfeed->buffer.priv_handle = filter->priv_buff_handle;
3298 } else {
3299 tsfeed->buffer.ringbuff = &filter->buffer;
3300 tsfeed->buffer.priv_handle = filter->priv_buff_handle;
3301 }
3302
3303 if (tsfeed->data_ready_cb) {
3304 ret = tsfeed->data_ready_cb(tsfeed, dvb_dmxdev_ts_event_cb);
3305
3306 if (ret < 0) {
3307 dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
3308 return ret;
3309 }
3310 }
3311
3312 ret = tsfeed->set(tsfeed, feed->pid,
3313 ts_type, ts_pes,
3314 filter->decoder_buffers.buffers_size,
3315 timeout);
3316 if (ret < 0) {
3317 dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
3318 return ret;
3319 }
3320
3321 if (tsfeed->set_tsp_out_format)
3322 tsfeed->set_tsp_out_format(tsfeed, filter->dmx_tsp_format);
3323
3324 if (tsfeed->set_secure_mode)
3325 tsfeed->set_secure_mode(tsfeed, &filter->sec_mode);
3326
3327 if (tsfeed->set_cipher_ops)
3328 tsfeed->set_cipher_ops(tsfeed, &feed->cipher_ops);
3329
3330 if ((para->pes_type == DMX_PES_VIDEO0) ||
3331 (para->pes_type == DMX_PES_VIDEO1) ||
3332 (para->pes_type == DMX_PES_VIDEO2) ||
3333 (para->pes_type == DMX_PES_VIDEO3)) {
3334 if (tsfeed->set_video_codec) {
3335 ret = tsfeed->set_video_codec(tsfeed,
3336 para->video_codec);
3337
3338 if (ret < 0) {
3339 dmxdev->demux->release_ts_feed(dmxdev->demux,
3340 tsfeed);
3341 return ret;
3342 }
3343 }
3344 }
3345
3346 if ((filter->params.pes.output == DMX_OUT_TS_TAP) ||
3347 (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP))
3348 if (tsfeed->set_idx_params) {
3349 ret = tsfeed->set_idx_params(
3350 tsfeed, &feed->idx_params);
3351 if (ret) {
3352 dmxdev->demux->release_ts_feed(dmxdev->demux,
3353 tsfeed);
3354 return ret;
3355 }
3356 }
3357
3358 ret = tsfeed->start_filtering(tsfeed);
3359 if (ret < 0) {
3360 dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
3361 return ret;
3362 }
3363
3364 return 0;
3365}
3366
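/*
 * dvb_filter_external_buffer_only - check whether the demux supports only
 * externally allocated buffers for this filter's output type, based on the
 * capability flags reported by the driver. Used to decide whether an
 * internal (vmalloc'ed) buffer may be allocated in dvb_dmxdev_filter_start().
 */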
3367static int dvb_filter_external_buffer_only(struct dmxdev *dmxdev,
3368 struct dmxdev_filter *filter)
3369{
3370 struct dmx_caps caps;
3371 int is_external_only;
3372 int flags;
3373
3374 /*
3375 * For backward compatibility, default assumes that
3376 * external only buffers are not supported.
3377 */
3378 flags = 0;
3379 if (dmxdev->demux->get_caps) {
3380 dmxdev->demux->get_caps(dmxdev->demux, &caps);
3381
3382 if (filter->type == DMXDEV_TYPE_SEC)
3383 flags = caps.section.flags;
3384 else if (filter->params.pes.output == DMX_OUT_DECODER)
3385 /* For decoder filters dmxdev buffer is not required */
3386 flags = 0;
3387 else if (filter->params.pes.output == DMX_OUT_TAP)
3388 flags = caps.pes.flags;
3389 else if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
3390 flags = caps.recording_188_tsp.flags;
3391 else
3392 flags = caps.recording_192_tsp.flags;
3393 }
3394
3395 if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) &&
3396 (flags & DMX_BUFFER_EXTERNAL_SUPPORT))
3397 is_external_only = 1;
3398 else
3399 is_external_only = 0;
3400
3401 return is_external_only;
3402}
3403
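/*
 * Illustrative userspace counterpart of the check above (a sketch; "fd"
 * is a hypothetical demux file descriptor): the same capability flags
 * can be queried with DMX_GET_CAPS before picking a buffer mode, e.g.:
 *
 *	struct dmx_caps caps;
 *
 *	if (!ioctl(fd, DMX_GET_CAPS, &caps) &&
 *	    !(caps.section.flags & DMX_BUFFER_INTERNAL_SUPPORT))
 *		use DMX_BUFFER_MODE_EXTERNAL for section filters
 */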
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
3405{
3406 struct dmxdev *dmxdev = filter->dev;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003407 struct dmxdev_feed *feed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408 void *mem;
3409 int ret, i;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303410 size_t tsp_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411
3412 if (filter->state < DMXDEV_STATE_SET)
3413 return -EINVAL;
3414
3415 if (filter->state >= DMXDEV_STATE_GO)
3416 dvb_dmxdev_filter_stop(filter);
3417
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303418 if (!dvb_filter_verify_buffer_size(filter))
3419 return -EINVAL;
3420
Andreas Oberritter34731df2006-03-14 17:31:01 -03003421 if (!filter->buffer.data) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303422 /*
3423 * dmxdev buffer in decoder filters is not really used
3424 * to exchange data with applications. Decoder buffers
3425 * can be set using DMX_SET_DECODER_BUFFER, which
3426 * would not update the filter->buffer.data at all.
3427		 * Therefore we should not treat this filter like
3428		 * other regular filters and should not fail here
3429		 * even if the user sets the buffer of a decoder
3430		 * filter as an external buffer.
3431 */
3432 if (filter->type == DMXDEV_TYPE_PES &&
3433 (filter->params.pes.output == DMX_OUT_DECODER ||
3434 filter->params.pes.output == DMX_OUT_TS_TAP))
3435 filter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;
3436
3437 if (!(filter->type == DMXDEV_TYPE_PES &&
3438 filter->params.pes.output == DMX_OUT_TS_TAP) &&
3439 (filter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL ||
3440 dvb_filter_external_buffer_only(dmxdev, filter)))
3441 return -ENOMEM;
3442
3443 mem = vmalloc_user(filter->buffer.size);
Andreas Oberritter34731df2006-03-14 17:31:01 -03003444 if (!mem)
3445 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446 spin_lock_irq(&filter->dev->lock);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003447 filter->buffer.data = mem;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448 spin_unlock_irq(&filter->dev->lock);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303449 } else if ((filter->buffer_mode == DMX_BUFFER_MODE_INTERNAL) &&
3450 dvb_filter_external_buffer_only(dmxdev, filter)) {
3451 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 }
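	/*
	 * Illustrative userspace setup matching the checks above (a
	 * sketch; "fd" and "ext_buff" are hypothetical). On demuxes that
	 * support only external buffers, the application must provide an
	 * external buffer before DMX_START, e.g.:
	 *
	 *	enum dmx_buffer_mode mode = DMX_BUFFER_MODE_EXTERNAL;
	 *
	 *	ioctl(fd, DMX_SET_BUFFER_MODE, &mode);
	 *	ioctl(fd, DMX_SET_BUFFER, &ext_buff);
	 *
	 * otherwise starting the filter in internal buffer mode fails
	 * with -ENOMEM as above.
	 */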
3453
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303454 filter->eos_state = 0;
3455
3456 spin_lock_irq(&filter->dev->lock);
3457 dvb_dmxdev_flush_output(&filter->buffer, &filter->events);
3458 spin_unlock_irq(&filter->dev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459
3460 switch (filter->type) {
3461 case DMXDEV_TYPE_SEC:
3462 {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003463 struct dmx_sct_filter_params *para = &filter->params.sec;
3464 struct dmx_section_filter **secfilter = &filter->filter.sec;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303465 struct dmx_section_feed **secfeed = &filter->feed.sec.feed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003467 *secfilter = NULL;
3468 *secfeed = NULL;
3469
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 /* find active filter/feed with same PID */
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003471 for (i = 0; i < dmxdev->filternum; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472 if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
Andreas Oberritter09794a62006-03-10 15:21:28 -03003473 dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
3474 dmxdev->filter[i].params.sec.pid == para->pid) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303475 *secfeed = dmxdev->filter[i].feed.sec.feed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 break;
3477 }
3478 }
3479
3480 /* if no feed found, try to allocate new one */
3481 if (!*secfeed) {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003482 ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303483 secfeed,
3484 dvb_dmxdev_section_callback);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003485 if (ret < 0) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303486 pr_err("DVB (%s): could not alloc feed\n",
Harvey Harrison46b4f7c2008-04-08 23:20:00 -03003487 __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488 return ret;
3489 }
3490
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303491 if ((*secfeed)->data_ready_cb) {
3492 ret = (*secfeed)->data_ready_cb(
3493 *secfeed,
3494 dvb_dmxdev_section_event_cb);
3495
3496 if (ret < 0) {
3497 pr_err(
3498 "DVB (%s): could not set event cb\n",
3499 __func__);
3500 dvb_dmxdev_feed_restart(filter);
3501 return ret;
3502 }
3503 }
3504
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003505 ret = (*secfeed)->set(*secfeed, para->pid, 32768,
3506 (para->flags & DMX_CHECK_CRC) ? 1 : 0);
3507 if (ret < 0) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303508 pr_err("DVB (%s): could not set feed\n",
3509 __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510 dvb_dmxdev_feed_restart(filter);
3511 return ret;
3512 }
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303513
3514 if ((*secfeed)->set_secure_mode)
3515 (*secfeed)->set_secure_mode(*secfeed,
3516 &filter->sec_mode);
3517
3518 if ((*secfeed)->set_cipher_ops)
3519 (*secfeed)->set_cipher_ops(*secfeed,
3520 &filter->feed.sec.cipher_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521 } else {
3522 dvb_dmxdev_feed_stop(filter);
3523 }
3524
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003525 ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526 if (ret < 0) {
3527 dvb_dmxdev_feed_restart(filter);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303528 filter->feed.sec.feed->start_filtering(*secfeed);
3529 pr_debug("could not get filter\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530 return ret;
3531 }
3532
3533 (*secfilter)->priv = filter;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303534 (*secfilter)->buffer.ringbuff = &filter->buffer;
3535 (*secfilter)->buffer.priv_handle = filter->priv_buff_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536
3537 memcpy(&((*secfilter)->filter_value[3]),
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003538 &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539 memcpy(&(*secfilter)->filter_mask[3],
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003540 &para->filter.mask[1], DMX_FILTER_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541 memcpy(&(*secfilter)->filter_mode[3],
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003542 &para->filter.mode[1], DMX_FILTER_SIZE - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003544 (*secfilter)->filter_value[0] = para->filter.filter[0];
3545 (*secfilter)->filter_mask[0] = para->filter.mask[0];
3546 (*secfilter)->filter_mode[0] = para->filter.mode[0];
3547 (*secfilter)->filter_mask[1] = 0;
3548 (*secfilter)->filter_mask[2] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549
3550 filter->todo = 0;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303551 filter->events.data_read_event_masked =
3552 filter->events.event_mask.disable_mask &
3553 DMX_EVENT_NEW_SECTION;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303555 ret = filter->feed.sec.feed->start_filtering(
3556 filter->feed.sec.feed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 if (ret < 0)
3558 return ret;
3559
3560 dvb_dmxdev_filter_timer(filter);
3561 break;
3562 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 case DMXDEV_TYPE_PES:
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303564 if (filter->params.pes.rec_chunk_size <
3565 DMX_REC_BUFF_CHUNK_MIN_SIZE)
3566 filter->params.pes.rec_chunk_size =
3567 DMX_REC_BUFF_CHUNK_MIN_SIZE;
3568
3569 if (filter->params.pes.rec_chunk_size >=
3570 filter->buffer.size)
3571 filter->params.pes.rec_chunk_size =
3572 filter->buffer.size >> 2;
3573
3574 /* Align rec-chunk based on output format */
3575 if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
3576 tsp_size = 188;
3577 else
3578 tsp_size = 192;
3579
3580 filter->params.pes.rec_chunk_size /= tsp_size;
3581 filter->params.pes.rec_chunk_size *= tsp_size;
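		/*
		 * Example of the alignment above: with 188-byte packets a
		 * requested rec_chunk_size of 10000 becomes
		 * (10000 / 188) * 188 = 9964 (53 whole TS packets); with
		 * 192-byte packets it becomes (10000 / 192) * 192 = 9984.
		 */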
3582
3583 if (filter->params.pes.output == DMX_OUT_TS_TAP)
3584 dmxdev->dvr_output_events.data_read_event_masked =
3585 dmxdev->dvr_output_events.event_mask.disable_mask &
3586 DMX_EVENT_NEW_REC_CHUNK;
3587 else if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
3588 filter->events.data_read_event_masked =
3589 filter->events.event_mask.disable_mask &
3590 DMX_EVENT_NEW_REC_CHUNK;
3591 else if (filter->params.pes.output == DMX_OUT_TAP)
3592 filter->events.data_read_event_masked =
3593 filter->events.event_mask.disable_mask &
3594 DMX_EVENT_NEW_PES;
3595 else
3596 filter->events.data_read_event_masked = 1;
3597
3598 ret = 0;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003599 list_for_each_entry(feed, &filter->feed.ts, next) {
3600 ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303601 if (ret)
3602 break;
3603 }
3604
3605 if (!ret)
3606 break;
3607
3608 /* cleanup feeds that were started before the failure */
3609 list_for_each_entry(feed, &filter->feed.ts, next) {
3610 if (!feed->ts)
3611 continue;
3612 feed->ts->stop_filtering(feed->ts);
3613 dmxdev->demux->release_ts_feed(dmxdev->demux, feed->ts);
3614 feed->ts = NULL;
3615
3616 if (filter->params.pes.output == DMX_OUT_TS_TAP) {
3617 filter->dev->dvr_feeds_count--;
3618 if (!filter->dev->dvr_feeds_count)
3619 filter->dev->dvr_feed = NULL;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003620 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621 }
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303622 return ret;
3623
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624 default:
3625 return -EINVAL;
3626 }
3627
3628 dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303629
3630 if ((filter->type == DMXDEV_TYPE_PES) &&
3631 !list_empty(&filter->insertion_buffers)) {
3632 struct ts_insertion_buffer *ts_buffer;
3633
3634 feed = list_first_entry(&filter->feed.ts,
3635 struct dmxdev_feed, next);
3636
3637 ret = 0;
3638 if (feed->ts->ts_insertion_init)
3639 ret = feed->ts->ts_insertion_init(feed->ts);
3640 if (!ret) {
3641 list_for_each_entry(ts_buffer,
3642 &filter->insertion_buffers, next)
3643 dvb_dmxdev_queue_ts_insertion(
3644 ts_buffer);
3645 } else {
3646 pr_err("%s: ts_insertion_init failed, err %d\n",
3647 __func__, ret);
3648 }
3649 }
3650
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 return 0;
3652}
3653
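/*
 * Illustrative section-filter setup from userspace (a sketch; "fd",
 * "buf" and the PID/table values are hypothetical):
 *
 *	struct dmx_sct_filter_params p = {
 *		.pid = 0x0010,
 *		.timeout = 5000,
 *		.flags = DMX_IMMEDIATE_START | DMX_CHECK_CRC,
 *	};
 *
 *	p.filter.filter[0] = 0x40;	(table_id to match)
 *	p.filter.mask[0] = 0xff;
 *
 *	ioctl(fd, DMX_SET_FILTER, &p);
 *	read(fd, buf, sizeof(buf));	(section data, see dvb_dmxdev_read_sec)
 */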
3654static int dvb_demux_open(struct inode *inode, struct file *file)
3655{
Johannes Stezenbach0c53c702005-05-16 21:54:24 -07003656 struct dvb_device *dvbdev = file->private_data;
3657 struct dmxdev *dmxdev = dvbdev->priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658 int i;
3659 struct dmxdev_filter *dmxdevfilter;
3660
3661 if (!dmxdev->filter)
3662 return -EINVAL;
3663
Ingo Molnar3593cab2006-02-07 06:49:14 -02003664 if (mutex_lock_interruptible(&dmxdev->mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003665 return -ERESTARTSYS;
3666
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003667 for (i = 0; i < dmxdev->filternum; i++)
3668 if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669 break;
3670
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003671 if (i == dmxdev->filternum) {
Ingo Molnar3593cab2006-02-07 06:49:14 -02003672 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003673 return -EMFILE;
3674 }
3675
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003676 dmxdevfilter = &dmxdev->filter[i];
Ingo Molnar3593cab2006-02-07 06:49:14 -02003677 mutex_init(&dmxdevfilter->mutex);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003678 file->private_data = dmxdevfilter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303680 memset(&dmxdevfilter->decoder_buffers,
3681 0,
3682 sizeof(dmxdevfilter->decoder_buffers));
3683 dmxdevfilter->decoder_buffers.buffers_size =
3684 DMX_DEFAULT_DECODER_BUFFER_SIZE;
3685 dmxdevfilter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;
3686 dmxdevfilter->priv_buff_handle = NULL;
Andreas Oberritter34731df2006-03-14 17:31:01 -03003687 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303688 dvb_dmxdev_flush_events(&dmxdevfilter->events);
3689 dmxdevfilter->events.event_mask.disable_mask = DMX_EVENT_NEW_ES_DATA;
3690 dmxdevfilter->events.event_mask.no_wakeup_mask = 0;
3691 dmxdevfilter->events.event_mask.wakeup_threshold = 1;
3692
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003693 dmxdevfilter->type = DMXDEV_TYPE_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695 init_timer(&dmxdevfilter->timer);
3696
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303697 dmxdevfilter->sec_mode.is_secured = 0;
3698
3699 INIT_LIST_HEAD(&dmxdevfilter->insertion_buffers);
3700
3701 dmxdevfilter->dmx_tsp_format = DMX_TSP_FORMAT_188;
Markus Rechberger57861b42007-04-14 10:19:18 -03003702 dvbdev->users++;
3703
Ingo Molnar3593cab2006-02-07 06:49:14 -02003704 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 return 0;
3706}
3707
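/*
 * Each open() of the demux node claims one free slot in dmxdev->filter[],
 * so applications open the device once per filter they want to run
 * (a sketch; the adapter/demux numbering is hypothetical):
 *
 *	int fd = open("/dev/dvb/adapter0/demux0", O_RDWR | O_NONBLOCK);
 *
 * Once all dmxdev->filternum slots are taken, open() fails with EMFILE.
 */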
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003708static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
3709 struct dmxdev_filter *dmxdevfilter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710{
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303711 struct ts_insertion_buffer *ts_buffer, *tmp;
3712
Simon Arlottc2788502007-03-10 06:21:25 -03003713 mutex_lock(&dmxdev->mutex);
3714 mutex_lock(&dmxdevfilter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715
3716 dvb_dmxdev_filter_stop(dmxdevfilter);
3717 dvb_dmxdev_filter_reset(dmxdevfilter);
3718
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303719 list_for_each_entry_safe(ts_buffer, tmp,
3720 &dmxdevfilter->insertion_buffers, next) {
3721 list_del(&ts_buffer->next);
3722 vfree(ts_buffer->buffer);
3723 vfree(ts_buffer);
3724 }
3725
Linus Torvalds1da177e2005-04-16 15:20:36 -07003726 if (dmxdevfilter->buffer.data) {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003727 void *mem = dmxdevfilter->buffer.data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728
3729 spin_lock_irq(&dmxdev->lock);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003730 dmxdevfilter->buffer.data = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731 spin_unlock_irq(&dmxdev->lock);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303732 if (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL)
3733 vfree(mem);
3734 }
3735
3736 if ((dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
3737 dmxdevfilter->priv_buff_handle) {
3738 dmxdev->demux->unmap_buffer(dmxdev->demux,
3739 dmxdevfilter->priv_buff_handle);
3740 dmxdevfilter->priv_buff_handle = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 }
3742
3743 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303744 wake_up_all(&dmxdevfilter->buffer.queue);
Ingo Molnar3593cab2006-02-07 06:49:14 -02003745 mutex_unlock(&dmxdevfilter->mutex);
3746 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 return 0;
3748}
3749
3750static inline void invert_mode(dmx_filter_t *filter)
3751{
3752 int i;
3753
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003754 for (i = 0; i < DMX_FILTER_SIZE; i++)
3755 filter->mode[i] ^= 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003756}
3757
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003758static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev,
3759 struct dmxdev_filter *filter, u16 pid)
3760{
3761 struct dmxdev_feed *feed;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303762 int ret = 0;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003763
3764 if ((filter->type != DMXDEV_TYPE_PES) ||
3765 (filter->state < DMXDEV_STATE_SET))
3766 return -EINVAL;
3767
3768 /* only TS packet filters may have multiple PIDs */
3769 if ((filter->params.pes.output != DMX_OUT_TSDEMUX_TAP) &&
3770 (!list_empty(&filter->feed.ts)))
3771 return -EINVAL;
3772
3773 feed = kzalloc(sizeof(struct dmxdev_feed), GFP_KERNEL);
3774 if (feed == NULL)
3775 return -ENOMEM;
3776
3777 feed->pid = pid;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303778 feed->cipher_ops.operations_count = 0;
3779 feed->idx_params.enable = 0;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003780
3781 if (filter->state >= DMXDEV_STATE_GO)
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303782 ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003783
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303784 if (!ret)
3785 list_add(&feed->next, &filter->feed.ts);
3786 else
3787 kfree(feed);
3788
3789 return ret;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003790}
3791
3792static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev,
3793 struct dmxdev_filter *filter, u16 pid)
3794{
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303795 int feed_count;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003796 struct dmxdev_feed *feed, *tmp;
3797
3798 if ((filter->type != DMXDEV_TYPE_PES) ||
3799 (filter->state < DMXDEV_STATE_SET))
3800 return -EINVAL;
3801
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303802 feed_count = 0;
3803 list_for_each_entry(tmp, &filter->feed.ts, next)
3804 feed_count++;
3805
3806 if (feed_count <= 1)
3807 return -EINVAL;
3808
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003809 list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303810 if (feed->pid == pid) {
3811 if (feed->ts != NULL) {
3812 feed->ts->stop_filtering(feed->ts);
3813 filter->dev->demux->release_ts_feed(
3814 filter->dev->demux,
3815 feed->ts);
3816 }
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003817 list_del(&feed->next);
3818 kfree(feed);
3819 }
3820 }
3821
3822 return 0;
3823}
3824
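/*
 * Illustrative multi-PID control from userspace (a sketch; "fd" and the
 * PID value are hypothetical). Extra PIDs may only be attached to a
 * filter whose output is DMX_OUT_TSDEMUX_TAP, and the last remaining PID
 * cannot be removed:
 *
 *	__u16 pid = 0x100;
 *
 *	ioctl(fd, DMX_ADD_PID, &pid);
 *	ioctl(fd, DMX_REMOVE_PID, &pid);
 */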
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003826 struct dmxdev_filter *dmxdevfilter,
3827 struct dmx_sct_filter_params *params)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828{
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303829 pr_debug("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
Mauro Carvalho Chehab17e67d42013-03-01 15:20:25 -03003830 __func__, params->pid, params->flags, params->timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831
3832 dvb_dmxdev_filter_stop(dmxdevfilter);
3833
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003834 dmxdevfilter->type = DMXDEV_TYPE_SEC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835 memcpy(&dmxdevfilter->params.sec,
3836 params, sizeof(struct dmx_sct_filter_params));
3837 invert_mode(&dmxdevfilter->params.sec.filter);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303838 dmxdevfilter->feed.sec.cipher_ops.operations_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
3840
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003841 if (params->flags & DMX_IMMEDIATE_START)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842 return dvb_dmxdev_filter_start(dmxdevfilter);
3843
3844 return 0;
3845}
3846
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303847static int dvb_dmxdev_set_secure_mode(
3848 struct dmxdev *dmxdev,
3849 struct dmxdev_filter *filter,
3850 struct dmx_secure_mode *sec_mode)
3851{
3852 if (!dmxdev || !filter || !sec_mode)
3853 return -EINVAL;
3854
3855 if (filter->state == DMXDEV_STATE_GO) {
3856 pr_err("%s: invalid filter state\n", __func__);
3857 return -EBUSY;
3858 }
3859
3860 pr_debug("%s: secure=%d\n", __func__, sec_mode->is_secured);
3861
3862 filter->sec_mode = *sec_mode;
3863
3864 return 0;
3865}
3866
3867static int dvb_dmxdev_set_cipher(struct dmxdev *dmxdev,
3868 struct dmxdev_filter *filter,
3869 struct dmx_cipher_operations *cipher_ops)
3870{
3871 struct dmxdev_feed *feed;
3872 struct dmxdev_feed *ts_feed = NULL;
3873 struct dmxdev_sec_feed *sec_feed = NULL;
3874 struct dmx_caps caps;
3875
3876 if (!dmxdev || !dmxdev->demux->get_caps)
3877 return -EINVAL;
3878
3879 dmxdev->demux->get_caps(dmxdev->demux, &caps);
3880
3881 if (!filter || !cipher_ops ||
3882 (cipher_ops->operations_count > caps.num_cipher_ops) ||
3883 (cipher_ops->operations_count >
3884 DMX_MAX_CIPHER_OPERATIONS_COUNT))
3885 return -EINVAL;
3886
3887 pr_debug("%s: pid=%d, operations=%d\n", __func__,
3888 cipher_ops->pid, cipher_ops->operations_count);
3889
3890 if (filter->state < DMXDEV_STATE_SET ||
3891 filter->state > DMXDEV_STATE_GO) {
3892 pr_err("%s: invalid filter state\n", __func__);
3893 return -EPERM;
3894 }
3895
3896 if (!filter->sec_mode.is_secured && cipher_ops->operations_count) {
3897 pr_err("%s: secure mode must be enabled to set cipher ops\n",
3898 __func__);
3899 return -EPERM;
3900 }
3901
3902 switch (filter->type) {
3903 case DMXDEV_TYPE_PES:
3904 list_for_each_entry(feed, &filter->feed.ts, next) {
3905 if (feed->pid == cipher_ops->pid) {
3906 ts_feed = feed;
3907 ts_feed->cipher_ops = *cipher_ops;
3908 if (filter->state == DMXDEV_STATE_GO &&
3909 ts_feed->ts->set_cipher_ops)
3910 ts_feed->ts->set_cipher_ops(
3911 ts_feed->ts, cipher_ops);
3912 break;
3913 }
3914 }
3915 break;
3916 case DMXDEV_TYPE_SEC:
3917 if (filter->params.sec.pid == cipher_ops->pid) {
3918 sec_feed = &filter->feed.sec;
3919 sec_feed->cipher_ops = *cipher_ops;
3920 if (filter->state == DMXDEV_STATE_GO &&
3921 sec_feed->feed->set_cipher_ops)
3922 sec_feed->feed->set_cipher_ops(sec_feed->feed,
3923 cipher_ops);
3924 }
3925 break;
3926
3927 default:
3928 return -EINVAL;
3929 }
3930
3931 if (!ts_feed && !sec_feed) {
3932 pr_err("%s: pid %d is undefined for this filter\n",
3933 __func__, cipher_ops->pid);
3934 return -EINVAL;
3935 }
3936
3937 return 0;
3938}
3939
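/*
 * Illustrative secure-filter setup from userspace (a sketch; "fd",
 * "params" and the cipher descriptor contents are hypothetical). Secure
 * mode cannot be changed while the filter is running, and cipher
 * operations are accepted only for a SET/GO filter with secure mode
 * enabled and a PID that belongs to the filter:
 *
 *	struct dmx_secure_mode sec = { .is_secured = 1 };
 *	struct dmx_cipher_operations ops = {
 *		.pid = 0x100,
 *		.operations_count = 1,
 *	};
 *
 *	ioctl(fd, DMX_SET_PES_FILTER, &params);	(params.pid == 0x100)
 *	ioctl(fd, DMX_SET_SECURE_MODE, &sec);
 *	ioctl(fd, DMX_SET_CIPHER, &ops);
 *	ioctl(fd, DMX_START, 0);
 */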
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003941 struct dmxdev_filter *dmxdevfilter,
3942 struct dmx_pes_filter_params *params)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003943{
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003944 int ret;
3945
Linus Torvalds1da177e2005-04-16 15:20:36 -07003946 dvb_dmxdev_filter_stop(dmxdevfilter);
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003947 dvb_dmxdev_filter_reset(dmxdevfilter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948
Mauro Carvalho Chehab31becf02012-10-27 15:30:47 -03003949 if ((unsigned)params->pes_type > DMX_PES_OTHER)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950 return -EINVAL;
3951
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003952 dmxdevfilter->type = DMXDEV_TYPE_PES;
3953 memcpy(&dmxdevfilter->params, params,
3954 sizeof(struct dmx_pes_filter_params));
Francesco Lavra691c9ae2010-02-07 09:49:58 -03003955 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956
3957 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
3958
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003959 ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter,
3960 dmxdevfilter->params.pes.pid);
3961 if (ret < 0)
3962 return ret;
3963
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003964 if (params->flags & DMX_IMMEDIATE_START)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003965 return dvb_dmxdev_filter_start(dmxdevfilter);
3966
3967 return 0;
3968}
3969
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303970static int dvb_dmxdev_set_decoder_buffer(struct dmxdev *dmxdev,
3971 struct dmxdev_filter *filter,
3972 struct dmx_decoder_buffers *buffs)
3973{
3974 int i;
3975 struct dmx_decoder_buffers *dec_buffs;
3976 struct dmx_caps caps;
3977
3978 if (!dmxdev || !filter || !buffs)
3979 return -EINVAL;
3980
3981 dec_buffs = &filter->decoder_buffers;
3982 if (!dmxdev->demux->get_caps)
3983 return -EINVAL;
3984
3985 dmxdev->demux->get_caps(dmxdev->demux, &caps);
3986 if (!dvb_dmxdev_verify_buffer_size(buffs->buffers_size,
3987 caps.decoder.max_size, caps.decoder.size_alignment))
3988 return -EINVAL;
3989
3990 if ((buffs->buffers_size == 0) ||
3991 (buffs->is_linear &&
3992 ((buffs->buffers_num <= 1) ||
3993 (buffs->buffers_num > DMX_MAX_DECODER_BUFFER_NUM))))
3994 return -EINVAL;
3995
3996 if (buffs->buffers_num == 0) {
3997 /* Internal mode - linear buffers not supported in this mode */
3998 if (!(caps.decoder.flags & DMX_BUFFER_INTERNAL_SUPPORT) ||
3999 buffs->is_linear)
4000 return -EINVAL;
4001 } else {
4002 /* External buffer(s) mode */
4003 if ((!(caps.decoder.flags & DMX_BUFFER_LINEAR_GROUP_SUPPORT) &&
4004 buffs->buffers_num > 1) ||
4005 !(caps.decoder.flags & DMX_BUFFER_EXTERNAL_SUPPORT) ||
4006 buffs->buffers_num > caps.decoder.max_buffer_num)
4007 return -EINVAL;
4008
4009 dec_buffs->is_linear = buffs->is_linear;
4010 dec_buffs->buffers_num = buffs->buffers_num;
4011 dec_buffs->buffers_size = buffs->buffers_size;
4012 for (i = 0; i < dec_buffs->buffers_num; i++)
4013 dec_buffs->handles[i] = buffs->handles[i];
4014 }
4015
4016 return 0;
4017}
4018
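/*
 * Illustrative decoder-buffer setup from userspace (a sketch; "fd",
 * "handle" and the sizes are hypothetical). For a single external,
 * non-linear decoder buffer:
 *
 *	struct dmx_decoder_buffers b = {
 *		.is_linear = 0,
 *		.buffers_num = 1,
 *		.buffers_size = 512 * 1024,
 *	};
 *
 *	b.handles[0] = handle;
 *	ioctl(fd, DMX_SET_DECODER_BUFFER, &b);
 *
 * Passing buffers_num == 0 selects the demux-internal decoder buffer,
 * which requires DMX_BUFFER_INTERNAL_SUPPORT in caps.decoder.flags.
 */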
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004020 struct file *file, char __user *buf,
4021 size_t count, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004022{
4023 int result, hcount;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004024 int done = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004026 if (dfil->todo <= 0) {
4027 hcount = 3 + dfil->todo;
4028 if (hcount > count)
4029 hcount = count;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304030 result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004031 file->f_flags & O_NONBLOCK,
4032 buf, hcount, ppos);
4033 if (result < 0) {
4034 dfil->todo = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035 return result;
4036 }
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004037 if (copy_from_user(dfil->secheader - dfil->todo, buf, result))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 return -EFAULT;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004039 buf += result;
4040 done = result;
4041 count -= result;
4042 dfil->todo -= result;
4043 if (dfil->todo > -3)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044 return done;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004045 dfil->todo = ((dfil->secheader[1] << 8) | dfil->secheader[2]) & 0xfff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046 if (!count)
4047 return done;
4048 }
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004049 if (count > dfil->todo)
4050 count = dfil->todo;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304051 result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004052 file->f_flags & O_NONBLOCK,
4053 buf, count, ppos);
4054 if (result < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055 return result;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004056 dfil->todo -= result;
4057 return (result + done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058}
4059
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060static ssize_t
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004061dvb_demux_read(struct file *file, char __user *buf, size_t count,
4062 loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004063{
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004064 struct dmxdev_filter *dmxdevfilter = file->private_data;
4065 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066
Ingo Molnar3593cab2006-02-07 06:49:14 -02004067 if (mutex_lock_interruptible(&dmxdevfilter->mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 return -ERESTARTSYS;
4069
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304070 if (dmxdevfilter->eos_state &&
4071 dvb_ringbuffer_empty(&dmxdevfilter->buffer)) {
4072 mutex_unlock(&dmxdevfilter->mutex);
4073 return 0;
4074 }
4075
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004076 if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
4077 ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078 else
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304079 ret = dvb_dmxdev_buffer_read(dmxdevfilter,
4080 &dmxdevfilter->buffer,
4081 file->f_flags & O_NONBLOCK,
4082 buf, count, ppos);
4083
4084 if (ret > 0) {
4085 dvb_dmxdev_notify_data_read(dmxdevfilter, ret);
4086 spin_lock_irq(&dmxdevfilter->dev->lock);
4087 dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
4088 spin_unlock_irq(&dmxdevfilter->dev->lock);
4089
4090 /*
4091 * in PULL mode, we might be stalling on
4092 * event queue, so need to wake-up waiters
4093 */
4094 if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL)
4095 wake_up_all(&dmxdevfilter->buffer.queue);
4096 } else if (ret == -EOVERFLOW) {
4097 dvb_dmxdev_auto_flush_buffer(dmxdevfilter,
4098 &dmxdevfilter->buffer);
4099 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100
Ingo Molnar3593cab2006-02-07 06:49:14 -02004101 mutex_unlock(&dmxdevfilter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102 return ret;
4103}
4104
Arnd Bergmann16ef8de2010-04-27 00:24:00 +02004105static int dvb_demux_do_ioctl(struct file *file,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106 unsigned int cmd, void *parg)
4107{
Peter Beutner3ec4a302005-07-07 17:57:39 -07004108 struct dmxdev_filter *dmxdevfilter = file->private_data;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004109 struct dmxdev *dmxdev = dmxdevfilter->dev;
4110 unsigned long arg = (unsigned long)parg;
4111 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112
Ingo Molnar3593cab2006-02-07 06:49:14 -02004113 if (mutex_lock_interruptible(&dmxdev->mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114 return -ERESTARTSYS;
4115
4116 switch (cmd) {
4117 case DMX_START:
Ingo Molnar3593cab2006-02-07 06:49:14 -02004118 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4119 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120 return -ERESTARTSYS;
4121 }
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004122 if (dmxdevfilter->state < DMXDEV_STATE_SET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004123 ret = -EINVAL;
4124 else
4125 ret = dvb_dmxdev_filter_start(dmxdevfilter);
Ingo Molnar3593cab2006-02-07 06:49:14 -02004126 mutex_unlock(&dmxdevfilter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127 break;
4128
4129 case DMX_STOP:
Ingo Molnar3593cab2006-02-07 06:49:14 -02004130 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4131 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132 return -ERESTARTSYS;
4133 }
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004134 ret = dvb_dmxdev_filter_stop(dmxdevfilter);
Ingo Molnar3593cab2006-02-07 06:49:14 -02004135 mutex_unlock(&dmxdevfilter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004136 break;
4137
4138 case DMX_SET_FILTER:
Ingo Molnar3593cab2006-02-07 06:49:14 -02004139 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4140 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004141 return -ERESTARTSYS;
4142 }
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004143 ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg);
Ingo Molnar3593cab2006-02-07 06:49:14 -02004144 mutex_unlock(&dmxdevfilter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145 break;
4146
4147 case DMX_SET_PES_FILTER:
Ingo Molnar3593cab2006-02-07 06:49:14 -02004148 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4149 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150 return -ERESTARTSYS;
4151 }
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004152 ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg);
Ingo Molnar3593cab2006-02-07 06:49:14 -02004153 mutex_unlock(&dmxdevfilter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154 break;
4155
4156 case DMX_SET_BUFFER_SIZE:
Ingo Molnar3593cab2006-02-07 06:49:14 -02004157 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4158 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159 return -ERESTARTSYS;
4160 }
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004161 ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg);
Ingo Molnar3593cab2006-02-07 06:49:14 -02004162 mutex_unlock(&dmxdevfilter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 break;
4164
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304165 case DMX_SET_BUFFER_MODE:
4166 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4167 mutex_unlock(&dmxdev->mutex);
4168 return -ERESTARTSYS;
4169 }
4170 ret = dvb_dmxdev_set_buffer_mode(dmxdevfilter,
4171 *(enum dmx_buffer_mode *)parg);
4172 mutex_unlock(&dmxdevfilter->mutex);
4173 break;
4174
4175 case DMX_SET_BUFFER:
4176 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4177 mutex_unlock(&dmxdev->mutex);
4178 return -ERESTARTSYS;
4179 }
4180 ret = dvb_dmxdev_set_buffer(dmxdevfilter, parg);
4181 mutex_unlock(&dmxdevfilter->mutex);
4182 break;
4183
4184 case DMX_GET_BUFFER_STATUS:
4185 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4186 mutex_unlock(&dmxdev->mutex);
4187 return -ERESTARTSYS;
4188 }
4189 ret = dvb_dmxdev_get_buffer_status(dmxdevfilter, parg);
4190 mutex_unlock(&dmxdevfilter->mutex);
4191 break;
4192
4193 case DMX_RELEASE_DATA:
4194 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4195 mutex_unlock(&dmxdev->mutex);
4196 return -ERESTARTSYS;
4197 }
4198 ret = dvb_dmxdev_release_data(dmxdevfilter, arg);
4199 mutex_unlock(&dmxdevfilter->mutex);
4200 break;
4201
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202 case DMX_GET_PES_PIDS:
4203 if (!dmxdev->demux->get_pes_pids) {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004204 ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205 break;
4206 }
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004207 dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208 break;
4209
Andreas Oberritterc0510052005-09-09 13:02:21 -07004210 case DMX_GET_CAPS:
4211 if (!dmxdev->demux->get_caps) {
4212 ret = -EINVAL;
4213 break;
4214 }
4215 ret = dmxdev->demux->get_caps(dmxdev->demux, parg);
4216 break;
4217
4218 case DMX_SET_SOURCE:
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304219 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4220 mutex_unlock(&dmxdev->mutex);
4221 return -ERESTARTSYS;
4222 }
4223 ret = dvb_dmxdev_set_source(dmxdevfilter, parg);
4224 mutex_unlock(&dmxdevfilter->mutex);
4225 break;
4226
4227 case DMX_SET_TS_PACKET_FORMAT:
4228 if (!dmxdev->demux->set_tsp_format) {
Andreas Oberritterc0510052005-09-09 13:02:21 -07004229 ret = -EINVAL;
4230 break;
4231 }
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304232
4233 if (dmxdevfilter->state >= DMXDEV_STATE_GO) {
4234 ret = -EBUSY;
4235 break;
4236 }
4237 ret = dmxdev->demux->set_tsp_format(
4238 dmxdev->demux,
4239 *(enum dmx_tsp_format_t *)parg);
Andreas Oberritterc0510052005-09-09 13:02:21 -07004240 break;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304241
4242 case DMX_SET_TS_OUT_FORMAT:
4243 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4244 mutex_unlock(&dmxdev->mutex);
4245 return -ERESTARTSYS;
4246 }
4247
4248 ret = dvb_dmxdev_set_tsp_out_format(dmxdevfilter,
4249 *(enum dmx_tsp_format_t *)parg);
4250
4251 mutex_unlock(&dmxdevfilter->mutex);
4252 break;
4253
4254 case DMX_SET_DECODER_BUFFER_SIZE:
4255 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4256 mutex_unlock(&dmxdev->mutex);
4257 return -ERESTARTSYS;
4258 }
4259
4260 ret = dvb_dmxdev_set_decoder_buffer_size(dmxdevfilter, arg);
4261 mutex_unlock(&dmxdevfilter->mutex);
4262 break;
4263
4264 case DMX_SET_PLAYBACK_MODE:
4265 ret = dvb_dmxdev_set_playback_mode(
4266 dmxdevfilter,
4267 *(enum dmx_playback_mode_t *)parg);
4268 break;
4269
4270 case DMX_GET_EVENT:
4271 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4272 mutex_unlock(&dmxdev->mutex);
4273 return -ERESTARTSYS;
4274 }
4275 ret = dvb_dmxdev_get_event(dmxdevfilter, parg);
4276 mutex_unlock(&dmxdevfilter->mutex);
4277 break;
Andreas Oberritterc0510052005-09-09 13:02:21 -07004278
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279 case DMX_GET_STC:
4280 if (!dmxdev->demux->get_stc) {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004281 ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282 break;
4283 }
4284 ret = dmxdev->demux->get_stc(dmxdev->demux,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004285 ((struct dmx_stc *)parg)->num,
4286 &((struct dmx_stc *)parg)->stc,
4287 &((struct dmx_stc *)parg)->base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288 break;
4289
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03004290 case DMX_ADD_PID:
4291 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4292 ret = -ERESTARTSYS;
4293 break;
4294 }
4295 ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
4296 mutex_unlock(&dmxdevfilter->mutex);
4297 break;
4298
4299 case DMX_REMOVE_PID:
4300 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4301 ret = -ERESTARTSYS;
4302 break;
4303 }
4304 ret = dvb_dmxdev_remove_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
4305 mutex_unlock(&dmxdevfilter->mutex);
4306 break;
4307
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304308 case DMX_SET_DECODER_BUFFER:
4309 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4310 ret = -ERESTARTSYS;
4311 break;
4312 }
4313 ret = dvb_dmxdev_set_decoder_buffer(dmxdev, dmxdevfilter, parg);
4314 mutex_unlock(&dmxdevfilter->mutex);
4315 break;
4316
4317 case DMX_SET_SECURE_MODE:
4318 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4319 ret = -ERESTARTSYS;
4320 break;
4321 }
4322 ret = dvb_dmxdev_set_secure_mode(dmxdev, dmxdevfilter, parg);
4323 mutex_unlock(&dmxdevfilter->mutex);
4324 break;
4325
4326 case DMX_SET_CIPHER:
4327 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4328 ret = -ERESTARTSYS;
4329 break;
4330 }
4331 ret = dvb_dmxdev_set_cipher(dmxdev, dmxdevfilter, parg);
4332 mutex_unlock(&dmxdevfilter->mutex);
4333 break;
4334
4335 case DMX_REUSE_DECODER_BUFFER:
4336 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4337 mutex_unlock(&dmxdev->mutex);
4338 return -ERESTARTSYS;
4339 }
4340 ret = dvb_dmxdev_reuse_decoder_buf(dmxdevfilter, arg);
4341 mutex_unlock(&dmxdevfilter->mutex);
4342 break;
4343
4344 case DMX_SET_EVENTS_MASK:
4345 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4346 mutex_unlock(&dmxdev->mutex);
4347 return -ERESTARTSYS;
4348 }
4349 ret = dvb_dmxdev_set_event_mask(dmxdevfilter, parg);
4350 mutex_unlock(&dmxdevfilter->mutex);
4351 break;
4352
4353 case DMX_GET_EVENTS_MASK:
4354 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4355 mutex_unlock(&dmxdev->mutex);
4356 return -ERESTARTSYS;
4357 }
4358 ret = dvb_dmxdev_get_event_mask(dmxdevfilter, parg);
4359 mutex_unlock(&dmxdevfilter->mutex);
4360 break;
4361
4362 case DMX_SET_INDEXING_PARAMS:
4363 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4364 mutex_unlock(&dmxdev->mutex);
4365 return -ERESTARTSYS;
4366 }
4367 ret = dvb_dmxdev_set_indexing_params(dmxdevfilter, parg);
4368 mutex_unlock(&dmxdevfilter->mutex);
4369 break;
4370
4371 case DMX_SET_TS_INSERTION:
4372 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4373 mutex_unlock(&dmxdev->mutex);
4374 return -ERESTARTSYS;
4375 }
4376 ret = dvb_dmxdev_set_ts_insertion(dmxdevfilter, parg);
4377 mutex_unlock(&dmxdevfilter->mutex);
4378 break;
4379
4380 case DMX_ABORT_TS_INSERTION:
4381 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4382 mutex_unlock(&dmxdev->mutex);
4383 return -ERESTARTSYS;
4384 }
4385 ret = dvb_dmxdev_abort_ts_insertion(dmxdevfilter, parg);
4386 mutex_unlock(&dmxdevfilter->mutex);
4387 break;
4388
4389 case DMX_GET_SCRAMBLING_BITS:
4390 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4391 mutex_unlock(&dmxdev->mutex);
4392 return -ERESTARTSYS;
4393 }
4394 ret = dvb_dmxdev_get_scrambling_bits(dmxdevfilter, parg);
4395 mutex_unlock(&dmxdevfilter->mutex);
4396 break;
4397
4398 case DMX_FLUSH_BUFFER:
4399 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4400 mutex_unlock(&dmxdev->mutex);
4401 return -ERESTARTSYS;
4402 }
4403 ret = dvb_dmxdev_flush_buffer(dmxdevfilter);
4404 mutex_unlock(&dmxdevfilter->mutex);
4405 break;
4406
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407 default:
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304408 pr_err("%s: unknown ioctl code (0x%x)\n",
4409 __func__, cmd);
4410 ret = -ENOIOCTLCMD;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004411 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412 }
Ingo Molnar3593cab2006-02-07 06:49:14 -02004413 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414 return ret;
4415}
4416
Arnd Bergmann16ef8de2010-04-27 00:24:00 +02004417static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
4418 unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004419{
Arnd Bergmann72024f12010-09-11 19:56:45 +02004420 return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421}
4422
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304423#ifdef CONFIG_COMPAT
4424
4425struct dmx_set_ts_insertion32 {
4426 __u32 identifier;
4427 __u32 repetition_time;
4428 compat_uptr_t ts_packets;
4429 compat_size_t size;
4430};
4431
4432static long dmx_set_ts_insertion32_wrapper(struct file *file, unsigned int cmd,
4433 unsigned long arg)
4434{
4435 int ret;
4436 struct dmx_set_ts_insertion32 dmx_ts_insert32;
4437 struct dmx_set_ts_insertion dmx_ts_insert;
4438
4439 ret = copy_from_user(&dmx_ts_insert32, (void __user *)arg,
4440 sizeof(dmx_ts_insert32));
4441 if (ret) {
4442 pr_err(
4443 "%s: copy dmx_set_ts_insertion32 from user failed, ret=%d\n",
4444 __func__, ret);
4445 return -EFAULT;
4446 }
4447
4448 memset(&dmx_ts_insert, 0, sizeof(dmx_ts_insert));
4449 dmx_ts_insert.identifier = dmx_ts_insert32.identifier;
4450 dmx_ts_insert.repetition_time = dmx_ts_insert32.repetition_time;
4451 dmx_ts_insert.ts_packets = compat_ptr(dmx_ts_insert32.ts_packets);
4452 dmx_ts_insert.size = dmx_ts_insert32.size;
4453
4454 ret = dvb_demux_do_ioctl(file, DMX_SET_TS_INSERTION, &dmx_ts_insert);
4455
4456 return ret;
4457}
4458
4459#define DMX_SET_TS_INSERTION32 _IOW('o', 70, struct dmx_set_ts_insertion32)
4460
4461/*
4462 * compat ioctl is called whenever compatibility is required, i.e. when a 32-bit
4463 * process calls an ioctl on a 64-bit kernel.
4464 */
4465static long dvb_demux_compat_ioctl(struct file *file, unsigned int cmd,
4466 unsigned long arg)
4467{
4468 long ret = 0;
4469
4470 switch (cmd) {
4471 case DMX_SET_TS_INSERTION32:
4472 ret = dmx_set_ts_insertion32_wrapper(file, cmd, arg);
4473 break;
4474 case DMX_SET_TS_INSERTION:
4475 pr_err("%s: 64bit ioctl code (0x%lx) used by 32bit userspace\n",
4476 __func__, DMX_SET_TS_INSERTION);
4477 ret = -ENOIOCTLCMD;
4478 break;
4479 default:
4480 /* use regular ioctl */
4481 ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
4482 }
4483
4484 return ret;
4485}
4486#endif
4487
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004488static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489{
Peter Beutner3ec4a302005-07-07 17:57:39 -07004490 struct dmxdev_filter *dmxdevfilter = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491 unsigned int mask = 0;
4492
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304493 if (!dmxdevfilter)
4494 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495
4496 poll_wait(file, &dmxdevfilter->buffer.queue, wait);
4497
4498 if (dmxdevfilter->state != DMXDEV_STATE_GO &&
4499 dmxdevfilter->state != DMXDEV_STATE_DONE &&
4500 dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
4501 return 0;
4502
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304503 if (dmxdevfilter->buffer.error) {
4504 mask |= (POLLIN | POLLRDNORM | POLLERR);
4505 if (dmxdevfilter->buffer.error == -EOVERFLOW)
4506 mask |= POLLPRI;
4507 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508
Andreas Oberritter34731df2006-03-14 17:31:01 -03004509 if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304510 mask |= (POLLIN | POLLRDNORM);
4511
4512 if (dmxdevfilter->events.wakeup_events_counter >=
4513 dmxdevfilter->events.event_mask.wakeup_threshold)
4514 mask |= POLLPRI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515
4516 return mask;
4517}
4518
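/*
 * Illustrative event loop from userspace (a sketch; "fd", "buf" and the
 * event structure name are hypothetical). POLLPRI reports queued filter
 * events or an overflow, POLLIN/POLLRDNORM report buffered data:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLPRI)
 *			ioctl(fd, DMX_GET_EVENT, &event);
 *		if (pfd.revents & POLLIN)
 *			read(fd, buf, sizeof(buf));
 *	}
 */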
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304519static int dvb_demux_mmap(struct file *filp, struct vm_area_struct *vma)
4520{
4521 struct dmxdev_filter *dmxdevfilter = filp->private_data;
4522 struct dmxdev *dmxdev = dmxdevfilter->dev;
4523 int ret;
4524 int vma_size;
4525 int buffer_size;
4526
4527 vma_size = vma->vm_end - vma->vm_start;
4528
4529 if (vma->vm_flags & VM_WRITE)
4530 return -EINVAL;
4531
4532 if (mutex_lock_interruptible(&dmxdev->mutex))
4533 return -ERESTARTSYS;
4534
4535 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4536 mutex_unlock(&dmxdev->mutex);
4537 return -ERESTARTSYS;
4538 }
4539
4540 if ((!dmxdevfilter->buffer.data) ||
4541 (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL)) {
4542 mutex_unlock(&dmxdevfilter->mutex);
4543 mutex_unlock(&dmxdev->mutex);
4544 return -EINVAL;
4545 }
4546
4547	/* The requested mapping must exactly match the page-aligned buffer size */
4548 buffer_size = dmxdevfilter->buffer.size + (PAGE_SIZE-1);
4549 buffer_size = buffer_size & ~(PAGE_SIZE-1);
4550
4551 if (vma_size != buffer_size) {
4552 mutex_unlock(&dmxdevfilter->mutex);
4553 mutex_unlock(&dmxdev->mutex);
4554 return -EINVAL;
4555 }
4556
4557 ret = remap_vmalloc_range(vma, dmxdevfilter->buffer.data, 0);
4558 if (ret) {
4559 mutex_unlock(&dmxdevfilter->mutex);
4560 mutex_unlock(&dmxdev->mutex);
4561 return ret;
4562 }
4563
4564 vma->vm_flags |= VM_DONTDUMP;
4565 vma->vm_flags |= VM_DONTEXPAND;
4566
4567 mutex_unlock(&dmxdevfilter->mutex);
4568 mutex_unlock(&dmxdev->mutex);
4569
4570 return 0;
4571}
4572
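/*
 * Illustrative read-only mapping of an internal-mode filter buffer
 * (a sketch; "fd", "status" and "page_size" are hypothetical). The
 * length must equal the buffer size rounded up to a page multiple;
 * the current size can be read via DMX_GET_BUFFER_STATUS:
 *
 *	size_t len = (status.size + page_size - 1) & ~(page_size - 1);
 *	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 */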
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573static int dvb_demux_release(struct inode *inode, struct file *file)
4574{
Peter Beutner3ec4a302005-07-07 17:57:39 -07004575 struct dmxdev_filter *dmxdevfilter = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576 struct dmxdev *dmxdev = dmxdevfilter->dev;
Markus Rechberger57861b42007-04-14 10:19:18 -03004577 int ret;
4578
4579 ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);
4580
4581 mutex_lock(&dmxdev->mutex);
4582 dmxdev->dvbdev->users--;
4583 if(dmxdev->dvbdev->users==1 && dmxdev->exit==1) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304584 fops_put(file->f_op);
4585 file->f_op = NULL;
Markus Rechberger57861b42007-04-14 10:19:18 -03004586 mutex_unlock(&dmxdev->mutex);
4587 wake_up(&dmxdev->dvbdev->wait_queue);
4588 } else
4589 mutex_unlock(&dmxdev->mutex);
4590
4591 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592}
4593
Jan Engelhardt784e29d2009-01-11 06:12:43 -03004594static const struct file_operations dvb_demux_fops = {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004595 .owner = THIS_MODULE,
4596 .read = dvb_demux_read,
Arnd Bergmann16ef8de2010-04-27 00:24:00 +02004597 .unlocked_ioctl = dvb_demux_ioctl,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004598 .open = dvb_demux_open,
4599 .release = dvb_demux_release,
4600 .poll = dvb_demux_poll,
Arnd Bergmann6038f372010-08-15 18:52:59 +02004601 .llseek = default_llseek,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304602 .mmap = dvb_demux_mmap,
4603#ifdef CONFIG_COMPAT
4604 .compat_ioctl = dvb_demux_compat_ioctl,
4605#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606};
4607
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004608static const struct dvb_device dvbdev_demux = {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004609 .priv = NULL,
4610 .users = 1,
4611 .writers = 1,
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004612#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
Mauro Carvalho Chehabe4fd3bc2015-02-18 12:09:27 -03004613 .name = "dvb-demux",
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004614#endif
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004615 .fops = &dvb_demux_fops
Linus Torvalds1da177e2005-04-16 15:20:36 -07004616};
4617
Arnd Bergmann16ef8de2010-04-27 00:24:00 +02004618static int dvb_dvr_do_ioctl(struct file *file,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004619 unsigned int cmd, void *parg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620{
Johannes Stezenbach0c53c702005-05-16 21:54:24 -07004621 struct dvb_device *dvbdev = file->private_data;
4622 struct dmxdev *dmxdev = dvbdev->priv;
Andrea Odettia095be42008-04-20 19:14:51 -03004623 unsigned long arg = (unsigned long)parg;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004624 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625
Ingo Molnar3593cab2006-02-07 06:49:14 -02004626 if (mutex_lock_interruptible(&dmxdev->mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 return -ERESTARTSYS;
4628
4629 switch (cmd) {
4630 case DMX_SET_BUFFER_SIZE:
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304631 ret = dvb_dvr_set_buffer_size(dmxdev, file->f_flags, arg);
4632 break;
4633
4634 case DMX_SET_BUFFER_MODE:
4635 ret = dvb_dvr_set_buffer_mode(dmxdev, file->f_flags,
4636 *(enum dmx_buffer_mode *)parg);
4637 break;
4638
4639 case DMX_SET_BUFFER:
4640 ret = dvb_dvr_set_buffer(dmxdev, file->f_flags, parg);
4641 break;
4642
4643 case DMX_GET_BUFFER_STATUS:
4644 ret = dvb_dvr_get_buffer_status(dmxdev, file->f_flags, parg);
4645 break;
4646
4647 case DMX_RELEASE_DATA:
4648 ret = dvb_dvr_release_data(dmxdev, file->f_flags, arg);
4649 break;
4650
4651 case DMX_FEED_DATA:
4652 ret = dvb_dvr_feed_data(dmxdev, file->f_flags, arg);
4653 break;
4654
4655 case DMX_GET_EVENT:
4656 ret = dvb_dvr_get_event(dmxdev, file->f_flags, parg);
4657 break;
4658
4659 case DMX_PUSH_OOB_COMMAND:
4660 ret = dvb_dvr_push_oob_cmd(dmxdev, file->f_flags, parg);
4661 break;
4662
4663 case DMX_FLUSH_BUFFER:
4664 ret = dvb_dvr_flush_buffer(dmxdev, file->f_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665 break;
4666
4667 default:
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304668 ret = -ENOIOCTLCMD;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004669 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670 }
Ingo Molnar3593cab2006-02-07 06:49:14 -02004671 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672 return ret;
4673}
4674
Arnd Bergmann16ef8de2010-04-27 00:24:00 +02004675static long dvb_dvr_ioctl(struct file *file,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304676 unsigned int cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677{
Arnd Bergmann72024f12010-09-11 19:56:45 +02004678 return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679}
4680
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304681#ifdef CONFIG_COMPAT
4682static long dvb_dvr_compat_ioctl(struct file *file, unsigned int cmd,
4683 unsigned long arg)
4684{
4685 return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
4686}
4687#endif
4688
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004689static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690{
Johannes Stezenbach0c53c702005-05-16 21:54:24 -07004691 struct dvb_device *dvbdev = file->private_data;
4692 struct dmxdev *dmxdev = dvbdev->priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004693 unsigned int mask = 0;
4694
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304695 pr_debug("function : %s\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004697 if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304698 poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
4699
4700 if (dmxdev->dvr_buffer.error) {
4701 mask |= (POLLIN | POLLRDNORM | POLLERR);
4702 if (dmxdev->dvr_buffer.error == -EOVERFLOW)
4703 mask |= POLLPRI;
4704 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004705
Andreas Oberritter34731df2006-03-14 17:31:01 -03004706 if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304707 mask |= (POLLIN | POLLRDNORM);
4708
4709 if (dmxdev->dvr_output_events.wakeup_events_counter >=
4710 dmxdev->dvr_output_events.event_mask.wakeup_threshold)
4711 mask |= POLLPRI;
4712 } else {
4713 poll_wait(file, &dmxdev->dvr_input_buffer.queue, wait);
4714 if (dmxdev->dvr_input_buffer.error)
4715 mask |= (POLLOUT | POLLRDNORM | POLLPRI | POLLERR);
4716
4717 if (dvb_ringbuffer_free(&dmxdev->dvr_input_buffer))
4718 mask |= (POLLOUT | POLLRDNORM | POLLPRI);
4719 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004720
4721 return mask;
4722}
4723
Alexey Dobriyan828c0952009-10-01 15:43:56 -07004724static const struct file_operations dvb_dvr_fops = {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004725 .owner = THIS_MODULE,
4726 .read = dvb_dvr_read,
4727 .write = dvb_dvr_write,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304728 .mmap = dvb_dvr_mmap,
Arnd Bergmann16ef8de2010-04-27 00:24:00 +02004729 .unlocked_ioctl = dvb_dvr_ioctl,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304730#ifdef CONFIG_COMPAT
4731 .compat_ioctl = dvb_dvr_compat_ioctl,
4732#endif
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004733 .open = dvb_dvr_open,
4734 .release = dvb_dvr_release,
4735 .poll = dvb_dvr_poll,
Arnd Bergmann6038f372010-08-15 18:52:59 +02004736 .llseek = default_llseek,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737};
4738
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004739static const struct dvb_device dvbdev_dvr = {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004740 .priv = NULL,
Trent Piepho5e85bd02006-03-30 15:53:32 -03004741 .readers = 1,
Markus Rechberger57861b42007-04-14 10:19:18 -03004742 .users = 1,
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004743#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
Mauro Carvalho Chehabe4fd3bc2015-02-18 12:09:27 -03004744 .name = "dvb-dvr",
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004745#endif
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004746 .fops = &dvb_dvr_fops
Linus Torvalds1da177e2005-04-16 15:20:36 -07004747};
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304748
4749
4750/**
4751 * debugfs service to print information about active filters.
4752 */
4753static int dvb_dmxdev_dbgfs_print(struct seq_file *s, void *p)
4754{
4755 int i;
4756 struct dmxdev *dmxdev = s->private;
4757 struct dmxdev_filter *filter;
4758 int active_count = 0;
4759 struct dmx_buffer_status buffer_status;
4760 struct dmx_scrambling_bits scrambling_bits;
4761 static const char * const pes_feeds[] = {"DEC", "PES", "DVR", "REC"};
4762 int ret;
4763
4764 if (!dmxdev)
4765 return 0;
4766
4767 for (i = 0; i < dmxdev->filternum; i++) {
4768 filter = &dmxdev->filter[i];
4769 if (filter->state >= DMXDEV_STATE_GO) {
4770 active_count++;
4771
4772 seq_printf(s, "filter_%02d - ", i);
4773
4774 if (filter->type == DMXDEV_TYPE_SEC) {
4775 seq_puts(s, "type: SEC, ");
4776 seq_printf(s, "PID %04d ",
4777 filter->params.sec.pid);
4778 scrambling_bits.pid = filter->params.sec.pid;
4779 } else {
4780 seq_printf(s, "type: %s, ",
4781 pes_feeds[filter->params.pes.output]);
4782 seq_printf(s, "PID: %04d ",
4783 filter->params.pes.pid);
4784 scrambling_bits.pid = filter->params.pes.pid;
4785 }
4786
4787 dvb_dmxdev_get_scrambling_bits(filter,
4788 &scrambling_bits);
4789
4790 if (filter->type == DMXDEV_TYPE_PES &&
4791 filter->params.pes.output == DMX_OUT_TS_TAP)
4792 ret = dvb_dvr_get_buffer_status(dmxdev,
4793 O_RDONLY, &buffer_status);
4794 else
4795 ret = dvb_dmxdev_get_buffer_status(filter,
4796 &buffer_status);
4797 if (!ret) {
4798 seq_printf(s, "size: %08d, ",
4799 buffer_status.size);
4800 seq_printf(s, "fullness: %08d, ",
4801 buffer_status.fullness);
4802 seq_printf(s, "error: %d, ",
4803 buffer_status.error);
4804 }
4805
4806 seq_printf(s, "scramble: %d, ",
4807 scrambling_bits.value);
4808 seq_printf(s, "secured: %d\n",
4809 filter->sec_mode.is_secured);
4810 }
4811 }
4812
4813 if (!active_count)
4814 seq_puts(s, "No active filters\n");
4815
4816 return 0;
4817}
4818
4819static int dvb_dmxdev_dbgfs_open(struct inode *inode, struct file *file)
4820{
4821 return single_open(file, dvb_dmxdev_dbgfs_print, inode->i_private);
4822}
4823
4824static const struct file_operations dbgfs_filters_fops = {
4825 .open = dvb_dmxdev_dbgfs_open,
4826 .read = seq_read,
4827 .llseek = seq_lseek,
4828 .release = single_release,
4829 .owner = THIS_MODULE,
4830};
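/*
 * The "filters" debugfs entry registered from dvb_dmxdev_init() below uses
 * dvb_dmxdev_dbgfs_print() above as its show routine. A sketch of what a
 * read of that file could yield, assuming debugfs is mounted at
 * /sys/kernel/debug and using arbitrary example values; the directory name
 * is whatever the demux plugin created as debugfs_demux_dir:
 *
 *	# cat /sys/kernel/debug/<demux-dir>/filters
 *	filter_00 - type: REC, PID: 0100 size: 00065536, fullness: 00000000, error: 0, scramble: 0, secured: 0
 *
 * When no filter has reached the GO state, the file reads
 * "No active filters".
 */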
4831
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004832int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833{
4834 int i;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304835 struct dmx_caps caps;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004836
4837 if (dmxdev->demux->open(dmxdev->demux) < 0)
4838 return -EUSERS;
4839
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004840 dmxdev->filter = vmalloc(dmxdev->filternum * sizeof(struct dmxdev_filter));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004841 if (!dmxdev->filter)
4842 return -ENOMEM;
4843
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304844 dmxdev->playback_mode = DMX_PB_MODE_PUSH;
4845 dmxdev->demux->dvr_input_protected = 0;
4846
Ingo Molnar3593cab2006-02-07 06:49:14 -02004847 mutex_init(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004848 spin_lock_init(&dmxdev->lock);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304849 spin_lock_init(&dmxdev->dvr_in_lock);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004850 for (i = 0; i < dmxdev->filternum; i++) {
4851 dmxdev->filter[i].dev = dmxdev;
4852 dmxdev->filter[i].buffer.data = NULL;
4853 dvb_dmxdev_filter_state_set(&dmxdev->filter[i],
4854 DMXDEV_STATE_FREE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004855 }
4856
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004857 dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304858 DVB_DEVICE_DEMUX, 0);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004859 dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304860 dmxdev, DVB_DEVICE_DVR, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004861
Andreas Oberritter34731df2006-03-14 17:31:01 -03004862 dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304863 dvb_ringbuffer_init(&dmxdev->dvr_input_buffer, NULL, 8192);
4864
4865 /* Disable auto buffer flushing if plugin does not allow it */
4866 if (dmxdev->demux->get_caps) {
4867 dmxdev->demux->get_caps(dmxdev->demux, &caps);
4868 if (!(caps.caps & DMX_CAP_AUTO_BUFFER_FLUSH))
4869 overflow_auto_flush = 0;
4870 }
4871
4872 if (dmxdev->demux->debugfs_demux_dir)
4873 debugfs_create_file("filters", 0444,
4874 dmxdev->demux->debugfs_demux_dir, dmxdev,
4875 &dbgfs_filters_fops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004876
4877 return 0;
4878}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004880EXPORT_SYMBOL(dvb_dmxdev_init);
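/*
 * Minimal sketch of how a bridge driver is expected to hook this up at
 * probe time. The embedding structure and its members (my_dev, demux,
 * adapter) are assumptions made for the example; only the dmxdev fields
 * and the dvb_dmxdev_init() call itself come from this file. On failure
 * the function above returns -EUSERS (demux already opened) or -ENOMEM
 * (filter table allocation failed).
 *
 *	static int my_driver_register_dmxdev(struct my_dev *dev)
 *	{
 *		dev->dmxdev.filternum = 32;
 *		dev->dmxdev.demux = &dev->demux.dmx;
 *		dev->dmxdev.capabilities = 0;
 *
 *		return dvb_dmxdev_init(&dev->dmxdev, &dev->adapter);
 *	}
 */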
4881
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004882void dvb_dmxdev_release(struct dmxdev *dmxdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004883{
Markus Rechberger57861b42007-04-14 10:19:18 -03004884 dmxdev->exit = 1;
4885 if (dmxdev->dvbdev->users > 1) {
4886 wait_event(dmxdev->dvbdev->wait_queue,
4887 dmxdev->dvbdev->users == 1);
4888 }
4889 if (dmxdev->dvr_dvbdev->users > 1) {
4890 wait_event(dmxdev->dvr_dvbdev->wait_queue,
4891 dmxdev->dvr_dvbdev->users == 1);
4892 }
4893
Linus Torvalds1da177e2005-04-16 15:20:36 -07004894 dvb_unregister_device(dmxdev->dvbdev);
4895 dvb_unregister_device(dmxdev->dvr_dvbdev);
4896
4897 vfree(dmxdev->filter);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004898 dmxdev->filter = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004899 dmxdev->demux->close(dmxdev->demux);
4900}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902EXPORT_SYMBOL(dvb_dmxdev_release);
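/*
 * Matching teardown sketch for the probe-time example above; the embedding
 * structure is again an assumption. Note that dvb_dmxdev_release() waits
 * for any remaining openers of the demux and DVR nodes before it
 * unregisters them, so it must be called from a context that may sleep.
 *
 *	static void my_driver_unregister_dmxdev(struct my_dev *dev)
 *	{
 *		dvb_dmxdev_release(&dev->dmxdev);
 *	}
 */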