blob: 0eadf082f0ee50046ad11846c8e3840a9de5f022 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * dmxdev.c - DVB demultiplexer device
3 *
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004 * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
5 * for convergence integrated media GmbH
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public License
9 * as published by the Free Software Foundation; either version 2.1
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 */
22
Alexey Dobriyana99bbaf2009-10-04 16:11:37 +040023#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/spinlock.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/poll.h>
29#include <linux/ioctl.h>
30#include <linux/wait.h>
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +053031#include <linux/uaccess.h>
32#include <linux/debugfs.h>
33#include <linux/seq_file.h>
34#include <linux/compat.h>
35#include <linux/mm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include "dmxdev.h"
37
/*
 * Module parameter: when non-zero (the default), a filter whose output
 * buffer overflows is flushed automatically instead of stalling.
 */
static int overflow_auto_flush = 1;
module_param(overflow_auto_flush, int, 0644);
MODULE_PARM_DESC(overflow_auto_flush,
	"Automatically flush buffer on overflow (default: on)");

/* Default decoder filter buffer size in bytes (32 KiB). */
#define DMX_DEFAULT_DECODER_BUFFER_SIZE (32768)
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +053045static inline int dvb_dmxdev_verify_buffer_size(u32 size, u32 max_size,
46 u32 size_align)
47{
48 if (size_align)
49 return size <= max_size && !(size % size_align);
50 else
51 return size <= max_size;
52}
53
54static int dvb_filter_verify_buffer_size(struct dmxdev_filter *filter)
55{
56 struct dmx_caps caps;
57 size_t size = filter->buffer.size;
58
59 /*
60 * For backward compatibility, if no demux capabilities can
61 * be retrieved assume size is ok.
62 * Decoder filter buffer size is verified when decoder buffer is set.
63 */
64 if (filter->dev->demux->get_caps) {
65 filter->dev->demux->get_caps(filter->dev->demux, &caps);
66
67 if (filter->type == DMXDEV_TYPE_SEC)
68 return dvb_dmxdev_verify_buffer_size(
69 size,
70 caps.section.max_size,
71 caps.section.size_alignment);
72
73 if (filter->params.pes.output == DMX_OUT_TAP)
74 return dvb_dmxdev_verify_buffer_size(
75 size,
76 caps.pes.max_size,
77 caps.pes.size_alignment);
78
79 size = (filter->params.pes.output == DMX_OUT_TS_TAP) ?
80 filter->dev->dvr_buffer.size : size;
81
82 if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP ||
83 filter->params.pes.output == DMX_OUT_TS_TAP) {
84 if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
85 return dvb_dmxdev_verify_buffer_size(
86 size,
87 caps.recording_188_tsp.max_size,
88 caps.recording_188_tsp.size_alignment);
89
90 return dvb_dmxdev_verify_buffer_size(
91 size,
92 caps.recording_192_tsp.max_size,
93 caps.recording_192_tsp.size_alignment);
94 }
95 }
96
97 return 1;
98}
Linus Torvalds1da177e2005-04-16 15:20:36 -070099
Andreas Oberritter34731df2006-03-14 17:31:01 -0300100static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
101 const u8 *src, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102{
Andreas Oberritter34731df2006-03-14 17:31:01 -0300103 ssize_t free;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104
105 if (!len)
106 return 0;
107 if (!buf->data)
108 return 0;
109
Andreas Oberritter34731df2006-03-14 17:31:01 -0300110 free = dvb_ringbuffer_free(buf);
111 if (len > free) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +0530112 pr_debug("dmxdev: buffer overflow\n");
Andreas Oberritter34731df2006-03-14 17:31:01 -0300113 return -EOVERFLOW;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114 }
Andreas Oberritter34731df2006-03-14 17:31:01 -0300115
116 return dvb_ringbuffer_write(buf, src, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117}
118
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +0530119static inline void dvb_dmxdev_notify_data_read(struct dmxdev_filter *filter,
120 int bytes_read)
121{
122 if (!filter)
123 return;
124
125 if (filter->type == DMXDEV_TYPE_SEC) {
126 if (filter->feed.sec.feed->notify_data_read)
127 filter->feed.sec.feed->notify_data_read(
128 filter->filter.sec,
129 bytes_read);
130 } else {
131 struct dmxdev_feed *feed;
132
133 /*
134 * All feeds of same demux-handle share the same output
135 * buffer, it is enough to notify on the buffer status
136 * on one of the feeds
137 */
138 feed = list_first_entry(&filter->feed.ts,
139 struct dmxdev_feed, next);
140
141 if (feed->ts->notify_data_read)
142 feed->ts->notify_data_read(
143 feed->ts,
144 bytes_read);
145 }
146}
147
148static inline u32 dvb_dmxdev_advance_event_idx(u32 index)
149{
150 index++;
151 if (index >= DMX_EVENT_QUEUE_SIZE)
152 index = 0;
153
154 return index;
155}
156
157static inline int dvb_dmxdev_events_is_full(struct dmxdev_events_queue *events)
158{
159 int new_write_index;
160
161 new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
162 if (new_write_index == events->read_index)
163 return 1;
164
165 return 0;
166
167}
168
169static inline void dvb_dmxdev_flush_events(struct dmxdev_events_queue *events)
170{
171 events->read_index = 0;
172 events->write_index = 0;
173 events->notified_index = 0;
174 events->bytes_read_no_event = 0;
175 events->current_event_data_size = 0;
176 events->wakeup_events_counter = 0;
177}
178
/*
 * dvb_dmxdev_flush_output() - drop everything a filter has produced:
 * pending events first, then any buffered payload in the ring buffer.
 */
static inline void dvb_dmxdev_flush_output(struct dvb_ringbuffer *buffer,
	struct dmxdev_events_queue *events)
{
	dvb_dmxdev_flush_events(events);
	dvb_ringbuffer_flush(buffer);
}
185
186static int dvb_dmxdev_update_pes_event(struct dmx_filter_event *event,
187 int bytes_read)
188{
189 int start_delta;
190
191 if (event->params.pes.total_length <= bytes_read)
192 return event->params.pes.total_length;
193
194 /*
195 * only part of the data relevant to this event was read.
196 * Update the event's information to reflect the new state.
197 */
198 event->params.pes.total_length -= bytes_read;
199
200 start_delta = event->params.pes.start_offset -
201 event->params.pes.base_offset;
202
203 if (bytes_read <= start_delta) {
204 event->params.pes.base_offset +=
205 bytes_read;
206 } else {
207 start_delta =
208 bytes_read - start_delta;
209
210 event->params.pes.start_offset += start_delta;
211 event->params.pes.actual_length -= start_delta;
212
213 event->params.pes.base_offset =
214 event->params.pes.start_offset;
215 }
216
217 return 0;
218}
219
220static int dvb_dmxdev_update_section_event(struct dmx_filter_event *event,
221 int bytes_read)
222{
223 int start_delta;
224
225 if (event->params.section.total_length <= bytes_read)
226 return event->params.section.total_length;
227
228 /*
229 * only part of the data relevant to this event was read.
230 * Update the event's information to reflect the new state.
231 */
232
233 event->params.section.total_length -= bytes_read;
234
235 start_delta = event->params.section.start_offset -
236 event->params.section.base_offset;
237
238 if (bytes_read <= start_delta) {
239 event->params.section.base_offset +=
240 bytes_read;
241 } else {
242 start_delta =
243 bytes_read - start_delta;
244
245 event->params.section.start_offset += start_delta;
246 event->params.section.actual_length -= start_delta;
247
248 event->params.section.base_offset =
249 event->params.section.start_offset;
250 }
251
252 return 0;
253}
254
255static int dvb_dmxdev_update_rec_event(struct dmx_filter_event *event,
256 int bytes_read)
257{
258 if (event->params.recording_chunk.size <= bytes_read)
259 return event->params.recording_chunk.size;
260
261 /*
262 * only part of the data relevant to this event was read.
263 * Update the event's information to reflect the new state.
264 */
265 event->params.recording_chunk.size -= bytes_read;
266 event->params.recording_chunk.offset += bytes_read;
267
268 return 0;
269}
270
271static int dvb_dmxdev_add_event(struct dmxdev_events_queue *events,
272 struct dmx_filter_event *event)
273{
274 int res;
275 int new_write_index;
276 int data_event;
277
278 /* Check if the event is disabled */
279 if (events->event_mask.disable_mask & event->type)
280 return 0;
281
282 /* Check if we are adding an event that user already read its data */
283 if (events->bytes_read_no_event) {
284 data_event = 1;
285
286 if (event->type == DMX_EVENT_NEW_PES)
287 res = dvb_dmxdev_update_pes_event(event,
288 events->bytes_read_no_event);
289 else if (event->type == DMX_EVENT_NEW_SECTION)
290 res = dvb_dmxdev_update_section_event(event,
291 events->bytes_read_no_event);
292 else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
293 res = dvb_dmxdev_update_rec_event(event,
294 events->bytes_read_no_event);
295 else
296 data_event = 0;
297
298 if (data_event) {
299 if (res) {
300 /*
301 * Data relevant to this event was fully
302 * consumed already, discard event.
303 */
304 events->bytes_read_no_event -= res;
305 return 0;
306 }
307 events->bytes_read_no_event = 0;
308 } else {
309 /*
310 * data was read beyond the non-data event,
311 * making it not relevant anymore
312 */
313 return 0;
314 }
315 }
316
317 new_write_index = dvb_dmxdev_advance_event_idx(events->write_index);
318 if (new_write_index == events->read_index) {
319 pr_err("dmxdev: events overflow\n");
320 return -EOVERFLOW;
321 }
322
323 events->queue[events->write_index] = *event;
324 events->write_index = new_write_index;
325
326 if (!(events->event_mask.no_wakeup_mask & event->type))
327 events->wakeup_events_counter++;
328
329 return 0;
330}
331
332static int dvb_dmxdev_remove_event(struct dmxdev_events_queue *events,
333 struct dmx_filter_event *event)
334{
335 if (events->notified_index == events->write_index)
336 return -ENODATA;
337
338 *event = events->queue[events->notified_index];
339
340 events->notified_index =
341 dvb_dmxdev_advance_event_idx(events->notified_index);
342
343 if (!(events->event_mask.no_wakeup_mask & event->type))
344 events->wakeup_events_counter--;
345
346 return 0;
347}
348
/*
 * dvb_dmxdev_update_events() - reconcile the event queue after the user
 * read @bytes_read bytes from the filter's output buffer.
 *
 * @events:	the filter's event queue
 * @bytes_read:	number of bytes just consumed from the output buffer
 *
 * Walks first the already-notified events (read_index..notified_index)
 * and then the not-yet-notified ones (notified_index..write_index),
 * shifting or discarding each event the consumed bytes cover. Any
 * leftover bytes not matched by an event are remembered in
 * bytes_read_no_event so future events can be reconciled against them.
 *
 * Return: always 0.
 */
static int dvb_dmxdev_update_events(struct dmxdev_events_queue *events,
	int bytes_read)
{
	struct dmx_filter_event *event;
	int res;
	int data_event;

	/*
	 * If data events are not enabled on this filter,
	 * there's nothing to update.
	 */
	if (events->data_read_event_masked)
		return 0;

	/*
	 * Go through all events that were notified and
	 * remove them from the events queue if their respective
	 * data was read.
	 */
	while ((events->read_index != events->notified_index) &&
	       (bytes_read)) {
		event = events->queue + events->read_index;

		data_event = 1;

		if (event->type == DMX_EVENT_NEW_PES)
			res = dvb_dmxdev_update_pes_event(event, bytes_read);
		else if (event->type == DMX_EVENT_NEW_SECTION)
			res = dvb_dmxdev_update_section_event(event,
				bytes_read);
		else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
			res = dvb_dmxdev_update_rec_event(event, bytes_read);
		else
			data_event = 0;

		if (data_event) {
			if (res) {
				/*
				 * Data relevant to this event was
				 * fully consumed, remove it from the queue.
				 */
				bytes_read -= res;
				events->read_index =
					dvb_dmxdev_advance_event_idx(
						events->read_index);
			} else {
				/* Event only partially consumed; stop here */
				bytes_read = 0;
			}
		} else {
			/*
			 * non-data event was already notified,
			 * no need to keep it
			 */
			events->read_index = dvb_dmxdev_advance_event_idx(
				events->read_index);
		}
	}

	if (!bytes_read)
		return 0;

	/*
	 * If we reached here it means:
	 * bytes_read != 0
	 * events->read_index == events->notified_index
	 * Check if there are pending events in the queue
	 * which the user didn't read while their relevant data
	 * was read.
	 */
	while ((events->notified_index != events->write_index) &&
	       (bytes_read)) {
		event = events->queue + events->notified_index;

		data_event = 1;

		if (event->type == DMX_EVENT_NEW_PES)
			res = dvb_dmxdev_update_pes_event(event, bytes_read);
		else if (event->type == DMX_EVENT_NEW_SECTION)
			res = dvb_dmxdev_update_section_event(event,
				bytes_read);
		else if (event->type == DMX_EVENT_NEW_REC_CHUNK)
			res = dvb_dmxdev_update_rec_event(event, bytes_read);
		else
			data_event = 0;

		if (data_event) {
			if (res) {
				/*
				 * Data relevant to this event was
				 * fully consumed, remove it from the queue.
				 */
				bytes_read -= res;
				events->notified_index =
					dvb_dmxdev_advance_event_idx(
						events->notified_index);
				/* Un-notified events still count for wakeup */
				if (!(events->event_mask.no_wakeup_mask &
				      event->type))
					events->wakeup_events_counter--;
			} else {
				/* Event only partially consumed; stop here */
				bytes_read = 0;
			}
		} else {
			/* NOTE(review): bytes_read is non-zero here by the
			 * loop condition, so this test looks redundant but
			 * is preserved as-is.
			 */
			if (bytes_read) {
				/*
				 * data was read beyond the non-data event,
				 * making it not relevant anymore
				 */
				events->notified_index =
					dvb_dmxdev_advance_event_idx(
						events->notified_index);
				if (!(events->event_mask.no_wakeup_mask &
				      event->type))
					events->wakeup_events_counter--;
			}
		}

		/* Everything up to the notified position is now consumed */
		events->read_index = events->notified_index;
	}

	/*
	 * Check if data was read without having a respective
	 * event in the events-queue
	 */
	if (bytes_read)
		events->bytes_read_no_event += bytes_read;

	return 0;
}
477
478static inline int dvb_dmxdev_check_data(struct dmxdev_filter *filter,
479 struct dvb_ringbuffer *src)
480{
481 int data_status_change;
482
483 if (filter)
484 if (mutex_lock_interruptible(&filter->mutex))
485 return -ERESTARTSYS;
486
487 if (!src->data ||
488 !dvb_ringbuffer_empty(src) ||
489 src->error ||
490 (filter &&
491 (filter->state != DMXDEV_STATE_GO) &&
492 (filter->state != DMXDEV_STATE_DONE)))
493 data_status_change = 1;
494 else
495 data_status_change = 0;
496
497 if (filter)
498 mutex_unlock(&filter->mutex);
499
500 return data_status_change;
501}
502
/*
 * dvb_dmxdev_buffer_read() - blocking/non-blocking read from a filter's
 * output ring buffer into user space.
 *
 * @filter:	the filter being read, or NULL for a plain DVR read
 * @src:	source ring buffer
 * @non_blocking:	non-zero for O_NONBLOCK semantics
 * @buf:	user-space destination
 * @count:	number of bytes requested
 * @ppos:	unused file position
 *
 * NOTE(review): when @filter is non-NULL the caller appears to hold
 * filter->mutex on entry — the loop unlocks it around the wait and
 * re-takes it afterwards, and the -ERESTARTSYS/-ENODEV paths return
 * without re-unlocking. Confirm against the callers before changing
 * any locking here.
 *
 * Return: number of bytes read; a pending src->error (consumed and
 * cleared here) when nothing was read; 0 if the buffer vanished;
 * or a negative error (-EWOULDBLOCK, -ERESTARTSYS, -ENODEV).
 */
static ssize_t dvb_dmxdev_buffer_read(struct dmxdev_filter *filter,
					struct dvb_ringbuffer *src,
					int non_blocking, char __user *buf,
					size_t count, loff_t *ppos)
{
	size_t todo;
	ssize_t avail;
	ssize_t ret = 0;

	if (!src->data)
		return 0;

	/* Report and clear any error latched on the buffer first */
	if (src->error) {
		ret = src->error;
		src->error = 0;
		return ret;
	}

	for (todo = count; todo > 0; todo -= ret) {
		if (non_blocking && dvb_ringbuffer_empty(src)) {
			ret = -EWOULDBLOCK;
			break;
		}

		if (filter) {
			/* DONE + drained buffer means end of data */
			if ((filter->state == DMXDEV_STATE_DONE) &&
			    dvb_ringbuffer_empty(src))
				break;

			/* Drop the filter lock while sleeping for data */
			mutex_unlock(&filter->mutex);
		}

		ret = wait_event_interruptible(src->queue,
				dvb_dmxdev_check_data(filter, src));

		if (filter) {
			if (mutex_lock_interruptible(&filter->mutex))
				return -ERESTARTSYS;

			/* Filter was stopped/released while we slept */
			if ((filter->state != DMXDEV_STATE_GO) &&
			    (filter->state != DMXDEV_STATE_DONE))
				return -ENODEV;
		}

		if (ret < 0)
			break;

		/* Buffer was torn down while waiting */
		if (!src->data)
			return 0;

		if (src->error) {
			ret = src->error;
			src->error = 0;
			break;
		}

		avail = dvb_ringbuffer_avail(src);
		if (avail > todo)
			avail = todo;

		ret = dvb_ringbuffer_read_user(src, buf, avail);
		if (ret < 0)
			break;

		buf += ret;
	}

	if (count - todo) /* some data was read? */
		wake_up_all(&src->queue);

	return (count - todo) ? (count - todo) : ret;
}
575
Andreas Oberritterf705e6e2006-03-10 15:22:31 -0300576static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577{
578 struct list_head *head, *pos;
579
Andreas Oberritterf705e6e2006-03-10 15:22:31 -0300580 head = demux->get_frontends(demux);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700581 if (!head)
582 return NULL;
583 list_for_each(pos, head)
Andreas Oberritterf705e6e2006-03-10 15:22:31 -0300584 if (DMX_FE_ENTRY(pos)->source == type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 return DMX_FE_ENTRY(pos);
586
587 return NULL;
588}
589
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +0530590static void dvb_dvr_oob_cmd(struct dmxdev *dmxdev, struct dmx_oob_command *cmd)
591{
592 int i;
593 struct dmxdev_filter *filter;
594 struct dmxdev_feed *feed;
595
596 for (i = 0; i < dmxdev->filternum; i++) {
597 filter = &dmxdev->filter[i];
598 if (!filter || filter->state != DMXDEV_STATE_GO)
599 continue;
600
601 switch (filter->type) {
602 case DMXDEV_TYPE_SEC:
603 filter->feed.sec.feed->oob_command(
604 filter->feed.sec.feed, cmd);
605 break;
606 case DMXDEV_TYPE_PES:
607 feed = list_first_entry(&filter->feed.ts,
608 struct dmxdev_feed, next);
609 feed->ts->oob_command(feed->ts, cmd);
610 break;
611 case DMXDEV_TYPE_NONE:
612 break;
613 default:
614 break;
615 }
616 }
617}
618
/*
 * dvb_dvr_feed_cmd() - push queued DVR input data into the demux.
 *
 * @dmxdev:	the demux device
 * @dvr_cmd:	feed command carrying the number of bytes to process
 *
 * Runs in the dvr_input kernel thread. Consumes up to
 * dvr_cmd->cmd.data_feed_count bytes from the DVR input ring buffer in
 * whole-TS-packet units, handling ring-buffer wrap-around by splitting
 * the write in two. The dvr_in_lock spinlock is deliberately dropped
 * around demux->write() because the write may block (PULL mode) — see
 * the in-body comment below.
 *
 * Return: number of bytes actually written, or a negative error
 * (-ENODEV on teardown, -EINVAL on a latched buffer error, or the
 * interrupted-wait result).
 */
static int dvb_dvr_feed_cmd(struct dmxdev *dmxdev, struct dvr_command *dvr_cmd)
{
	int ret = 0;
	size_t todo;
	int bytes_written = 0;
	size_t split;
	size_t tsp_size;
	u8 *data_start;
	struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;

	todo = dvr_cmd->cmd.data_feed_count;

	/* Packet size is 188 unless the demux reports otherwise (e.g. 192) */
	if (dmxdev->demux->get_tsp_size)
		tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
	else
		tsp_size = 188;

	while (todo >= tsp_size) {
		/* wait for input */
		ret = wait_event_interruptible(
			src->queue,
			(dvb_ringbuffer_avail(src) >= tsp_size) ||
			dmxdev->dvr_in_exit || src->error);

		if (ret < 0)
			break;

		spin_lock(&dmxdev->dvr_in_lock);

		if (dmxdev->exit || dmxdev->dvr_in_exit) {
			spin_unlock(&dmxdev->dvr_in_lock);
			ret = -ENODEV;
			break;
		}

		if (src->error) {
			spin_unlock(&dmxdev->dvr_in_lock);
			wake_up_all(&src->queue);
			ret = -EINVAL;
			break;
		}

		dmxdev->dvr_processing_input = 1;

		/* Bytes until the end of the ring buffer, 0 if no wrap */
		split = (src->pread + todo > src->size) ?
			src->size - src->pread : 0;

		/*
		 * In DVR PULL mode, write might block.
		 * Lock on DVR buffer is released before calling to
		 * write, if DVR was released meanwhile, dvr_in_exit is
		 * prompted. Lock is acquired when updating the read pointer
		 * again to preserve read/write pointers consistency.
		 *
		 * In protected input mode, DVR input buffer is not mapped
		 * to kernel memory. Underlying demux implementation
		 * should trigger HW to read from DVR input buffer
		 * based on current read offset.
		 */
		if (split > 0) {
			data_start = (dmxdev->demux->dvr_input_protected) ?
				NULL : (src->data + src->pread);

			spin_unlock(&dmxdev->dvr_in_lock);
			ret = dmxdev->demux->write(dmxdev->demux,
						data_start,
						split);

			if (ret < 0) {
				pr_err("dmxdev: dvr write error %d\n", ret);
				continue;
			}

			if (dmxdev->dvr_in_exit) {
				ret = -ENODEV;
				break;
			}

			spin_lock(&dmxdev->dvr_in_lock);

			todo -= ret;
			bytes_written += ret;
			DVB_RINGBUFFER_SKIP(src, ret);
			/* Short write: let the writer refill, retry later */
			if (ret < split) {
				dmxdev->dvr_processing_input = 0;
				spin_unlock(&dmxdev->dvr_in_lock);
				wake_up_all(&src->queue);
				continue;
			}
		}

		/* Second (or only) chunk: from buffer start / read ptr */
		data_start = (dmxdev->demux->dvr_input_protected) ?
			NULL : (src->data + src->pread);

		spin_unlock(&dmxdev->dvr_in_lock);
		ret = dmxdev->demux->write(dmxdev->demux,
					data_start, todo);

		if (ret < 0) {
			pr_err("dmxdev: dvr write error %d\n", ret);
			continue;
		}

		if (dmxdev->dvr_in_exit) {
			ret = -ENODEV;
			break;
		}

		spin_lock(&dmxdev->dvr_in_lock);

		todo -= ret;
		bytes_written += ret;
		DVB_RINGBUFFER_SKIP(src, ret);
		dmxdev->dvr_processing_input = 0;
		spin_unlock(&dmxdev->dvr_in_lock);

		/* Free space opened up: wake any writer blocked on the ring */
		wake_up_all(&src->queue);
	}

	if (ret < 0)
		return ret;

	return bytes_written;
}
743
/*
 * dvr_input_thread_entry() - kernel thread processing DVR input commands.
 *
 * @arg: the struct dmxdev (cast from void *)
 *
 * Sleeps until a full struct dvr_command is available in the command
 * ring buffer (or the device is torn down), then dispatches it: data
 * feed commands push input bytes into the demux via dvb_dvr_feed_cmd(),
 * anything else is an OOB command broadcast to the active filters.
 * Partially-fed byte counts are carried over into the next feed command
 * via @leftover.
 *
 * Return: always 0 (thread exit status).
 */
static int dvr_input_thread_entry(void *arg)
{
	struct dmxdev *dmxdev = arg;
	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
	struct dvr_command dvr_cmd;
	int leftover = 0;
	int ret;

	while (1) {
		/* wait for input */
		ret = wait_event_interruptible(
			cmdbuf->queue,
			(!cmdbuf->data) ||
			(dvb_ringbuffer_avail(cmdbuf) >= sizeof(dvr_cmd)) ||
			(dmxdev->dvr_in_exit));

		if (ret < 0)
			break;

		spin_lock(&dmxdev->dvr_in_lock);

		/* Command buffer gone or device exiting: stop the thread */
		if (!cmdbuf->data || dmxdev->exit || dmxdev->dvr_in_exit) {
			spin_unlock(&dmxdev->dvr_in_lock);
			break;
		}

		dvb_ringbuffer_read(cmdbuf, (u8 *)&dvr_cmd, sizeof(dvr_cmd));

		spin_unlock(&dmxdev->dvr_in_lock);

		if (dvr_cmd.type == DVR_DATA_FEED_CMD) {
			/* Include bytes a previous feed could not consume */
			dvr_cmd.cmd.data_feed_count += leftover;

			ret = dvb_dvr_feed_cmd(dmxdev, &dvr_cmd);
			if (ret < 0) {
				pr_debug("%s: DVR data feed failed, ret=%d\n",
					__func__, ret);
				continue;
			}

			leftover = dvr_cmd.cmd.data_feed_count - ret;
		} else {
			/*
			 * For EOS, try to process leftover data in the input
			 * buffer.
			 */
			if (dvr_cmd.cmd.oobcmd.type == DMX_OOB_CMD_EOS) {
				struct dvr_command feed_cmd;

				feed_cmd.type = DVR_DATA_FEED_CMD;
				feed_cmd.cmd.data_feed_count =
					dvb_ringbuffer_avail(
						&dmxdev->dvr_input_buffer);
				dvb_dvr_feed_cmd(dmxdev, &feed_cmd);
			}

			dvb_dvr_oob_cmd(dmxdev, &dvr_cmd.cmd.oobcmd);
		}
	}

	/*
	 * Park until kthread_stop() is called so the stopper never races
	 * with a thread that already returned.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);

	return 0;
}
813
/*
 * dvb_dvr_open() - open handler for the DVR device node.
 *
 * Read-only opens allocate the DVR output ring buffer and reset its
 * event queue. Write-capable opens allocate the DVR input and command
 * buffers, switch the demux to the memory frontend and start the
 * dvr_input kernel thread. O_RDWR additionally requires the demux to
 * advertise DMXDEV_CAP_DUPLEX.
 *
 * Return: 0 on success or a negative error code.
 */
static int dvb_dvr_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	struct dmx_frontend *front;
	void *mem;

	pr_debug("function : %s(%X)\n", __func__, (file->f_flags & O_ACCMODE));

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (dmxdev->exit) {
		mutex_unlock(&dmxdev->mutex);
		return -ENODEV;
	}

	/* Full duplex requires explicit demux support */
	if ((file->f_flags & O_ACCMODE) == O_RDWR) {
		if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
			mutex_unlock(&dmxdev->mutex);
			return -EOPNOTSUPP;
		}
	}

	if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
		/* Only one reader of the DVR output is allowed */
		if (!dvbdev->readers) {
			mutex_unlock(&dmxdev->mutex);
			return -EBUSY;
		}
		/* vmalloc_user so the buffer can later be mmap'ed */
		mem = vmalloc_user(DVR_BUFFER_SIZE);
		if (!mem) {
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}
		dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
		dvb_dmxdev_flush_events(&dmxdev->dvr_output_events);
		dmxdev->dvr_output_events.event_mask.disable_mask = 0;
		dmxdev->dvr_output_events.event_mask.no_wakeup_mask = 0;
		dmxdev->dvr_output_events.event_mask.wakeup_threshold = 1;
		dmxdev->dvr_feeds_count = 0;
		dmxdev->dvr_buffer_mode = DMX_BUFFER_MODE_INTERNAL;
		dmxdev->dvr_priv_buff_handle = NULL;

		dvbdev->readers--;
	} else if (!dvbdev->writers) {
		/*
		 * NOTE(review): writer setup runs only when
		 * dvbdev->writers == 0 and then decrements it further —
		 * this relies on the dvr device template's initial
		 * writers value; confirm against the registration code
		 * before changing.
		 */
		dmxdev->dvr_in_exit = 0;
		dmxdev->dvr_processing_input = 0;
		dmxdev->dvr_orig_fe = dmxdev->demux->frontend;

		if (!dmxdev->demux->write) {
			mutex_unlock(&dmxdev->mutex);
			return -EOPNOTSUPP;
		}

		front = get_fe(dmxdev->demux, DMX_MEMORY_FE);

		if (!front) {
			mutex_unlock(&dmxdev->mutex);
			return -EINVAL;
		}

		mem = vmalloc_user(DVR_BUFFER_SIZE);
		if (!mem) {
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}

		/* Route the demux to pull its input from memory (DVR) */
		dmxdev->demux->disconnect_frontend(dmxdev->demux);
		dmxdev->demux->connect_frontend(dmxdev->demux, front);
		dmxdev->dvr_input_buffer_mode = DMX_BUFFER_MODE_INTERNAL;

		dvb_ringbuffer_init(&dmxdev->dvr_input_buffer,
					mem,
					DVR_BUFFER_SIZE);

		dmxdev->demux->dvr_input.priv_handle = NULL;
		dmxdev->demux->dvr_input.ringbuff = &dmxdev->dvr_input_buffer;
		dmxdev->demux->dvr_input_protected = 0;
		/* Command buffer is kernel-only, plain vmalloc suffices */
		mem = vmalloc(DVR_CMDS_BUFFER_SIZE);
		if (!mem) {
			vfree(dmxdev->dvr_input_buffer.data);
			dmxdev->dvr_input_buffer.data = NULL;
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}
		dvb_ringbuffer_init(&dmxdev->dvr_cmd_buffer, mem,
					DVR_CMDS_BUFFER_SIZE);
		dvbdev->writers--;

		dmxdev->dvr_input_thread =
			kthread_run(
				dvr_input_thread_entry,
				(void *)dmxdev,
				"dvr_input");

		if (IS_ERR(dmxdev->dvr_input_thread)) {
			vfree(dmxdev->dvr_input_buffer.data);
			vfree(dmxdev->dvr_cmd_buffer.data);
			dmxdev->dvr_input_buffer.data = NULL;
			dmxdev->dvr_cmd_buffer.data = NULL;
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}
	}

	dvbdev->users++;
	mutex_unlock(&dmxdev->mutex);
	return 0;
}
923
/*
 * dvb_dvr_release() - release handler for the DVR device node.
 *
 * Read-only releases free (or unmap) the DVR output buffer. Writer
 * releases signal dvr_in_exit, wake every waiter that could be blocked
 * on the DVR buffers, stop the dvr_input thread, restore the original
 * demux frontend and then free the input and command buffers. The
 * memory barriers before clearing each .data pointer ensure in-flight
 * users observe a consistent buffer before it is freed.
 *
 * Return: always 0.
 */
static int dvb_dvr_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;

	mutex_lock(&dmxdev->mutex);

	if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
		dvbdev->readers++;
		if (dmxdev->dvr_buffer.data) {
			void *mem = dmxdev->dvr_buffer.data;
			/* Order prior buffer accesses before the NULL-ing */
			mb();
			spin_lock_irq(&dmxdev->lock);
			dmxdev->dvr_buffer.data = NULL;
			spin_unlock_irq(&dmxdev->lock);
			wake_up_all(&dmxdev->dvr_buffer.queue);

			/* External buffers are unmapped below, not freed */
			if (dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_INTERNAL)
				vfree(mem);
		}

		if ((dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
			dmxdev->dvr_priv_buff_handle) {
			dmxdev->demux->unmap_buffer(dmxdev->demux,
				dmxdev->dvr_priv_buff_handle);
			dmxdev->dvr_priv_buff_handle = NULL;
		}
	} else {
		int i;

		spin_lock(&dmxdev->dvr_in_lock);
		dmxdev->dvr_in_exit = 1;
		spin_unlock(&dmxdev->dvr_in_lock);

		wake_up_all(&dmxdev->dvr_cmd_buffer.queue);

		/*
		 * There might be dmx filters reading now from DVR
		 * device, in PULL mode, they might be also stalled
		 * on output, signal to them that DVR is exiting.
		 */
		if (dmxdev->playback_mode == DMX_PB_MODE_PULL) {
			wake_up_all(&dmxdev->dvr_buffer.queue);

			for (i = 0; i < dmxdev->filternum; i++)
				if (dmxdev->filter[i].state == DMXDEV_STATE_GO)
					wake_up_all(
					&dmxdev->filter[i].buffer.queue);
		}

		/* notify kernel demux that we are canceling */
		if (dmxdev->demux->write_cancel)
			dmxdev->demux->write_cancel(dmxdev->demux);

		/*
		 * Now stop dvr-input thread so that no one
		 * would process data from dvr input buffer any more
		 * before it gets freed.
		 */
		kthread_stop(dmxdev->dvr_input_thread);

		dvbdev->writers++;
		/* Restore the frontend that was in use before DVR input */
		dmxdev->demux->disconnect_frontend(dmxdev->demux);
		dmxdev->demux->connect_frontend(dmxdev->demux,
			dmxdev->dvr_orig_fe);

		if (dmxdev->dvr_input_buffer.data) {
			void *mem = dmxdev->dvr_input_buffer.data;
			/*
			 * Ensure all the operations on the DVR input buffer
			 * are completed before it gets freed.
			 */
			mb();
			spin_lock_irq(&dmxdev->dvr_in_lock);
			dmxdev->dvr_input_buffer.data = NULL;
			spin_unlock_irq(&dmxdev->dvr_in_lock);

			if (dmxdev->dvr_input_buffer_mode ==
				DMX_BUFFER_MODE_INTERNAL)
				vfree(mem);
		}

		if ((dmxdev->dvr_input_buffer_mode ==
			DMX_BUFFER_MODE_EXTERNAL) &&
			(dmxdev->demux->dvr_input.priv_handle)) {
			if (!dmxdev->demux->dvr_input_protected)
				dmxdev->demux->unmap_buffer(dmxdev->demux,
					dmxdev->demux->dvr_input.priv_handle);
			dmxdev->demux->dvr_input.priv_handle = NULL;
		}

		if (dmxdev->dvr_cmd_buffer.data) {
			void *mem = dmxdev->dvr_cmd_buffer.data;
			/*
			 * Ensure all the operations on the DVR command buffer
			 * are completed before it gets freed.
			 */
			mb();
			spin_lock_irq(&dmxdev->dvr_in_lock);
			dmxdev->dvr_cmd_buffer.data = NULL;
			spin_unlock_irq(&dmxdev->dvr_in_lock);
			vfree(mem);
		}
	}
	/* TODO */
	dvbdev->users--;
	/* Last user of an exiting device tears down the fops reference */
	if (dvbdev->users == 1 && dmxdev->exit == 1) {
		fops_put(file->f_op);
		file->f_op = NULL;
		mutex_unlock(&dmxdev->mutex);
		wake_up(&dvbdev->wait_queue);
	} else
		mutex_unlock(&dmxdev->mutex);

	return 0;
}
1040
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301041
1042static int dvb_dvr_mmap(struct file *filp, struct vm_area_struct *vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043{
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301044 struct dvb_device *dvbdev = filp->private_data;
Johannes Stezenbach0c53c702005-05-16 21:54:24 -07001045 struct dmxdev *dmxdev = dvbdev->priv;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301046 struct dvb_ringbuffer *buffer;
1047 enum dmx_buffer_mode buffer_mode;
1048 int vma_size;
1049 int buffer_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050 int ret;
1051
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301052 if (((filp->f_flags & O_ACCMODE) == O_RDONLY) &&
1053 (vma->vm_flags & VM_WRITE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 return -EINVAL;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301055
Ingo Molnar3593cab2006-02-07 06:49:14 -02001056 if (mutex_lock_interruptible(&dmxdev->mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057 return -ERESTARTSYS;
Markus Rechberger57861b42007-04-14 10:19:18 -03001058
1059 if (dmxdev->exit) {
1060 mutex_unlock(&dmxdev->mutex);
1061 return -ENODEV;
1062 }
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301063
1064 if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
1065 buffer = &dmxdev->dvr_buffer;
1066 buffer_mode = dmxdev->dvr_buffer_mode;
1067 } else {
1068 buffer = &dmxdev->dvr_input_buffer;
1069 buffer_mode = dmxdev->dvr_input_buffer_mode;
1070 }
1071
1072 if (buffer_mode == DMX_BUFFER_MODE_EXTERNAL) {
1073 mutex_unlock(&dmxdev->mutex);
1074 return -EINVAL;
1075 }
1076
1077 vma_size = vma->vm_end - vma->vm_start;
1078
1079 /* Make sure requested mapping is not larger than buffer size */
1080 buffer_size = buffer->size + (PAGE_SIZE-1);
1081 buffer_size = buffer_size & ~(PAGE_SIZE-1);
1082
1083 if (vma_size != buffer_size) {
1084 mutex_unlock(&dmxdev->mutex);
1085 return -EINVAL;
1086 }
1087
1088 ret = remap_vmalloc_range(vma, buffer->data, 0);
1089 if (ret) {
1090 mutex_unlock(&dmxdev->mutex);
1091 return ret;
1092 }
1093
1094 vma->vm_flags |= VM_DONTDUMP;
1095 vma->vm_flags |= VM_DONTEXPAND;
1096
Ingo Molnar3593cab2006-02-07 06:49:14 -02001097 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098 return ret;
1099}
1100
/*
 * dvb_dvr_queue_data_feed - queue a DATA_FEED command for the DVR input thread
 *
 * @dmxdev: demux device instance
 * @count: number of bytes newly made available in the DVR input buffer
 *
 * Appends a DVR_DATA_FEED_CMD to the DVR command ring buffer and wakes
 * its waiters. If the most recently queued command is already a data-feed
 * command, its byte count is bumped instead of queuing a new command.
 */
static void dvb_dvr_queue_data_feed(struct dmxdev *dmxdev, size_t count)
{
	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
	struct dvr_command *dvr_cmd;
	int last_dvr_cmd;

	spin_lock(&dmxdev->dvr_in_lock);

	/* Peek at the last DVR command queued, try to coalesce FEED commands */
	if (dvb_ringbuffer_avail(cmdbuf) >= sizeof(*dvr_cmd)) {
		/* Step back one command from the write pointer, with wrap */
		last_dvr_cmd = cmdbuf->pwrite - sizeof(*dvr_cmd);
		if (last_dvr_cmd < 0)
			last_dvr_cmd += cmdbuf->size;

		dvr_cmd = (struct dvr_command *)&cmdbuf->data[last_dvr_cmd];
		if (dvr_cmd->type == DVR_DATA_FEED_CMD) {
			/* Coalesce: grow the pending feed instead of queueing */
			dvr_cmd->cmd.data_feed_count += count;
			spin_unlock(&dmxdev->dvr_in_lock);
			return;
		}
	}

	/*
	 * We assume command buffer is large enough so that overflow should not
	 * happen. Overflow to the command buffer means data previously written
	 * to the input buffer is 'orphan' - does not have a matching FEED
	 * command. Issue a warning if this ever happens.
	 * Orphan data might still be processed if EOS is issued.
	 */
	if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd)) {
		pr_err("%s: DVR command buffer overflow\n", __func__);
		spin_unlock(&dmxdev->dvr_in_lock);
		return;
	}

	/* Write the new command in place and publish it with a single PUSH */
	dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
	dvr_cmd->type = DVR_DATA_FEED_CMD;
	dvr_cmd->cmd.data_feed_count = count;
	DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
	spin_unlock(&dmxdev->dvr_in_lock);

	/* Wake the DVR input thread waiting on new commands */
	wake_up_all(&cmdbuf->queue);
}
1144
1145static int dvb_dvr_external_input_only(struct dmxdev *dmxdev)
1146{
1147 struct dmx_caps caps;
1148 int is_external_only;
1149 int flags;
1150 size_t tsp_size;
1151
1152 if (dmxdev->demux->get_tsp_size)
1153 tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
1154 else
1155 tsp_size = 188;
1156
1157 /*
1158 * For backward compatibility, default assumes that
1159 * external only buffers are not supported.
1160 */
1161 flags = 0;
1162 if (dmxdev->demux->get_caps) {
1163 dmxdev->demux->get_caps(dmxdev->demux, &caps);
1164
1165 if (tsp_size == 188)
1166 flags = caps.playback_188_tsp.flags;
1167 else
1168 flags = caps.playback_192_tsp.flags;
1169 }
1170
1171 if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) &&
1172 (flags & DMX_BUFFER_EXTERNAL_SUPPORT))
1173 is_external_only = 1;
1174 else
1175 is_external_only = 0;
1176
1177 return is_external_only;
1178}
1179
1180static int dvb_dvr_verify_buffer_size(struct dmxdev *dmxdev,
1181 unsigned int f_flags,
1182 unsigned long size)
1183{
1184 struct dmx_caps caps;
1185 int tsp_size;
1186
1187 if (!dmxdev->demux->get_caps)
1188 return 1;
1189
1190 if (dmxdev->demux->get_tsp_size)
1191 tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux);
1192 else
1193 tsp_size = 188;
1194
1195 dmxdev->demux->get_caps(dmxdev->demux, &caps);
1196 if ((f_flags & O_ACCMODE) == O_RDONLY)
1197 return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size,
1198 caps.recording_188_tsp.max_size,
1199 caps.recording_188_tsp.size_alignment)) ||
1200 (tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size,
1201 caps.recording_192_tsp.max_size,
1202 caps.recording_192_tsp.size_alignment));
1203
1204 return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size,
1205 caps.playback_188_tsp.max_size,
1206 caps.playback_188_tsp.size_alignment)) ||
1207 (tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size,
1208 caps.playback_192_tsp.max_size,
1209 caps.playback_192_tsp.size_alignment));
1210}
1211
/*
 * dvb_dvr_write - write playback data into the DVR input ring buffer
 *
 * Copies user data into the DVR input buffer in chunks, queuing a
 * DATA_FEED command for each chunk so the DVR input thread processes it.
 * Blocks until free space is available unless O_NONBLOCK is set.
 *
 * Returns the number of bytes consumed, or a negative error. If some
 * bytes were already written before an error, the byte count is returned.
 */
static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer;
	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
	int ret;
	size_t todo;
	ssize_t free_space;

	if (!dmxdev->demux->write)
		return -EOPNOTSUPP;

	/*
	 * Reject writes on a read-only open, on unallocated buffers, or when
	 * the demux only supports external playback buffers while the input
	 * buffer is still in internal mode.
	 */
	if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags, src->size) ||
		((file->f_flags & O_ACCMODE) == O_RDONLY) ||
		!src->data || !cmdbuf->data ||
		(dvb_dvr_external_input_only(dmxdev) &&
		 (dmxdev->dvr_input_buffer_mode == DMX_BUFFER_MODE_INTERNAL)))
		return -EINVAL;

	if ((file->f_flags & O_NONBLOCK) &&
		(dvb_ringbuffer_free(src) == 0))
		return -EWOULDBLOCK;

	ret = 0;
	for (todo = count; todo > 0; todo -= ret) {
		/* Wait for free space, buffer teardown, error, or exit */
		ret = wait_event_interruptible(src->queue,
			(dvb_ringbuffer_free(src)) ||
			!src->data || !cmdbuf->data ||
			(src->error != 0) || dmxdev->dvr_in_exit);

		if (ret < 0)
			return ret;

		if (mutex_lock_interruptible(&dmxdev->mutex))
			return -ERESTARTSYS;

		/* Buffers were released (e.g. mode switch): end the write */
		if ((!src->data) || (!cmdbuf->data)) {
			mutex_unlock(&dmxdev->mutex);
			return 0;
		}

		if (dmxdev->exit || dmxdev->dvr_in_exit) {
			mutex_unlock(&dmxdev->mutex);
			return -ENODEV;
		}

		/* Sticky buffer error: report it once and clear the buffer */
		if (src->error) {
			ret = src->error;
			dvb_ringbuffer_flush(src);
			mutex_unlock(&dmxdev->mutex);
			wake_up_all(&src->queue);
			return ret;
		}

		/* Copy at most the remaining request into the free space */
		free_space = dvb_ringbuffer_free(src);

		if (free_space > todo)
			free_space = todo;

		ret = dvb_ringbuffer_write_user(src, buf, free_space);

		if (ret < 0) {
			mutex_unlock(&dmxdev->mutex);
			return ret;
		}

		buf += ret;

		/* Tell the DVR input thread about the newly written bytes */
		dvb_dvr_queue_data_feed(dmxdev, ret);

		mutex_unlock(&dmxdev->mutex);
	}

	/* Prefer the written byte count; fall back to the last status */
	return (count - todo) ? (count - todo) : ret;
}
1289
/*
 * dvb_dmxdev_flush_data - discard pending output data of a filter
 *
 * @filter: the filter whose output should be flushed
 * @length: number of pending bytes to drop
 *
 * Drops the bytes from the relevant ring buffer (the DVR output buffer
 * for PES filters routed to DMX_OUT_TS_TAP, the filter's own buffer
 * otherwise), clears the sticky error, resets the event queue, and then
 * asks the underlying feed to flush its own buffer if it supports that.
 */
static int dvb_dmxdev_flush_data(struct dmxdev_filter *filter, size_t length)
{
	int ret = 0;
	unsigned long flags;

	struct dvb_ringbuffer *buffer = &filter->buffer;
	struct dmxdev_events_queue *events = &filter->events;

	/* TS_TAP output of a PES filter lands in the shared DVR buffer */
	if (filter->type == DMXDEV_TYPE_PES &&
		filter->params.pes.output == DMX_OUT_TS_TAP) {
		buffer = &filter->dev->dvr_buffer;
		events = &filter->dev->dvr_output_events;
	}

	/*
	 * Drop 'length' pending data bytes from the ringbuffer and update
	 * event queue accordingly, similarly to dvb_dmxdev_release_data().
	 */
	spin_lock_irqsave(&filter->dev->lock, flags);
	DVB_RINGBUFFER_SKIP(buffer, length);
	buffer->error = 0;
	dvb_dmxdev_flush_events(events);
	events->current_event_start_offset = buffer->pwrite;
	spin_unlock_irqrestore(&filter->dev->lock, flags);

	if (filter->type == DMXDEV_TYPE_PES) {
		struct dmxdev_feed *feed;

		/*
		 * NOTE(review): list_first_entry() assumes the TS feed list
		 * is non-empty here - confirm all callers only flush filters
		 * that have at least one feed attached.
		 */
		feed = list_first_entry(&filter->feed.ts,
			struct dmxdev_feed, next);

		if (feed->ts->flush_buffer)
			return feed->ts->flush_buffer(feed->ts, length);
	} else if (filter->type == DMXDEV_TYPE_SEC &&
		filter->feed.sec.feed->flush_buffer) {
		return filter->feed.sec.feed->flush_buffer(
			filter->feed.sec.feed, length);
	}

	return ret;
}
1331
1332static inline void dvb_dmxdev_auto_flush_buffer(struct dmxdev_filter *filter,
1333 struct dvb_ringbuffer *buf)
1334{
1335 size_t flush_len;
1336
1337 /*
1338 * When buffer overflowed, demux-dev marked the buffer in
1339 * error state. If auto-flush is enabled discard current
1340 * pending data in buffer.
1341 */
1342 if (overflow_auto_flush) {
1343 flush_len = dvb_ringbuffer_avail(buf);
1344 dvb_dmxdev_flush_data(filter, flush_len);
1345 }
1346}
1347
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03001349 loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350{
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301351 ssize_t res;
Johannes Stezenbach0c53c702005-05-16 21:54:24 -07001352 struct dvb_device *dvbdev = file->private_data;
1353 struct dmxdev *dmxdev = dvbdev->priv;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301354 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355
Simon Arlotteda9f752009-05-12 17:39:28 -03001356 if (dmxdev->exit)
Markus Rechberger57861b42007-04-14 10:19:18 -03001357 return -ENODEV;
Markus Rechberger57861b42007-04-14 10:19:18 -03001358
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301359 if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags,
1360 dmxdev->dvr_buffer.size))
1361 return -EINVAL;
1362
1363 res = dvb_dmxdev_buffer_read(NULL, &dmxdev->dvr_buffer,
1364 file->f_flags & O_NONBLOCK,
1365 buf, count, ppos);
1366
1367 if (res > 0) {
1368 dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, res);
1369 spin_lock_irqsave(&dmxdev->lock, flags);
1370 dvb_dmxdev_update_events(&dmxdev->dvr_output_events, res);
1371 spin_unlock_irqrestore(&dmxdev->lock, flags);
1372
1373 /*
1374 * in PULL mode, we might be stalling on
1375 * event queue, so need to wake-up waiters
1376 */
1377 if (dmxdev->playback_mode == DMX_PB_MODE_PULL)
1378 wake_up_all(&dmxdev->dvr_buffer.queue);
1379 } else if (res == -EOVERFLOW) {
1380 dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed,
1381 &dmxdev->dvr_buffer);
1382 }
1383
1384 return res;
1385}
1386
1387/*
1388 * dvb_dvr_push_oob_cmd
1389 *
1390 * Note: this function assume dmxdev->mutex was taken, so command buffer cannot
1391 * be released during its operation.
1392 */
static int dvb_dvr_push_oob_cmd(struct dmxdev *dmxdev, unsigned int f_flags,
		struct dmx_oob_command *cmd)
{
	struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer;
	struct dvr_command *dvr_cmd;

	/* OOB commands apply only to write opens fed from a DVR source */
	if ((f_flags & O_ACCMODE) == O_RDONLY ||
		dmxdev->source < DMX_SOURCE_DVR0)
		return -EPERM;

	if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd))
		return -ENOMEM;

	/*
	 * NOTE(review): unlike dvb_dvr_queue_data_feed(), this writer does
	 * not take dvr_in_lock; it relies on dmxdev->mutex being held by the
	 * caller - confirm the DVR input thread cannot observe a partially
	 * written command.
	 */
	dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite];
	dvr_cmd->type = DVR_OOB_CMD;
	dvr_cmd->cmd.oobcmd = *cmd;
	DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd));
	/* Wake the DVR input thread to process the command */
	wake_up_all(&cmdbuf->queue);

	return 0;
}
1414
1415static int dvb_dvr_flush_buffer(struct dmxdev *dmxdev, unsigned int f_flags)
1416{
1417 size_t flush_len;
1418 int ret;
1419
1420 if ((f_flags & O_ACCMODE) != O_RDONLY)
1421 return -EINVAL;
1422
1423 flush_len = dvb_ringbuffer_avail(&dmxdev->dvr_buffer);
1424 ret = dvb_dmxdev_flush_data(dmxdev->dvr_feed, flush_len);
1425
1426 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427}
1428
Andrea Odettia095be42008-04-20 19:14:51 -03001429static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301430 unsigned int f_flags,
1431 unsigned long size)
Andrea Odettia095be42008-04-20 19:14:51 -03001432{
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301433 struct dvb_ringbuffer *buf;
Andrea Odettia095be42008-04-20 19:14:51 -03001434 void *newmem;
1435 void *oldmem;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301436 spinlock_t *lock;
1437 enum dmx_buffer_mode buffer_mode;
Andrea Odettia095be42008-04-20 19:14:51 -03001438
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301439 pr_debug("function : %s\n", __func__);
1440
1441 if ((f_flags & O_ACCMODE) == O_RDONLY) {
1442 buf = &dmxdev->dvr_buffer;
1443 lock = &dmxdev->lock;
1444 buffer_mode = dmxdev->dvr_buffer_mode;
1445 } else {
1446 buf = &dmxdev->dvr_input_buffer;
1447 lock = &dmxdev->dvr_in_lock;
1448 buffer_mode = dmxdev->dvr_input_buffer_mode;
1449 }
Andrea Odettia095be42008-04-20 19:14:51 -03001450
1451 if (buf->size == size)
1452 return 0;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301453 if (!size || (buffer_mode == DMX_BUFFER_MODE_EXTERNAL))
Andrea Odettia095be42008-04-20 19:14:51 -03001454 return -EINVAL;
1455
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301456 newmem = vmalloc_user(size);
Andrea Odettia095be42008-04-20 19:14:51 -03001457 if (!newmem)
1458 return -ENOMEM;
1459
1460 oldmem = buf->data;
1461
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301462 spin_lock_irq(lock);
1463
1464 if (((f_flags & O_ACCMODE) != O_RDONLY) &&
1465 (dmxdev->dvr_processing_input)) {
1466 spin_unlock_irq(lock);
1467 vfree(oldmem);
1468 return -EBUSY;
1469 }
1470
Andrea Odettia095be42008-04-20 19:14:51 -03001471 buf->data = newmem;
1472 buf->size = size;
1473
1474 /* reset and not flush in case the buffer shrinks */
1475 dvb_ringbuffer_reset(buf);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301476
1477 spin_unlock_irq(lock);
Andrea Odettia095be42008-04-20 19:14:51 -03001478
1479 vfree(oldmem);
1480
1481 return 0;
1482}
1483
/*
 * dvb_dvr_set_buffer_mode - switch a DVR buffer between internal/external
 *
 * @f_flags selects the buffer (output for read-only opens, input for
 * write opens). Switching to internal mode unmaps any external buffer,
 * clears input protection, and installs a default internal buffer.
 * Switching to external mode frees the internal allocation; the user is
 * then expected to provide a buffer via DMX_SET_BUFFER.
 */
static int dvb_dvr_set_buffer_mode(struct dmxdev *dmxdev,
			unsigned int f_flags, enum dmx_buffer_mode mode)
{
	struct dvb_ringbuffer *buf;
	spinlock_t *lock;
	enum dmx_buffer_mode *buffer_mode;
	void **buff_handle;
	void *oldmem;
	int *is_protected;

	if ((mode != DMX_BUFFER_MODE_INTERNAL) &&
		(mode != DMX_BUFFER_MODE_EXTERNAL))
		return -EINVAL;

	/* External mode requires demux map/unmap support */
	if ((mode == DMX_BUFFER_MODE_EXTERNAL) &&
		(!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer))
		return -EINVAL;

	if ((f_flags & O_ACCMODE) == O_RDONLY) {
		buf = &dmxdev->dvr_buffer;
		lock = &dmxdev->lock;
		buffer_mode = &dmxdev->dvr_buffer_mode;
		buff_handle = &dmxdev->dvr_priv_buff_handle;
		is_protected = NULL;
	} else {
		buf = &dmxdev->dvr_input_buffer;
		lock = &dmxdev->dvr_in_lock;
		buffer_mode = &dmxdev->dvr_input_buffer_mode;
		buff_handle = &dmxdev->demux->dvr_input.priv_handle;
		is_protected = &dmxdev->demux->dvr_input_protected;
	}

	if (mode == *buffer_mode)
		return 0;

	/* Detach the current data pointer under the lock before freeing */
	oldmem = buf->data;
	spin_lock_irq(lock);
	buf->data = NULL;
	spin_unlock_irq(lock);

	*buffer_mode = mode;

	if (mode == DMX_BUFFER_MODE_INTERNAL) {
		/* switched from external to internal */
		if (*buff_handle) {
			dmxdev->demux->unmap_buffer(dmxdev->demux,
				*buff_handle);
			*buff_handle = NULL;
		}

		if (is_protected)
			*is_protected = 0;

		/* set default internal buffer */
		dvb_dvr_set_buffer_size(dmxdev, f_flags, DVR_BUFFER_SIZE);
	} else if (oldmem) {
		/* switched from internal to external */
		vfree(oldmem);
	}

	return 0;
}
1546
/*
 * dvb_dvr_set_buffer - install a user-provided (external) DVR buffer
 *
 * @f_flags selects the buffer (output for read-only opens, input for
 * write opens). Only valid in external buffer mode. A protected input
 * buffer (secured playback) is recorded without being mapped into the
 * kernel; any previously installed external buffer is unmapped.
 */
static int dvb_dvr_set_buffer(struct dmxdev *dmxdev,
			unsigned int f_flags, struct dmx_buffer *dmx_buffer)
{
	struct dvb_ringbuffer *buf;
	spinlock_t *lock;
	enum dmx_buffer_mode buffer_mode;
	void **buff_handle;
	void *newmem;
	void *oldmem;
	int *is_protected;
	struct dmx_caps caps;

	if (dmxdev->demux->get_caps)
		dmxdev->demux->get_caps(dmxdev->demux, &caps);
	else
		caps.caps = 0;

	if ((f_flags & O_ACCMODE) == O_RDONLY) {
		buf = &dmxdev->dvr_buffer;
		lock = &dmxdev->lock;
		buffer_mode = dmxdev->dvr_buffer_mode;
		buff_handle = &dmxdev->dvr_priv_buff_handle;
		is_protected = NULL;
	} else {
		buf = &dmxdev->dvr_input_buffer;
		lock = &dmxdev->dvr_in_lock;
		buffer_mode = dmxdev->dvr_input_buffer_mode;
		buff_handle = &dmxdev->demux->dvr_input.priv_handle;
		is_protected = &dmxdev->demux->dvr_input_protected;
		/* Protected input requires explicit demux capability */
		if (!(caps.caps & DMX_CAP_SECURED_INPUT_PLAYBACK) &&
			dmx_buffer->is_protected)
			return -EINVAL;
	}

	if (!dmx_buffer->size ||
		(buffer_mode == DMX_BUFFER_MODE_INTERNAL))
		return -EINVAL;

	oldmem = *buff_handle;

	/*
	 * Protected buffer is relevant only for DVR input buffer
	 * when DVR device is opened for write. In such case,
	 * buffer is mapped only if the buffer is not protected one.
	 */
	if (!is_protected || !dmx_buffer->is_protected) {
		/*
		 * NOTE(review): if map_buffer() fails it may have already
		 * clobbered *buff_handle while oldmem is neither restored
		 * nor unmapped - confirm map_buffer()'s failure contract.
		 */
		if (dmxdev->demux->map_buffer(dmxdev->demux, dmx_buffer,
					buff_handle, &newmem))
			return -ENOMEM;
	} else {
		newmem = NULL;
		*buff_handle = NULL;
	}

	/* Publish the new buffer atomically with respect to producers */
	spin_lock_irq(lock);
	buf->data = newmem;
	buf->size = dmx_buffer->size;
	if (is_protected)
		*is_protected = dmx_buffer->is_protected;
	dvb_ringbuffer_reset(buf);
	spin_unlock_irq(lock);

	if (oldmem)
		dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem);

	return 0;
}
1614
/*
 * dvb_dvr_get_event - dequeue the next DVR output event
 *
 * Only valid on a read-only open. A pending overflow error takes
 * precedence over queued events and is reported as a synthesized
 * DMX_EVENT_BUFFER_OVERFLOW event (clearing the sticky error); the
 * buffer is then auto-flushed if enabled.
 */
static int dvb_dvr_get_event(struct dmxdev *dmxdev,
				unsigned int f_flags,
				struct dmx_filter_event *event)
{
	int res = 0;

	if (!((f_flags & O_ACCMODE) == O_RDONLY))
		return -EINVAL;

	spin_lock_irq(&dmxdev->lock);

	if (dmxdev->dvr_buffer.error == -EOVERFLOW) {
		/* Overflow outranks queued events; consume the error */
		event->type = DMX_EVENT_BUFFER_OVERFLOW;
		dmxdev->dvr_buffer.error = 0;
	} else {
		res = dvb_dmxdev_remove_event(&dmxdev->dvr_output_events,
			event);
		if (res) {
			spin_unlock_irq(&dmxdev->lock);
			return res;
		}
	}

	spin_unlock_irq(&dmxdev->lock);

	/* Flushing may sleep, so it happens outside the spinlock */
	if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
		dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed,
			&dmxdev->dvr_buffer);

	/*
	 * in PULL mode, we might be stalling on
	 * event queue, so need to wake-up waiters
	 */
	if (dmxdev->playback_mode == DMX_PB_MODE_PULL)
		wake_up_all(&dmxdev->dvr_buffer.queue);

	return res;
}
1653
1654static int dvb_dvr_get_buffer_status(struct dmxdev *dmxdev,
1655 unsigned int f_flags,
1656 struct dmx_buffer_status *dmx_buffer_status)
1657{
1658 struct dvb_ringbuffer *buf;
1659 spinlock_t *lock;
1660
1661 if ((f_flags & O_ACCMODE) == O_RDONLY) {
1662 buf = &dmxdev->dvr_buffer;
1663 lock = &dmxdev->lock;
1664 } else {
1665 buf = &dmxdev->dvr_input_buffer;
1666 lock = &dmxdev->dvr_in_lock;
1667 }
1668
1669 spin_lock_irq(lock);
1670
1671 dmx_buffer_status->error = buf->error;
1672 dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
1673 dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
1674 dmx_buffer_status->read_offset = buf->pread;
1675 dmx_buffer_status->write_offset = buf->pwrite;
1676 dmx_buffer_status->size = buf->size;
1677 buf->error = 0;
1678
1679 spin_unlock_irq(lock);
1680
1681 if (dmx_buffer_status->error == -EOVERFLOW)
1682 dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed, buf);
1683
1684 return 0;
1685}
1686
1687static int dvb_dvr_release_data(struct dmxdev *dmxdev,
1688 unsigned int f_flags,
1689 u32 bytes_count)
1690{
1691 ssize_t buff_fullness;
1692
1693 if (!((f_flags & O_ACCMODE) == O_RDONLY))
1694 return -EINVAL;
1695
1696 if (!bytes_count)
1697 return 0;
1698
1699 buff_fullness = dvb_ringbuffer_avail(&dmxdev->dvr_buffer);
1700
1701 if (bytes_count > buff_fullness)
1702 return -EINVAL;
1703
1704 DVB_RINGBUFFER_SKIP(&dmxdev->dvr_buffer, bytes_count);
1705
1706 dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, bytes_count);
1707 spin_lock_irq(&dmxdev->lock);
1708 dvb_dmxdev_update_events(&dmxdev->dvr_output_events, bytes_count);
1709 spin_unlock_irq(&dmxdev->lock);
1710
1711 wake_up_all(&dmxdev->dvr_buffer.queue);
1712 return 0;
1713}
1714
1715/*
1716 * dvb_dvr_feed_data - Notify new data in DVR input buffer
1717 *
1718 * @dmxdev - demux device instance
1719 * @f_flags - demux device file flag (access mode)
1720 * @bytes_count - how many bytes were written to the input buffer
1721 *
1722 * Note: this function assume dmxdev->mutex was taken, so buffer cannot
1723 * be released during its operation.
1724 */
1725static int dvb_dvr_feed_data(struct dmxdev *dmxdev,
1726 unsigned int f_flags,
1727 u32 bytes_count)
1728{
1729 ssize_t free_space;
1730 struct dvb_ringbuffer *buffer = &dmxdev->dvr_input_buffer;
1731
1732 if ((f_flags & O_ACCMODE) == O_RDONLY)
1733 return -EINVAL;
1734
1735 if (!bytes_count)
1736 return 0;
1737
1738 free_space = dvb_ringbuffer_free(buffer);
1739
1740 if (bytes_count > free_space)
1741 return -EINVAL;
1742
1743 DVB_RINGBUFFER_PUSH(buffer, bytes_count);
1744
1745 dvb_dvr_queue_data_feed(dmxdev, bytes_count);
1746
1747 return 0;
1748}
1749
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03001750static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
1751 *dmxdevfilter, int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752{
1753 spin_lock_irq(&dmxdevfilter->dev->lock);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03001754 dmxdevfilter->state = state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 spin_unlock_irq(&dmxdevfilter->dev->lock);
1756}
1757
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03001758static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
1759 unsigned long size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760{
Andreas Oberritter34731df2006-03-14 17:31:01 -03001761 struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
Andrea Odettia095be42008-04-20 19:14:51 -03001762 void *newmem;
1763 void *oldmem;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03001765 if (buf->size == size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 return 0;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301767 if (!size ||
1768 (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL))
Andrea Odettia095be42008-04-20 19:14:51 -03001769 return -EINVAL;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03001770 if (dmxdevfilter->state >= DMXDEV_STATE_GO)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 return -EBUSY;
Andrea Odettia095be42008-04-20 19:14:51 -03001772
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05301773 newmem = vmalloc_user(size);
Andrea Odettia095be42008-04-20 19:14:51 -03001774 if (!newmem)
1775 return -ENOMEM;
1776
1777 oldmem = buf->data;
1778
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 spin_lock_irq(&dmxdevfilter->dev->lock);
Andrea Odettia095be42008-04-20 19:14:51 -03001780 buf->data = newmem;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03001781 buf->size = size;
Andrea Odetti48c01a92008-04-20 18:37:45 -03001782
1783 /* reset and not flush in case the buffer shrinks */
1784 dvb_ringbuffer_reset(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 spin_unlock_irq(&dmxdevfilter->dev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786
Andrea Odettia095be42008-04-20 19:14:51 -03001787 vfree(oldmem);
1788
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 return 0;
1790}
1791
/*
 * dvb_dmxdev_set_buffer_mode - switch a filter buffer internal/external
 *
 * Only while the filter is not running. Switching to internal mode
 * unmaps a previously installed external buffer; switching to external
 * mode frees the internal allocation.
 *
 * NOTE(review): after switching to internal mode, buf->data stays NULL
 * until a buffer size is set - confirm user-space always issues a
 * buffer-size ioctl before restarting the filter.
 */
static int dvb_dmxdev_set_buffer_mode(struct dmxdev_filter *dmxdevfilter,
			enum dmx_buffer_mode mode)
{
	struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
	struct dmxdev *dmxdev = dmxdevfilter->dev;
	void *oldmem;

	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
		return -EBUSY;

	if ((mode != DMX_BUFFER_MODE_INTERNAL) &&
		(mode != DMX_BUFFER_MODE_EXTERNAL))
		return -EINVAL;

	/* External mode requires demux map/unmap support */
	if ((mode == DMX_BUFFER_MODE_EXTERNAL) &&
		(!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer))
		return -EINVAL;

	if (mode == dmxdevfilter->buffer_mode)
		return 0;

	/* Detach the data pointer under the lock before freeing/unmapping */
	oldmem = buf->data;
	spin_lock_irq(&dmxdevfilter->dev->lock);
	buf->data = NULL;
	spin_unlock_irq(&dmxdevfilter->dev->lock);

	dmxdevfilter->buffer_mode = mode;

	if (mode == DMX_BUFFER_MODE_INTERNAL) {
		/* switched from external to internal */
		if (dmxdevfilter->priv_buff_handle) {
			dmxdev->demux->unmap_buffer(dmxdev->demux,
				dmxdevfilter->priv_buff_handle);
			dmxdevfilter->priv_buff_handle = NULL;
		}
	} else if (oldmem) {
		/* switched from internal to external */
		vfree(oldmem);
	}

	return 0;
}
1834
/*
 * dvb_dmxdev_set_buffer - install a user-provided (external) filter buffer
 *
 * Only valid in external buffer mode and while the filter is not
 * running. A previously installed external buffer is unmapped after the
 * new one is published.
 */
static int dvb_dmxdev_set_buffer(struct dmxdev_filter *dmxdevfilter,
					struct dmx_buffer *buffer)
{
	struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
	struct dmxdev *dmxdev = dmxdevfilter->dev;
	void *newmem;
	void *oldmem;

	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
		return -EBUSY;

	if ((!buffer->size) ||
		(dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL))
		return -EINVAL;

	/*
	 * NOTE(review): on map_buffer() failure, priv_buff_handle may have
	 * been clobbered while the old handle (oldmem) is neither restored
	 * nor unmapped - confirm map_buffer()'s failure contract.
	 */
	oldmem = dmxdevfilter->priv_buff_handle;
	if (dmxdev->demux->map_buffer(dmxdev->demux, buffer,
			&dmxdevfilter->priv_buff_handle, &newmem))
		return -ENOMEM;

	/* Publish the new buffer atomically with respect to producers */
	spin_lock_irq(&dmxdevfilter->dev->lock);
	buf->data = newmem;
	buf->size = buffer->size;
	dvb_ringbuffer_reset(buf);
	spin_unlock_irq(&dmxdevfilter->dev->lock);

	if (oldmem)
		dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem);

	return 0;
}
1866
1867static int dvb_dmxdev_set_tsp_out_format(struct dmxdev_filter *dmxdevfilter,
1868 enum dmx_tsp_format_t dmx_tsp_format)
1869{
1870 if (dmxdevfilter->state >= DMXDEV_STATE_GO)
1871 return -EBUSY;
1872
1873 if ((dmx_tsp_format > DMX_TSP_FORMAT_192_HEAD) ||
1874 (dmx_tsp_format < DMX_TSP_FORMAT_188))
1875 return -EINVAL;
1876
1877 dmxdevfilter->dmx_tsp_format = dmx_tsp_format;
1878
1879 return 0;
1880}
1881
1882static int dvb_dmxdev_set_decoder_buffer_size(
1883 struct dmxdev_filter *dmxdevfilter,
1884 unsigned long size)
1885{
1886 struct dmx_caps caps;
1887 struct dmx_demux *demux = dmxdevfilter->dev->demux;
1888
1889 if (demux->get_caps) {
1890 demux->get_caps(demux, &caps);
1891 if (!dvb_dmxdev_verify_buffer_size(size, caps.decoder.max_size,
1892 caps.decoder.size_alignment))
1893 return -EINVAL;
1894 }
1895
1896 if (size == 0)
1897 return -EINVAL;
1898
1899 if (dmxdevfilter->decoder_buffers.buffers_size == size)
1900 return 0;
1901
1902 if (dmxdevfilter->state >= DMXDEV_STATE_GO)
1903 return -EBUSY;
1904
1905 /*
1906 * In case decoder buffers were already set before to some external
1907 * buffers, setting the decoder buffer size alone implies transition
1908 * to internal buffer mode.
1909 */
1910 dmxdevfilter->decoder_buffers.buffers_size = size;
1911 dmxdevfilter->decoder_buffers.buffers_num = 0;
1912 dmxdevfilter->decoder_buffers.is_linear = 0;
1913 return 0;
1914}
1915
1916static int dvb_dmxdev_set_source(struct dmxdev_filter *dmxdevfilter,
1917 dmx_source_t *source)
1918{
1919 int ret = 0;
1920 struct dmxdev *dev;
1921
1922 if (dmxdevfilter->state == DMXDEV_STATE_GO)
1923 return -EBUSY;
1924
1925 dev = dmxdevfilter->dev;
1926 if (dev->demux->set_source)
1927 ret = dev->demux->set_source(dev->demux, source);
1928
1929 if (!ret)
1930 dev->source = *source;
1931
1932 return ret;
1933}
1934
1935static int dvb_dmxdev_reuse_decoder_buf(struct dmxdev_filter *dmxdevfilter,
1936 int cookie)
1937{
1938 struct dmxdev_feed *feed;
1939
1940 if (dmxdevfilter->state != DMXDEV_STATE_GO ||
1941 (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
1942 (dmxdevfilter->params.pes.output != DMX_OUT_DECODER) ||
1943 (dmxdevfilter->events.event_mask.disable_mask &
1944 DMX_EVENT_NEW_ES_DATA))
1945 return -EPERM;
1946
1947 /* Only one feed should be in the list in case of decoder */
1948 feed = list_first_entry(&dmxdevfilter->feed.ts,
1949 struct dmxdev_feed, next);
1950 if (feed && feed->ts && feed->ts->reuse_decoder_buffer)
1951 return feed->ts->reuse_decoder_buffer(feed->ts, cookie);
1952
1953 return -ENODEV;
1954}
1955
1956static int dvb_dmxdev_set_event_mask(struct dmxdev_filter *dmxdevfilter,
1957 struct dmx_events_mask *event_mask)
1958{
1959 if (!event_mask ||
1960 (event_mask->wakeup_threshold >= DMX_EVENT_QUEUE_SIZE))
1961 return -EINVAL;
1962
1963 if (dmxdevfilter->state == DMXDEV_STATE_GO)
1964 return -EBUSY;
1965
1966 /*
1967 * Overflow event is not allowed to be masked.
1968 * This is because if overflow occurs, demux stops outputting data
1969 * until user is notified. If user is using events to read the data,
1970 * the overflow event must be always enabled or otherwise we would
1971 * never recover from overflow state.
1972 */
1973 event_mask->disable_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
1974 event_mask->no_wakeup_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW;
1975
1976 dmxdevfilter->events.event_mask = *event_mask;
1977
1978 return 0;
1979}
1980
1981static int dvb_dmxdev_get_event_mask(struct dmxdev_filter *dmxdevfilter,
1982 struct dmx_events_mask *event_mask)
1983{
1984 if (!event_mask)
1985 return -EINVAL;
1986
1987 *event_mask = dmxdevfilter->events.event_mask;
1988
1989 return 0;
1990}
1991
1992static int dvb_dmxdev_set_indexing_params(struct dmxdev_filter *dmxdevfilter,
1993 struct dmx_indexing_params *idx_params)
1994{
1995 int found_pid;
1996 struct dmxdev_feed *feed;
1997 struct dmxdev_feed *ts_feed = NULL;
1998 struct dmx_caps caps;
1999 int ret = 0;
2000
2001 if (!dmxdevfilter->dev->demux->get_caps)
2002 return -EINVAL;
2003
2004 dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
2005
2006 if (!idx_params ||
2007 !(caps.caps & DMX_CAP_VIDEO_INDEXING) ||
2008 (dmxdevfilter->state < DMXDEV_STATE_SET) ||
2009 (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
2010 ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
2011 (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
2012 return -EINVAL;
2013
2014 if (idx_params->enable && !idx_params->types)
2015 return -EINVAL;
2016
2017 found_pid = 0;
2018 list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
2019 if (feed->pid == idx_params->pid) {
2020 found_pid = 1;
2021 ts_feed = feed;
2022 ts_feed->idx_params = *idx_params;
2023 if ((dmxdevfilter->state == DMXDEV_STATE_GO) &&
2024 ts_feed->ts->set_idx_params)
2025 ret = ts_feed->ts->set_idx_params(
2026 ts_feed->ts, idx_params);
2027 break;
2028 }
2029 }
2030
2031 if (!found_pid)
2032 return -EINVAL;
2033
2034 return ret;
2035}
2036
2037static int dvb_dmxdev_get_scrambling_bits(struct dmxdev_filter *filter,
2038 struct dmx_scrambling_bits *scrambling_bits)
2039{
2040 struct dmxdev_feed *feed;
2041
2042 if (!scrambling_bits ||
2043 (filter->state != DMXDEV_STATE_GO))
2044 return -EINVAL;
2045
2046 if (filter->type == DMXDEV_TYPE_SEC) {
2047 if (filter->feed.sec.feed->get_scrambling_bits)
2048 return filter->feed.sec.feed->get_scrambling_bits(
2049 filter->feed.sec.feed,
2050 &scrambling_bits->value);
2051 return -EINVAL;
2052 }
2053
2054 list_for_each_entry(feed, &filter->feed.ts, next) {
2055 if (feed->pid == scrambling_bits->pid) {
2056 if (feed->ts->get_scrambling_bits)
2057 return feed->ts->get_scrambling_bits(feed->ts,
2058 &scrambling_bits->value);
2059 return -EINVAL;
2060 }
2061 }
2062
2063 return -EINVAL;
2064}
2065
/*
 * dvb_dmxdev_ts_insertion_work() - delayed work that injects TS packets.
 *
 * Runs from the workqueue for one ts_insertion_buffer. Under the filter
 * mutex it validates that the insertion was not aborted and the filter is
 * still running, then samples the feed and the free space; the mutex is
 * dropped before the (potentially slow) insert call. If a repetition time
 * is configured the work re-arms itself.
 */
static void dvb_dmxdev_ts_insertion_work(struct work_struct *worker)
{
	struct ts_insertion_buffer *ts_buffer =
		container_of(to_delayed_work(worker),
			struct ts_insertion_buffer, dwork);
	struct dmxdev_feed *feed;
	size_t free_bytes;
	struct dmx_ts_feed *ts;

	mutex_lock(&ts_buffer->dmxdevfilter->mutex);

	/* Bail out if aborted or the filter was stopped meanwhile */
	if (ts_buffer->abort ||
		(ts_buffer->dmxdevfilter->state != DMXDEV_STATE_GO)) {
		mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
		return;
	}

	/* Only one feed should be in the list for insertion filters */
	feed = list_first_entry(&ts_buffer->dmxdevfilter->feed.ts,
				struct dmxdev_feed, next);
	ts = feed->ts;
	free_bytes = dvb_ringbuffer_free(&ts_buffer->dmxdevfilter->buffer);

	mutex_unlock(&ts_buffer->dmxdevfilter->mutex);

	/*
	 * Insert only if the output buffer has room; otherwise the packets
	 * for this round are silently skipped.
	 * NOTE(review): ts/free_bytes were sampled under the mutex but are
	 * used after it is released - presumably safe because the feed
	 * outlives the work while state==GO; confirm against teardown path.
	 */
	if (ts_buffer->size < free_bytes)
		ts->ts_insertion_insert_buffer(ts,
			ts_buffer->buffer, ts_buffer->size);

	/* Periodic insertion: re-arm unless an abort raced in */
	if (ts_buffer->repetition_time && !ts_buffer->abort)
		schedule_delayed_work(&ts_buffer->dwork,
			msecs_to_jiffies(ts_buffer->repetition_time));
}
2098
2099static void dvb_dmxdev_queue_ts_insertion(
2100 struct ts_insertion_buffer *ts_buffer)
2101{
2102 size_t tsp_size;
2103
2104 if (ts_buffer->dmxdevfilter->dmx_tsp_format == DMX_TSP_FORMAT_188)
2105 tsp_size = 188;
2106 else
2107 tsp_size = 192;
2108
2109 if (ts_buffer->size % tsp_size) {
2110 pr_err("%s: Wrong buffer alignment, size=%zu, tsp_size=%zu\n",
2111 __func__, ts_buffer->size, tsp_size);
2112 return;
2113 }
2114
2115 ts_buffer->abort = 0;
2116 schedule_delayed_work(&ts_buffer->dwork, 0);
2117}
2118
/*
 * dvb_dmxdev_cancel_ts_insertion() - stop a pending/periodic TS insertion.
 *
 * Caller must hold the filter's mutex (checked below). The mutex is
 * temporarily dropped around cancel_delayed_work_sync() because the work
 * handler itself acquires the same mutex - waiting for it while holding
 * the lock would deadlock.
 *
 * NOTE(review): since the mutex is released and re-acquired, any state the
 * caller cached across this call must be re-validated afterwards.
 */
static void dvb_dmxdev_cancel_ts_insertion(
		struct ts_insertion_buffer *ts_buffer)
{
	/*
	 * This function assumes it is called while mutex
	 * of demux filter is taken. Since work in workqueue
	 * captures the filter's mutex to protect against the DB,
	 * mutex needs to be released before waiting for the work
	 * to get finished otherwise work in workqueue will
	 * never be finished.
	 */
	if (!mutex_is_locked(&ts_buffer->dmxdevfilter->mutex)) {
		pr_err("%s: mutex is not locked!\n", __func__);
		return;
	}

	/* Tell an in-flight work iteration not to re-arm itself */
	ts_buffer->abort = 1;

	mutex_unlock(&ts_buffer->dmxdevfilter->mutex);
	cancel_delayed_work_sync(&ts_buffer->dwork);
	mutex_lock(&ts_buffer->dmxdevfilter->mutex);
}
2141
2142static int dvb_dmxdev_set_ts_insertion(struct dmxdev_filter *dmxdevfilter,
2143 struct dmx_set_ts_insertion *params)
2144{
2145 int ret = 0;
2146 int first_buffer;
2147 struct dmxdev_feed *feed;
2148 struct ts_insertion_buffer *ts_buffer;
2149 struct dmx_caps caps;
2150
2151 if (!dmxdevfilter->dev->demux->get_caps)
2152 return -EINVAL;
2153
2154 dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);
2155
2156 if (!params ||
2157 !params->size ||
2158 !(caps.caps & DMX_CAP_TS_INSERTION) ||
2159 (dmxdevfilter->state < DMXDEV_STATE_SET) ||
2160 (dmxdevfilter->type != DMXDEV_TYPE_PES) ||
2161 ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
2162 (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
2163 return -EINVAL;
2164
2165 ts_buffer = vmalloc(sizeof(struct ts_insertion_buffer));
2166 if (!ts_buffer)
2167 return -ENOMEM;
2168
2169 ts_buffer->buffer = vmalloc(params->size);
2170 if (!ts_buffer->buffer) {
2171 vfree(ts_buffer);
2172 return -ENOMEM;
2173 }
2174
2175 if (copy_from_user(ts_buffer->buffer,
2176 params->ts_packets, params->size)) {
2177 vfree(ts_buffer->buffer);
2178 vfree(ts_buffer);
2179 return -EFAULT;
2180 }
2181
2182 if (params->repetition_time &&
2183 params->repetition_time < DMX_MIN_INSERTION_REPETITION_TIME)
2184 params->repetition_time = DMX_MIN_INSERTION_REPETITION_TIME;
2185
2186 ts_buffer->size = params->size;
2187 ts_buffer->identifier = params->identifier;
2188 ts_buffer->repetition_time = params->repetition_time;
2189 ts_buffer->dmxdevfilter = dmxdevfilter;
2190 INIT_DELAYED_WORK(&ts_buffer->dwork, dvb_dmxdev_ts_insertion_work);
2191
2192 first_buffer = list_empty(&dmxdevfilter->insertion_buffers);
2193 list_add_tail(&ts_buffer->next, &dmxdevfilter->insertion_buffers);
2194
2195 if (dmxdevfilter->state != DMXDEV_STATE_GO)
2196 return 0;
2197
2198 feed = list_first_entry(&dmxdevfilter->feed.ts,
2199 struct dmxdev_feed, next);
2200
2201 if (first_buffer && feed->ts->ts_insertion_init)
2202 ret = feed->ts->ts_insertion_init(feed->ts);
2203
2204 if (!ret) {
2205 dvb_dmxdev_queue_ts_insertion(ts_buffer);
2206 } else {
2207 list_del(&ts_buffer->next);
2208 vfree(ts_buffer->buffer);
2209 vfree(ts_buffer);
2210 }
2211
2212 return ret;
2213}
2214
/*
 * dvb_dmxdev_abort_ts_insertion() - remove a registered insertion buffer.
 *
 * Looks up the insertion buffer by @params->identifier, unlinks it, and -
 * if the filter is running - cancels its work; when the last buffer is
 * removed, HW insertion is terminated on the feed. The buffer memory is
 * always freed on success.
 *
 * Return: 0 on success, -EINVAL on bad input/caps/state or unknown
 * identifier, or the feed's ts_insertion_terminate() error.
 */
static int dvb_dmxdev_abort_ts_insertion(struct dmxdev_filter *dmxdevfilter,
		struct dmx_abort_ts_insertion *params)
{
	int ret = 0;
	int found_buffer;
	struct dmxdev_feed *feed;
	struct ts_insertion_buffer *ts_buffer, *tmp;
	struct dmx_caps caps;

	if (!dmxdevfilter->dev->demux->get_caps)
		return -EINVAL;

	dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps);

	/* Same eligibility rules as when the insertion was registered */
	if (!params ||
		!(caps.caps & DMX_CAP_TS_INSERTION) ||
		(dmxdevfilter->state < DMXDEV_STATE_SET) ||
		(dmxdevfilter->type != DMXDEV_TYPE_PES) ||
		((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) &&
		 (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP)))
		return -EINVAL;

	/* Unlink the matching buffer before touching the work/HW */
	found_buffer = 0;
	list_for_each_entry_safe(ts_buffer, tmp,
			&dmxdevfilter->insertion_buffers, next) {
		if (ts_buffer->identifier == params->identifier) {
			list_del(&ts_buffer->next);
			found_buffer = 1;
			break;
		}
	}

	if (!found_buffer)
		return -EINVAL;

	if (dmxdevfilter->state == DMXDEV_STATE_GO) {
		/*
		 * NOTE: this drops and re-takes the filter mutex internally
		 * while waiting for the insertion work to finish.
		 */
		dvb_dmxdev_cancel_ts_insertion(ts_buffer);
		if (list_empty(&dmxdevfilter->insertion_buffers)) {
			/* Only one feed is in the list for insertion */
			feed = list_first_entry(&dmxdevfilter->feed.ts,
				struct dmxdev_feed, next);
			if (feed->ts->ts_insertion_terminate)
				ret = feed->ts->ts_insertion_terminate(
						feed->ts);
		}
	}

	vfree(ts_buffer->buffer);
	vfree(ts_buffer);

	return ret;
}
2266
/*
 * dvb_dmxdev_ts_fullness_callback() - pull-mode flow control for TS feeds.
 *
 * Called by the demux before it writes @required_space bytes to the output
 * ringbuffer. Blocks (when @wait is set) until both the data buffer and
 * the event queue have room, so the producer never overruns the reader.
 *
 * Return: 0 when space is available; -ENOSPC when @wait is 0 and there is
 * no room; -ENODEV on device teardown; -EINVAL if the buffer was released
 * or the filter stopped; the buffer's error code or the interrupted-wait
 * error otherwise.
 */
static int dvb_dmxdev_ts_fullness_callback(struct dmx_ts_feed *filter,
				int required_space, int wait)
{
	struct dmxdev_filter *dmxdevfilter = filter->priv;
	struct dvb_ringbuffer *src;
	struct dmxdev_events_queue *events;
	int ret;

	if (!dmxdevfilter) {
		pr_err("%s: NULL demux filter object!\n", __func__);
		return -ENODEV;
	}

	/* DVR output (DMX_OUT_TS_TAP) shares the device-wide DVR buffer */
	if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
		src = &dmxdevfilter->buffer;
		events = &dmxdevfilter->events;
	} else {
		src = &dmxdevfilter->dev->dvr_buffer;
		events = &dmxdevfilter->dev->dvr_output_events;
	}

	do {
		ret = 0;

		if (dmxdevfilter->dev->dvr_in_exit)
			return -ENODEV;

		spin_lock(&dmxdevfilter->dev->lock);

		/* Buffer gone or filter stopped while we slept */
		if ((!src->data) ||
			(dmxdevfilter->state != DMXDEV_STATE_GO))
			ret = -EINVAL;
		else if (src->error)
			ret = src->error;

		if (ret) {
			spin_unlock(&dmxdevfilter->dev->lock);
			return ret;
		}

		/* Need room for both the data and its queued event */
		if ((required_space <= dvb_ringbuffer_free(src)) &&
			(!dvb_dmxdev_events_is_full(events))) {
			spin_unlock(&dmxdevfilter->dev->lock);
			return 0;
		}

		spin_unlock(&dmxdevfilter->dev->lock);

		if (!wait)
			return -ENOSPC;

		/*
		 * Sleep until the reader frees space/event slots, or the
		 * filter stops, errors out, or the device is closing; the
		 * condition is then re-checked under the lock above.
		 */
		ret = wait_event_interruptible(src->queue,
				(!src->data) ||
				((dvb_ringbuffer_free(src) >= required_space) &&
				 (!dvb_dmxdev_events_is_full(events))) ||
				(src->error != 0) ||
				(dmxdevfilter->state != DMXDEV_STATE_GO) ||
				dmxdevfilter->dev->dvr_in_exit);

		if (ret < 0)
			return ret;
	} while (1);
}
2330
/*
 * dvb_dmxdev_sec_fullness_callback() - pull-mode flow control for section
 * feeds.
 *
 * Section-filter counterpart of dvb_dmxdev_ts_fullness_callback(): blocks
 * (when @wait is set) until the filter's own ringbuffer and event queue
 * can accept @required_space bytes plus one event. Sections always use the
 * per-filter buffer, never the shared DVR buffer.
 *
 * Return: 0 when space is available; -ENOSPC when @wait is 0 and there is
 * no room; -ENODEV on device teardown; -EINVAL if the buffer was released
 * or the filter stopped; the buffer's error code or the interrupted-wait
 * error otherwise.
 */
static int dvb_dmxdev_sec_fullness_callback(
				struct dmx_section_filter *filter,
				int required_space, int wait)
{
	struct dmxdev_filter *dmxdevfilter = filter->priv;
	struct dvb_ringbuffer *src;
	struct dmxdev_events_queue *events;
	int ret;

	if (!dmxdevfilter) {
		pr_err("%s: NULL demux filter object!\n", __func__);
		return -ENODEV;
	}

	src = &dmxdevfilter->buffer;
	events = &dmxdevfilter->events;

	do {
		ret = 0;

		if (dmxdevfilter->dev->dvr_in_exit)
			return -ENODEV;

		spin_lock(&dmxdevfilter->dev->lock);

		/* Buffer gone or filter stopped while we slept */
		if ((!src->data) ||
			(dmxdevfilter->state != DMXDEV_STATE_GO))
			ret = -EINVAL;
		else if (src->error)
			ret = src->error;

		if (ret) {
			spin_unlock(&dmxdevfilter->dev->lock);
			return ret;
		}

		/* Need room for both the data and its queued event */
		if ((required_space <= dvb_ringbuffer_free(src)) &&
			(!dvb_dmxdev_events_is_full(events))) {
			spin_unlock(&dmxdevfilter->dev->lock);
			return 0;
		}

		spin_unlock(&dmxdevfilter->dev->lock);

		if (!wait)
			return -ENOSPC;

		/*
		 * Sleep until the reader frees space/event slots, or the
		 * filter stops, errors out, or the device is closing; the
		 * condition is then re-checked under the lock above.
		 */
		ret = wait_event_interruptible(src->queue,
				(!src->data) ||
				((dvb_ringbuffer_free(src) >= required_space) &&
				 (!dvb_dmxdev_events_is_full(events))) ||
				(src->error != 0) ||
				(dmxdevfilter->state != DMXDEV_STATE_GO) ||
				dmxdevfilter->dev->dvr_in_exit);

		if (ret < 0)
			return ret;
	} while (1);
}
2390
2391static int dvb_dmxdev_set_playback_mode(struct dmxdev_filter *dmxdevfilter,
2392 enum dmx_playback_mode_t playback_mode)
2393{
2394 struct dmxdev *dmxdev = dmxdevfilter->dev;
2395 struct dmx_caps caps;
2396
2397 if (dmxdev->demux->get_caps)
2398 dmxdev->demux->get_caps(dmxdev->demux, &caps);
2399 else
2400 caps.caps = 0;
2401
2402 if ((playback_mode != DMX_PB_MODE_PUSH) &&
2403 (playback_mode != DMX_PB_MODE_PULL))
2404 return -EINVAL;
2405
2406 if (dmxdev->demux->set_playback_mode == NULL)
2407 return -EINVAL;
2408
2409 if (((dmxdev->source < DMX_SOURCE_DVR0) ||
2410 !(caps.caps & DMX_CAP_PULL_MODE)) &&
2411 (playback_mode == DMX_PB_MODE_PULL))
2412 return -EPERM;
2413
2414 if (dmxdevfilter->state == DMXDEV_STATE_GO)
2415 return -EBUSY;
2416
2417 dmxdev->playback_mode = playback_mode;
2418
2419 return dmxdev->demux->set_playback_mode(
2420 dmxdev->demux,
2421 dmxdev->playback_mode,
2422 dvb_dmxdev_ts_fullness_callback,
2423 dvb_dmxdev_sec_fullness_callback);
2424}
2425
2426static int dvb_dmxdev_flush_buffer(struct dmxdev_filter *filter)
2427{
2428 size_t flush_len;
2429 int ret;
2430
2431 if (filter->state != DMXDEV_STATE_GO)
2432 return -EINVAL;
2433
2434 flush_len = dvb_ringbuffer_avail(&filter->buffer);
2435 ret = dvb_dmxdev_flush_data(filter, flush_len);
2436
2437 return ret;
2438}
2439
/*
 * dvb_dmxdev_get_buffer_status() - report fill level of the filter's output.
 *
 * For decoder-output PES filters the status of the decoder's own buffer is
 * queried from the underlying HW; for all other filters the status of the
 * demux-to-userspace ringbuffer is sampled under the device spinlock. Any
 * pending buffer error is reported once and then cleared; an overflow
 * error additionally triggers an automatic flush.
 *
 * Return: 0 on success, -EINVAL if the buffer was released, -ENODEV when
 * the HW cannot report decoder buffer status, or the HW status code.
 */
static int dvb_dmxdev_get_buffer_status(
		struct dmxdev_filter *dmxdevfilter,
		struct dmx_buffer_status *dmx_buffer_status)
{
	struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;

	/*
	 * Note: Taking the dmxdevfilter->dev->lock spinlock is required only
	 * when getting the status of the Demux-userspace data ringbuffer .
	 * In case we are getting the status of a decoder buffer, taking this
	 * spinlock is not required and in fact might lead to a deadlock.
	 */
	if ((dmxdevfilter->type == DMXDEV_TYPE_PES) &&
		(dmxdevfilter->params.pes.output == DMX_OUT_DECODER)) {
		struct dmxdev_feed *feed;
		int ret;

		/* Only one feed should be in the list in case of decoder */
		feed = list_first_entry(&dmxdevfilter->feed.ts,
					struct dmxdev_feed, next);

		/* Ask for status of decoder's buffer from underlying HW */
		if (feed->ts->get_decoder_buff_status)
			ret = feed->ts->get_decoder_buff_status(
					feed->ts,
					dmx_buffer_status);
		else
			ret = -ENODEV;

		return ret;
	}

	spin_lock_irq(&dmxdevfilter->dev->lock);

	if (!buf->data) {
		spin_unlock_irq(&dmxdevfilter->dev->lock);
		return -EINVAL;
	}

	/* Snapshot ringbuffer state atomically w.r.t. the demux callbacks */
	dmx_buffer_status->error = buf->error;
	dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
	dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
	dmx_buffer_status->read_offset = buf->pread;
	dmx_buffer_status->write_offset = buf->pwrite;
	dmx_buffer_status->size = buf->size;
	buf->error = 0;	/* error is reported exactly once */

	spin_unlock_irq(&dmxdevfilter->dev->lock);

	/* On overflow, recover by flushing (outside the spinlock) */
	if (dmx_buffer_status->error == -EOVERFLOW)
		dvb_dmxdev_auto_flush_buffer(dmxdevfilter, buf);

	return 0;
}
2494
2495static int dvb_dmxdev_release_data(struct dmxdev_filter *dmxdevfilter,
2496 u32 bytes_count)
2497{
2498 ssize_t buff_fullness;
2499
2500 if (!dmxdevfilter->buffer.data)
2501 return -EINVAL;
2502
2503 if (!bytes_count)
2504 return 0;
2505
2506 buff_fullness = dvb_ringbuffer_avail(&dmxdevfilter->buffer);
2507
2508 if (bytes_count > buff_fullness)
2509 return -EINVAL;
2510
2511 DVB_RINGBUFFER_SKIP(&dmxdevfilter->buffer, bytes_count);
2512
2513 dvb_dmxdev_notify_data_read(dmxdevfilter, bytes_count);
2514 spin_lock_irq(&dmxdevfilter->dev->lock);
2515 dvb_dmxdev_update_events(&dmxdevfilter->events, bytes_count);
2516 spin_unlock_irq(&dmxdevfilter->dev->lock);
2517
2518 wake_up_all(&dmxdevfilter->buffer.queue);
2519
2520 return 0;
2521}
2522
/*
 * dvb_dmxdev_get_event() - dequeue the next filter event for userspace.
 *
 * Overflow takes priority over queued events so the user always learns
 * about lost data first. Overflow/timeout errors are cleared once
 * reported; an overflow additionally triggers an automatic buffer flush
 * (done outside the spinlock). The lock is dropped and re-taken around the
 * flush, which is why the function locks twice.
 *
 * Return: 0 on success, or dvb_dmxdev_remove_event()'s error when the
 * queue is empty.
 */
static int dvb_dmxdev_get_event(struct dmxdev_filter *dmxdevfilter,
			struct dmx_filter_event *event)
{
	int res = 0;

	spin_lock_irq(&dmxdevfilter->dev->lock);

	/* Check first for filter overflow */
	if (dmxdevfilter->buffer.error == -EOVERFLOW) {
		event->type = DMX_EVENT_BUFFER_OVERFLOW;
	} else {
		res = dvb_dmxdev_remove_event(&dmxdevfilter->events, event);
		if (res) {
			spin_unlock_irq(&dmxdevfilter->dev->lock);
			return res;
		}
	}

	/* clear buffer error now that user was notified */
	if (event->type == DMX_EVENT_BUFFER_OVERFLOW ||
		event->type == DMX_EVENT_SECTION_TIMEOUT)
		dmxdevfilter->buffer.error = 0;

	spin_unlock_irq(&dmxdevfilter->dev->lock);

	/* Recover from overflow by flushing; must not hold the spinlock */
	if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
		dvb_dmxdev_auto_flush_buffer(dmxdevfilter,
			&dmxdevfilter->buffer);

	spin_lock_irq(&dmxdevfilter->dev->lock);

	/*
	 * If no-data events are enabled on this filter,
	 * the events can be removed from the queue when
	 * user gets them.
	 * For filters with data events enabled, the event is removed
	 * from the queue only when the respective data is read.
	 */
	if (event->type != DMX_EVENT_BUFFER_OVERFLOW &&
		dmxdevfilter->events.data_read_event_masked)
		dmxdevfilter->events.read_index =
			dvb_dmxdev_advance_event_idx(
				dmxdevfilter->events.read_index);

	spin_unlock_irq(&dmxdevfilter->dev->lock);

	/*
	 * in PULL mode, we might be stalling on
	 * event queue, so need to wake-up waiters
	 */
	if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL)
		wake_up_all(&dmxdevfilter->buffer.queue);

	return res;
}
2578
/*
 * dvb_dmxdev_filter_timeout() - section-filter timeout timer handler.
 *
 * Fires when no matching section arrived within the configured timeout:
 * flags the buffer with -ETIMEDOUT, moves the filter to TIMEDOUT state,
 * queues a DMX_EVENT_SECTION_TIMEOUT event and wakes any readers.
 *
 * NOTE(review): buffer.error is written before the spinlock is taken
 * (matches the historical upstream code) - presumably benign since readers
 * re-check it under the lock; confirm if strict ordering is required.
 */
static void dvb_dmxdev_filter_timeout(unsigned long data)
{
	struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
	struct dmx_filter_event event;

	dmxdevfilter->buffer.error = -ETIMEDOUT;
	spin_lock_irq(&dmxdevfilter->dev->lock);
	dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
	event.type = DMX_EVENT_SECTION_TIMEOUT;
	dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
	spin_unlock_irq(&dmxdevfilter->dev->lock);
	wake_up_all(&dmxdevfilter->buffer.queue);
}
2592
2593static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
2594{
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03002595 struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596
2597 del_timer(&dmxdevfilter->timer);
2598 if (para->timeout) {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03002599 dmxdevfilter->timer.function = dvb_dmxdev_filter_timeout;
2600 dmxdevfilter->timer.data = (unsigned long)dmxdevfilter;
2601 dmxdevfilter->timer.expires =
2602 jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 add_timer(&dmxdevfilter->timer);
2604 }
2605}
2606
/*
 * dvb_dmxdev_section_callback() - demux callback delivering one section.
 *
 * Runs in demux context under the device spinlock. Copies the section
 * (possibly split across @buffer1/@buffer2 at the ringbuffer wrap point)
 * into the filter's output buffer and queues a DMX_EVENT_NEW_SECTION
 * event. A NULL/NULL call with zero length signals a section dropped for
 * CRC error. On insufficient buffer space the buffer is flagged with
 * -EOVERFLOW and the section is dropped.
 *
 * Return: 0 in all handled cases, -EINVAL if the filter pointer is NULL.
 */
static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
			const u8 *buffer2, size_t buffer2_len,
			struct dmx_section_filter *filter)
{
	struct dmxdev_filter *dmxdevfilter = filter->priv;
	struct dmx_filter_event event;
	ssize_t free;


	if (!dmxdevfilter) {
		pr_err("%s: null filter.\n", __func__);
		return -EINVAL;
	}

	spin_lock(&dmxdevfilter->dev->lock);

	/* Drop data while in error, stopped, or end-of-stream state */
	if (dmxdevfilter->buffer.error ||
		dmxdevfilter->state != DMXDEV_STATE_GO ||
		dmxdevfilter->eos_state) {
		spin_unlock(&dmxdevfilter->dev->lock);
		return 0;
	}

	/* Discard section data if event cannot be notified */
	if (!(dmxdevfilter->events.event_mask.disable_mask &
		DMX_EVENT_NEW_SECTION) &&
		dvb_dmxdev_events_is_full(&dmxdevfilter->events)) {
		spin_unlock(&dmxdevfilter->dev->lock);
		return 0;
	}

	if ((buffer1_len + buffer2_len) == 0) {
		if (buffer1 == NULL && buffer2 == NULL) {
			/* Section was dropped due to CRC error */
			event.type = DMX_EVENT_SECTION_CRC_ERROR;
			dvb_dmxdev_add_event(&dmxdevfilter->events, &event);

			spin_unlock(&dmxdevfilter->dev->lock);
			wake_up_all(&dmxdevfilter->buffer.queue);
		} else {
			spin_unlock(&dmxdevfilter->dev->lock);
		}

		return 0;
	}

	/* Section payload starts at the current write offset */
	event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
	event.params.section.start_offset = dmxdevfilter->buffer.pwrite;

	/* A section arrived - stop the section timeout timer */
	del_timer(&dmxdevfilter->timer);

	/* Verify output buffer has sufficient space, or report overflow */
	free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
	if (free < (buffer1_len + buffer2_len)) {
		pr_debug("%s: section filter overflow (pid=%u)\n",
			__func__, dmxdevfilter->params.sec.pid);
		dmxdevfilter->buffer.error = -EOVERFLOW;
		spin_unlock(&dmxdevfilter->dev->lock);
		wake_up_all(&dmxdevfilter->buffer.queue);
		return 0;
	}

	dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1, buffer1_len);
	dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2, buffer2_len);

	event.type = DMX_EVENT_NEW_SECTION;
	event.params.section.total_length = buffer1_len + buffer2_len;
	event.params.section.actual_length =
		event.params.section.total_length;

	dvb_dmxdev_add_event(&dmxdevfilter->events, &event);

	/* One-shot filters stop after delivering a single section */
	if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
		dmxdevfilter->state = DMXDEV_STATE_DONE;
	spin_unlock(&dmxdevfilter->dev->lock);
	wake_up_all(&dmxdevfilter->buffer.queue);
	return 0;
}
2685
/*
 * dvb_dmxdev_ts_callback() - demux callback delivering TS/PES data.
 *
 * Runs in demux context under the device spinlock. Writes the payload
 * (split across @buffer1/@buffer2 at the source wrap point) into either
 * the filter's own buffer or, for DMX_OUT_TS_TAP, the shared device DVR
 * buffer. For recording outputs a DMX_EVENT_NEW_REC_CHUNK event is queued
 * every time the accumulated data reaches the configured chunk size.
 *
 * Return: 0 on success or silent drop, the buffer's pending error code, or
 * -EOVERFLOW when the payload does not fit; -EINVAL for a NULL filter.
 */
static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
			const u8 *buffer2, size_t buffer2_len,
			struct dmx_ts_feed *feed)
{
	struct dmxdev_filter *dmxdevfilter = feed->priv;
	struct dvb_ringbuffer *buffer;
	struct dmxdev_events_queue *events;
	struct dmx_filter_event event;
	ssize_t free;

	if (!dmxdevfilter) {
		pr_err("%s: null filter (feed->is_filtering=%d)\n",
			__func__, feed->is_filtering);
		return -EINVAL;
	}
	spin_lock(&dmxdevfilter->dev->lock);

	/* Decoder output is delivered via the event callback, not here */
	if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER ||
		dmxdevfilter->state != DMXDEV_STATE_GO ||
		dmxdevfilter->eos_state) {
		spin_unlock(&dmxdevfilter->dev->lock);
		return 0;
	}

	/* DMX_OUT_TS_TAP data goes to the shared device DVR buffer */
	if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
		buffer = &dmxdevfilter->buffer;
		events = &dmxdevfilter->events;
	} else {
		buffer = &dmxdevfilter->dev->dvr_buffer;
		events = &dmxdevfilter->dev->dvr_output_events;
	}

	/* A pending error stops output until userspace is notified */
	if (buffer->error) {
		spin_unlock(&dmxdevfilter->dev->lock);
		wake_up_all(&buffer->queue);
		return buffer->error;
	}

	/* First byte of a new recording chunk - remember where it starts */
	if (!events->current_event_data_size)
		events->current_event_start_offset = buffer->pwrite;

	/* Verify output buffer has sufficient space, or report overflow */
	free = dvb_ringbuffer_free(buffer);
	if (free < (buffer1_len + buffer2_len)) {
		pr_debug("%s: buffer overflow error, pid=%u\n",
			__func__, dmxdevfilter->params.pes.pid);
		buffer->error = -EOVERFLOW;
		spin_unlock(&dmxdevfilter->dev->lock);
		wake_up_all(&buffer->queue);

		return -EOVERFLOW;
	}

	if (buffer1_len + buffer2_len) {
		dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
		dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);

		events->current_event_data_size += (buffer1_len + buffer2_len);

		/* Emit a recording-chunk event once enough data accumulated */
		if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP ||
			dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
			&& events->current_event_data_size >=
				dmxdevfilter->params.pes.rec_chunk_size) {
			event.type = DMX_EVENT_NEW_REC_CHUNK;
			event.params.recording_chunk.offset =
				events->current_event_start_offset;
			event.params.recording_chunk.size =
				events->current_event_data_size;

			dvb_dmxdev_add_event(events, &event);
			events->current_event_data_size = 0;
		}
	}

	spin_unlock(&dmxdevfilter->dev->lock);
	wake_up_all(&buffer->queue);
	return 0;
}
2764
/*
 * dvb_dmxdev_section_event_cb() - event-style demux callback for sections.
 *
 * Used when the demux reports section data through dmx_data_ready instead
 * of copying it via dvb_dmxdev_section_callback(). Zero-length
 * notifications carry pure status (CRC error, EOS, marker, scrambling
 * change, overrun); non-zero length means @data_length bytes were already
 * placed at the buffer's write offset by the demux and only the write
 * pointer plus a DMX_EVENT_NEW_SECTION event need to be published.
 *
 * Return: 0 for status events and drops, dvb_dmxdev_add_event() status for
 * data events, -EINVAL for a NULL filter.
 */
static int dvb_dmxdev_section_event_cb(struct dmx_section_filter *filter,
			struct dmx_data_ready *dmx_data_ready)
{
	int res = 0;
	struct dmxdev_filter *dmxdevfilter = filter->priv;
	struct dmx_filter_event event;
	ssize_t free;

	if (!dmxdevfilter) {
		pr_err("%s: null filter. event type=%d (length=%d) will be discarded\n",
			__func__, dmx_data_ready->status,
			dmx_data_ready->data_length);
		return -EINVAL;
	}

	spin_lock(&dmxdevfilter->dev->lock);

	/* Drop events while timed out, stopped, or in end-of-stream state */
	if (dmxdevfilter->buffer.error == -ETIMEDOUT ||
		dmxdevfilter->state != DMXDEV_STATE_GO ||
		dmxdevfilter->eos_state) {
		spin_unlock(&dmxdevfilter->dev->lock);
		return 0;
	}

	if (dmx_data_ready->data_length == 0) {
		if (dmx_data_ready->status == DMX_CRC_ERROR) {
			/* Section was dropped due to CRC error */
			event.type = DMX_EVENT_SECTION_CRC_ERROR;
			dvb_dmxdev_add_event(&dmxdevfilter->events, &event);

			spin_unlock(&dmxdevfilter->dev->lock);
			wake_up_all(&dmxdevfilter->buffer.queue);
		} else if (dmx_data_ready->status == DMX_OK_EOS) {
			event.type = DMX_EVENT_EOS;
			dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
			spin_unlock(&dmxdevfilter->dev->lock);
			wake_up_all(&dmxdevfilter->buffer.queue);
		} else if (dmx_data_ready->status == DMX_OK_MARKER) {
			event.type = DMX_EVENT_MARKER;
			event.params.marker.id = dmx_data_ready->marker.id;
			dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
			spin_unlock(&dmxdevfilter->dev->lock);
			wake_up_all(&dmxdevfilter->buffer.queue);
		} else if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) {
			event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE;
			event.params.scrambling_status =
				dmx_data_ready->scrambling_bits;
			dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
			spin_unlock(&dmxdevfilter->dev->lock);
			wake_up_all(&dmxdevfilter->buffer.queue);
		} else if (dmx_data_ready->status == DMX_OVERRUN_ERROR) {
			pr_debug("dmxdev: section filter overflow (pid=%u)\n",
				dmxdevfilter->params.sec.pid);
			/* Set buffer error to notify user overflow occurred */
			dmxdevfilter->buffer.error = -EOVERFLOW;
			spin_unlock(&dmxdevfilter->dev->lock);
			wake_up_all(&dmxdevfilter->buffer.queue);
		} else {
			spin_unlock(&dmxdevfilter->dev->lock);
		}
		return 0;
	}

	/* Data event: section bytes are already at the write offset */
	event.type = DMX_EVENT_NEW_SECTION;
	event.params.section.base_offset = dmxdevfilter->buffer.pwrite;
	event.params.section.start_offset = dmxdevfilter->buffer.pwrite;
	event.params.section.total_length = dmx_data_ready->data_length;
	event.params.section.actual_length = dmx_data_ready->data_length;

	/* Flag continuity-counter loss detected by the demux */
	if (dmx_data_ready->status == DMX_MISSED_ERROR)
		event.params.section.flags = DMX_FILTER_CC_ERROR;
	else
		event.params.section.flags = 0;

	free = dvb_ringbuffer_free(&dmxdevfilter->buffer);
	if (free < dmx_data_ready->data_length) {
		pr_err("%s: invalid data length: data_length=%d > free=%zd\n",
			__func__, dmx_data_ready->data_length, free);
	} else {
		res = dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
		DVB_RINGBUFFER_PUSH(&dmxdevfilter->buffer,
			dmx_data_ready->data_length);
	}

	spin_unlock(&dmxdevfilter->dev->lock);
	wake_up_all(&dmxdevfilter->buffer.queue);

	return res;
}
2854
2855static int dvb_dmxdev_ts_event_cb(struct dmx_ts_feed *feed,
2856 struct dmx_data_ready *dmx_data_ready)
2857{
2858 struct dmxdev_filter *dmxdevfilter = feed->priv;
2859 struct dvb_ringbuffer *buffer;
2860 struct dmxdev_events_queue *events;
2861 struct dmx_filter_event event;
2862 ssize_t free;
2863
2864 if (!dmxdevfilter) {
2865 pr_err("%s: null filter (feed->is_filtering=%d) event type=%d (length=%d) will be discarded\n",
2866 __func__, feed->is_filtering,
2867 dmx_data_ready->status,
2868 dmx_data_ready->data_length);
2869 return -EINVAL;
2870 }
2871
2872 spin_lock(&dmxdevfilter->dev->lock);
2873
2874 if (dmxdevfilter->state != DMXDEV_STATE_GO ||
2875 dmxdevfilter->eos_state) {
2876 spin_unlock(&dmxdevfilter->dev->lock);
2877 return 0;
2878 }
2879
2880 if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
2881 buffer = &dmxdevfilter->buffer;
2882 events = &dmxdevfilter->events;
2883 } else {
2884 buffer = &dmxdevfilter->dev->dvr_buffer;
2885 events = &dmxdevfilter->dev->dvr_output_events;
2886 }
2887
2888 if (!buffer->error && dmx_data_ready->status == DMX_OVERRUN_ERROR) {
2889 pr_debug("dmxdev: %s filter buffer overflow (pid=%u)\n",
2890 dmxdevfilter->params.pes.output == DMX_OUT_DECODER ?
2891 "decoder" : "",
2892 dmxdevfilter->params.pes.pid);
2893 /* Set buffer error to notify user overflow occurred */
2894 buffer->error = -EOVERFLOW;
2895 spin_unlock(&dmxdevfilter->dev->lock);
2896 wake_up_all(&buffer->queue);
2897 return 0;
2898 }
2899
2900 if (dmx_data_ready->status == DMX_OK_EOS) {
2901 /* Report partial recording chunk */
2902 if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP ||
2903 dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
2904 && events->current_event_data_size) {
2905 event.type = DMX_EVENT_NEW_REC_CHUNK;
2906 event.params.recording_chunk.offset =
2907 events->current_event_start_offset;
2908 event.params.recording_chunk.size =
2909 events->current_event_data_size;
2910 events->current_event_start_offset =
2911 (events->current_event_start_offset +
2912 events->current_event_data_size) %
2913 buffer->size;
2914 events->current_event_data_size = 0;
2915 dvb_dmxdev_add_event(events, &event);
2916 }
2917
2918 dmxdevfilter->eos_state = 1;
2919 pr_debug("dmxdev: DMX_OK_EOS - entering EOS state\n");
2920 event.type = DMX_EVENT_EOS;
2921 dvb_dmxdev_add_event(events, &event);
2922 spin_unlock(&dmxdevfilter->dev->lock);
2923 wake_up_all(&buffer->queue);
2924 return 0;
2925 }
2926
2927 if (dmx_data_ready->status == DMX_OK_MARKER) {
2928 pr_debug("dmxdev: DMX_OK_MARKER - id=%llu\n",
2929 dmx_data_ready->marker.id);
2930 event.type = DMX_EVENT_MARKER;
2931 event.params.marker.id = dmx_data_ready->marker.id;
2932 dvb_dmxdev_add_event(events, &event);
2933 spin_unlock(&dmxdevfilter->dev->lock);
2934 wake_up_all(&buffer->queue);
2935 return 0;
2936 }
2937
2938 if (dmx_data_ready->status == DMX_OK_PCR) {
2939 pr_debug("dmxdev: event callback DMX_OK_PCR\n");
2940 event.type = DMX_EVENT_NEW_PCR;
2941 event.params.pcr.pcr = dmx_data_ready->pcr.pcr;
2942 event.params.pcr.stc = dmx_data_ready->pcr.stc;
2943 if (dmx_data_ready->pcr.disc_indicator_set)
2944 event.params.pcr.flags =
2945 DMX_FILTER_DISCONTINUITY_INDICATOR;
2946 else
2947 event.params.pcr.flags = 0;
2948
2949 dvb_dmxdev_add_event(events, &event);
2950 spin_unlock(&dmxdevfilter->dev->lock);
2951 wake_up_all(&buffer->queue);
2952 return 0;
2953 }
2954
2955 if (dmx_data_ready->status == DMX_OK_IDX) {
2956 pr_debug("dmxdev: event callback DMX_OK_IDX\n");
2957 event.type = DMX_EVENT_NEW_INDEX_ENTRY;
2958 event.params.index = dmx_data_ready->idx_event;
2959
2960 dvb_dmxdev_add_event(events, &event);
2961 spin_unlock(&dmxdevfilter->dev->lock);
2962 wake_up_all(&buffer->queue);
2963 return 0;
2964 }
2965
2966 if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) {
2967 event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE;
2968 event.params.scrambling_status =
2969 dmx_data_ready->scrambling_bits;
2970 dvb_dmxdev_add_event(events, &event);
2971 spin_unlock(&dmxdevfilter->dev->lock);
2972 wake_up_all(&buffer->queue);
2973 return 0;
2974 }
2975
2976 if (dmx_data_ready->status == DMX_OK_DECODER_BUF) {
2977 event.type = DMX_EVENT_NEW_ES_DATA;
2978 event.params.es_data.buf_handle = dmx_data_ready->buf.handle;
2979 event.params.es_data.cookie = dmx_data_ready->buf.cookie;
2980 event.params.es_data.offset = dmx_data_ready->buf.offset;
2981 event.params.es_data.data_len = dmx_data_ready->buf.len;
2982 event.params.es_data.pts_valid = dmx_data_ready->buf.pts_exists;
2983 event.params.es_data.pts = dmx_data_ready->buf.pts;
2984 event.params.es_data.dts_valid = dmx_data_ready->buf.dts_exists;
2985 event.params.es_data.dts = dmx_data_ready->buf.dts;
2986 event.params.es_data.stc = dmx_data_ready->buf.stc;
2987 event.params.es_data.transport_error_indicator_counter =
2988 dmx_data_ready->buf.tei_counter;
2989 event.params.es_data.continuity_error_counter =
2990 dmx_data_ready->buf.cont_err_counter;
2991 event.params.es_data.ts_packets_num =
2992 dmx_data_ready->buf.ts_packets_num;
2993 event.params.es_data.ts_dropped_bytes =
2994 dmx_data_ready->buf.ts_dropped_bytes;
2995 dvb_dmxdev_add_event(events, &event);
2996 spin_unlock(&dmxdevfilter->dev->lock);
2997 wake_up_all(&buffer->queue);
2998 return 0;
2999 }
3000
3001 if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
3002 spin_unlock(&dmxdevfilter->dev->lock);
3003 wake_up_all(&buffer->queue);
3004 return 0;
3005 }
3006
3007 free = dvb_ringbuffer_free(buffer);
3008 if (free < dmx_data_ready->data_length) {
3009 pr_err("%s: invalid data length: data_length=%d > free=%zd\n",
3010 __func__, dmx_data_ready->data_length, free);
3011
3012 spin_unlock(&dmxdevfilter->dev->lock);
3013 wake_up_all(&buffer->queue);
3014 return 0;
3015 }
3016
3017 if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
3018 if (dmx_data_ready->status == DMX_OK &&
3019 !events->current_event_data_size) {
3020 events->current_event_start_offset = buffer->pwrite;
3021 } else if (dmx_data_ready->status == DMX_OK_PES_END) {
3022 event.type = DMX_EVENT_NEW_PES;
3023
3024 event.params.pes.base_offset =
3025 events->current_event_start_offset;
3026 event.params.pes.start_offset =
3027 (events->current_event_start_offset +
3028 dmx_data_ready->pes_end.start_gap) %
3029 buffer->size;
3030
3031 event.params.pes.actual_length =
3032 dmx_data_ready->pes_end.actual_length;
3033 event.params.pes.total_length =
3034 events->current_event_data_size;
3035
3036 event.params.pes.flags = 0;
3037 if (dmx_data_ready->pes_end.disc_indicator_set)
3038 event.params.pes.flags |=
3039 DMX_FILTER_DISCONTINUITY_INDICATOR;
3040 if (dmx_data_ready->pes_end.pes_length_mismatch)
3041 event.params.pes.flags |=
3042 DMX_FILTER_PES_LENGTH_ERROR;
3043
3044 event.params.pes.stc = dmx_data_ready->pes_end.stc;
3045 event.params.pes.transport_error_indicator_counter =
3046 dmx_data_ready->pes_end.tei_counter;
3047 event.params.pes.continuity_error_counter =
3048 dmx_data_ready->pes_end.cont_err_counter;
3049 event.params.pes.ts_packets_num =
3050 dmx_data_ready->pes_end.ts_packets_num;
3051
3052 /* Do not report zero length PES */
3053 if (event.params.pes.total_length)
3054 dvb_dmxdev_add_event(events, &event);
3055
3056 events->current_event_data_size = 0;
3057 }
3058 } else if (!events->current_event_data_size) {
3059 events->current_event_start_offset = buffer->pwrite;
3060 }
3061
3062 events->current_event_data_size += dmx_data_ready->data_length;
3063 DVB_RINGBUFFER_PUSH(buffer, dmx_data_ready->data_length);
3064
3065 if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) ||
3066 (dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)) {
3067 while (events->current_event_data_size >=
3068 dmxdevfilter->params.pes.rec_chunk_size) {
3069 event.type = DMX_EVENT_NEW_REC_CHUNK;
3070 event.params.recording_chunk.offset =
3071 events->current_event_start_offset;
3072 event.params.recording_chunk.size =
3073 dmxdevfilter->params.pes.rec_chunk_size;
3074 events->current_event_data_size =
3075 events->current_event_data_size -
3076 dmxdevfilter->params.pes.rec_chunk_size;
3077 events->current_event_start_offset =
3078 (events->current_event_start_offset +
3079 dmxdevfilter->params.pes.rec_chunk_size) %
3080 buffer->size;
3081
3082 dvb_dmxdev_add_event(events, &event);
3083 }
3084 }
3085 spin_unlock(&dmxdevfilter->dev->lock);
3086 wake_up_all(&buffer->queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003087 return 0;
3088}
3089
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090/* stop feed but only mark the specified filter as stopped (state set) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter)
3092{
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003093 struct dmxdev_feed *feed;
3094
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
3096
3097 switch (dmxdevfilter->type) {
3098 case DMXDEV_TYPE_SEC:
3099 del_timer(&dmxdevfilter->timer);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303100 dmxdevfilter->feed.sec.feed->stop_filtering(
3101 dmxdevfilter->feed.sec.feed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102 break;
3103 case DMXDEV_TYPE_PES:
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303104 list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
3105 if (dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) {
3106 dmxdevfilter->dev->dvr_feeds_count--;
3107 if (!dmxdevfilter->dev->dvr_feeds_count)
3108 dmxdevfilter->dev->dvr_feed = NULL;
3109 }
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003110 feed->ts->stop_filtering(feed->ts);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303111 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112 break;
3113 default:
3114 return -EINVAL;
3115 }
3116 return 0;
3117}
3118
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119/* start feed associated with the specified filter */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter)
3121{
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003122 struct dmxdev_feed *feed;
3123 int ret;
3124
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003125 dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126
3127 switch (filter->type) {
3128 case DMXDEV_TYPE_SEC:
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303129 return filter->feed.sec.feed->start_filtering(
3130 filter->feed.sec.feed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131 case DMXDEV_TYPE_PES:
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003132 list_for_each_entry(feed, &filter->feed.ts, next) {
3133 ret = feed->ts->start_filtering(feed->ts);
3134 if (ret < 0) {
3135 dvb_dmxdev_feed_stop(filter);
3136 return ret;
3137 }
3138 }
3139 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003140 default:
3141 return -EINVAL;
3142 }
3143
3144 return 0;
3145}
3146
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147/* restart section feed if it has filters left associated with it,
3148 otherwise release the feed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter)
3150{
3151 int i;
3152 struct dmxdev *dmxdev = filter->dev;
3153 u16 pid = filter->params.sec.pid;
3154
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003155 for (i = 0; i < dmxdev->filternum; i++)
3156 if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
3157 dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
3158 dmxdev->filter[i].params.sec.pid == pid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 dvb_dmxdev_feed_start(&dmxdev->filter[i]);
3160 return 0;
3161 }
3162
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003163 filter->dev->demux->release_section_feed(dmxdev->demux,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303164 filter->feed.sec.feed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165
3166 return 0;
3167}
3168
/*
 * dvb_dmxdev_filter_stop - stop a running filter and tear down its feeds
 *
 * No-op for filters below the GO state. For section filters the section
 * filter object is released and the shared section feed is either
 * restarted for remaining users of the PID or released. For PES filters
 * all TS feeds are stopped, pending TS insertion buffers are cancelled,
 * and the feeds are returned to the demux. Finally the filter's output
 * buffer and queued events are flushed and waiting readers are woken.
 *
 * Returns 0 on success, -EINVAL for an unexpected filter type.
 */
static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
{
	struct dmxdev_feed *feed;
	struct dmx_demux *demux;
	struct ts_insertion_buffer *ts_buffer;

	if (dmxdevfilter->state < DMXDEV_STATE_GO)
		return 0;

	switch (dmxdevfilter->type) {
	case DMXDEV_TYPE_SEC:
		if (!dmxdevfilter->feed.sec.feed)
			break;
		dvb_dmxdev_feed_stop(dmxdevfilter);
		if (dmxdevfilter->filter.sec)
			dmxdevfilter->feed.sec.feed->
				release_filter(dmxdevfilter->feed.sec.feed,
					       dmxdevfilter->filter.sec);
		/* Restart the shared feed for other PID users, or free it */
		dvb_dmxdev_feed_restart(dmxdevfilter);
		dmxdevfilter->feed.sec.feed = NULL;
		break;
	case DMXDEV_TYPE_PES:
		dvb_dmxdev_feed_stop(dmxdevfilter);
		demux = dmxdevfilter->dev->demux;

		if (!list_empty(&dmxdevfilter->insertion_buffers)) {
			/*
			 * NOTE(review): assumes the TS feed list is non-empty
			 * whenever insertion buffers exist — TODO confirm
			 * against the filter setup paths.
			 */
			feed = list_first_entry(&dmxdevfilter->feed.ts,
				struct dmxdev_feed, next);

			list_for_each_entry(ts_buffer,
					&dmxdevfilter->insertion_buffers, next)
				dvb_dmxdev_cancel_ts_insertion(ts_buffer);
			/* ts_insertion_terminate is an optional demux op */
			if (feed->ts->ts_insertion_terminate)
				feed->ts->ts_insertion_terminate(feed->ts);
		}

		/* Return every TS feed to the demux driver */
		list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
			demux->release_ts_feed(demux, feed->ts);
			feed->ts = NULL;
		}
		break;
	default:
		if (dmxdevfilter->state == DMXDEV_STATE_ALLOCATED)
			return 0;
		return -EINVAL;
	}

	/* Discard buffered output and pending events under the device lock */
	spin_lock_irq(&dmxdevfilter->dev->lock);
	dvb_dmxdev_flush_output(&dmxdevfilter->buffer, &dmxdevfilter->events);
	dvb_ringbuffer_reset(&dmxdevfilter->buffer);
	spin_unlock_irq(&dmxdevfilter->dev->lock);

	wake_up_all(&dmxdevfilter->buffer.queue);

	return 0;
}
3225
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003226static void dvb_dmxdev_delete_pids(struct dmxdev_filter *dmxdevfilter)
3227{
3228 struct dmxdev_feed *feed, *tmp;
3229
3230 /* delete all PIDs */
3231 list_for_each_entry_safe(feed, tmp, &dmxdevfilter->feed.ts, next) {
3232 list_del(&feed->next);
3233 kfree(feed);
3234 }
3235
3236 BUG_ON(!list_empty(&dmxdevfilter->feed.ts));
3237}
3238
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter)
3240{
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003241 if (dmxdevfilter->state < DMXDEV_STATE_SET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242 return 0;
3243
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003244 if (dmxdevfilter->type == DMXDEV_TYPE_PES)
3245 dvb_dmxdev_delete_pids(dmxdevfilter);
3246
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003247 dmxdevfilter->type = DMXDEV_TYPE_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
3249 return 0;
3250}
3251
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003252static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
3253 struct dmxdev_filter *filter,
3254 struct dmxdev_feed *feed)
3255{
Arnd Bergmanne95be152016-06-17 17:46:28 -03003256 ktime_t timeout = ktime_set(0, 0);
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003257 struct dmx_pes_filter_params *para = &filter->params.pes;
3258 dmx_output_t otype;
3259 int ret;
3260 int ts_type;
Mauro Carvalho Chehabfde04ab2013-04-04 13:25:30 -03003261 enum dmx_ts_pes ts_pes;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003262 struct dmx_ts_feed *tsfeed;
3263
3264 feed->ts = NULL;
3265 otype = para->output;
3266
Mauro Carvalho Chehab9ae2ae32010-12-27 11:41:14 -03003267 ts_pes = para->pes_type;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003268
3269 if (ts_pes < DMX_PES_OTHER)
3270 ts_type = TS_DECODER;
3271 else
3272 ts_type = 0;
3273
3274 if (otype == DMX_OUT_TS_TAP)
3275 ts_type |= TS_PACKET;
3276 else if (otype == DMX_OUT_TSDEMUX_TAP)
3277 ts_type |= TS_PACKET | TS_DEMUX;
3278 else if (otype == DMX_OUT_TAP)
3279 ts_type |= TS_PACKET | TS_DEMUX | TS_PAYLOAD_ONLY;
3280
3281 ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux, &feed->ts,
3282 dvb_dmxdev_ts_callback);
3283 if (ret < 0)
3284 return ret;
3285
3286 tsfeed = feed->ts;
3287 tsfeed->priv = filter;
3288
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303289 if (filter->params.pes.output == DMX_OUT_TS_TAP) {
3290 tsfeed->buffer.ringbuff = &dmxdev->dvr_buffer;
3291 tsfeed->buffer.priv_handle = dmxdev->dvr_priv_buff_handle;
3292 if (!dmxdev->dvr_feeds_count)
3293 dmxdev->dvr_feed = filter;
3294 dmxdev->dvr_feeds_count++;
3295 } else if (filter->params.pes.output == DMX_OUT_DECODER) {
3296 tsfeed->buffer.ringbuff = &filter->buffer;
3297 tsfeed->decoder_buffers = &filter->decoder_buffers;
3298 tsfeed->buffer.priv_handle = filter->priv_buff_handle;
3299 } else {
3300 tsfeed->buffer.ringbuff = &filter->buffer;
3301 tsfeed->buffer.priv_handle = filter->priv_buff_handle;
3302 }
3303
3304 if (tsfeed->data_ready_cb) {
3305 ret = tsfeed->data_ready_cb(tsfeed, dvb_dmxdev_ts_event_cb);
3306
3307 if (ret < 0) {
3308 dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
3309 return ret;
3310 }
3311 }
3312
3313 ret = tsfeed->set(tsfeed, feed->pid,
3314 ts_type, ts_pes,
3315 filter->decoder_buffers.buffers_size,
3316 timeout);
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003317 if (ret < 0) {
3318 dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
3319 return ret;
3320 }
3321
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303322 if (tsfeed->set_tsp_out_format)
3323 tsfeed->set_tsp_out_format(tsfeed, filter->dmx_tsp_format);
3324
3325 if (tsfeed->set_secure_mode)
3326 tsfeed->set_secure_mode(tsfeed, &filter->sec_mode);
3327
3328 if (tsfeed->set_cipher_ops)
3329 tsfeed->set_cipher_ops(tsfeed, &feed->cipher_ops);
3330
3331 if ((para->pes_type == DMX_PES_VIDEO0) ||
3332 (para->pes_type == DMX_PES_VIDEO1) ||
3333 (para->pes_type == DMX_PES_VIDEO2) ||
3334 (para->pes_type == DMX_PES_VIDEO3)) {
3335 if (tsfeed->set_video_codec) {
3336 ret = tsfeed->set_video_codec(tsfeed,
3337 para->video_codec);
3338
3339 if (ret < 0) {
3340 dmxdev->demux->release_ts_feed(dmxdev->demux,
3341 tsfeed);
3342 return ret;
3343 }
3344 }
3345 }
3346
3347 if ((filter->params.pes.output == DMX_OUT_TS_TAP) ||
3348 (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP))
3349 if (tsfeed->set_idx_params) {
3350 ret = tsfeed->set_idx_params(
3351 tsfeed, &feed->idx_params);
3352 if (ret) {
3353 dmxdev->demux->release_ts_feed(dmxdev->demux,
3354 tsfeed);
3355 return ret;
3356 }
3357 }
3358
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003359 ret = tsfeed->start_filtering(tsfeed);
3360 if (ret < 0) {
3361 dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
3362 return ret;
3363 }
3364
3365 return 0;
3366}
3367
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303368static int dvb_filter_external_buffer_only(struct dmxdev *dmxdev,
3369 struct dmxdev_filter *filter)
3370{
3371 struct dmx_caps caps;
3372 int is_external_only;
3373 int flags;
3374
3375 /*
3376 * For backward compatibility, default assumes that
3377 * external only buffers are not supported.
3378 */
3379 flags = 0;
3380 if (dmxdev->demux->get_caps) {
3381 dmxdev->demux->get_caps(dmxdev->demux, &caps);
3382
3383 if (filter->type == DMXDEV_TYPE_SEC)
3384 flags = caps.section.flags;
3385 else if (filter->params.pes.output == DMX_OUT_DECODER)
3386 /* For decoder filters dmxdev buffer is not required */
3387 flags = 0;
3388 else if (filter->params.pes.output == DMX_OUT_TAP)
3389 flags = caps.pes.flags;
3390 else if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
3391 flags = caps.recording_188_tsp.flags;
3392 else
3393 flags = caps.recording_192_tsp.flags;
3394 }
3395
3396 if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) &&
3397 (flags & DMX_BUFFER_EXTERNAL_SUPPORT))
3398 is_external_only = 1;
3399 else
3400 is_external_only = 0;
3401
3402 return is_external_only;
3403}
3404
/*
 * dvb_dmxdev_filter_start - start a configured filter
 *
 * Allocates the filter's output buffer if needed, flushes stale output,
 * then starts either the section machinery (sharing a section feed with
 * other filters on the same PID) or every TS feed of a PES filter. On
 * partial PES start failure, feeds already started are stopped and
 * released. Queued TS insertion buffers are (re)armed on success.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
{
	struct dmxdev *dmxdev = filter->dev;
	struct dmxdev_feed *feed;
	void *mem;
	int ret, i;
	size_t tsp_size;

	if (filter->state < DMXDEV_STATE_SET)
		return -EINVAL;

	/* Restarting an already-running filter: stop it first */
	if (filter->state >= DMXDEV_STATE_GO)
		dvb_dmxdev_filter_stop(filter);

	if (!dvb_filter_verify_buffer_size(filter))
		return -EINVAL;

	if (!filter->buffer.data) {
		/*
		 * dmxdev buffer in decoder filters is not really used
		 * to exchange data with applications. Decoder buffers
		 * can be set using DMX_SET_DECODER_BUFFER, which
		 * would not update the filter->buffer.data at all.
		 * Therefore we should not treat this filter as
		 * other regular filters and should not fail here
		 * even if user sets the buffer in decoder
		 * filter as external buffer.
		 */
		if (filter->type == DMXDEV_TYPE_PES &&
		    (filter->params.pes.output == DMX_OUT_DECODER ||
		    filter->params.pes.output == DMX_OUT_TS_TAP))
			filter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;

		/*
		 * External buffer mode (or external-only hardware) with
		 * no buffer mapped yet cannot be started.
		 */
		if (!(filter->type == DMXDEV_TYPE_PES &&
		    filter->params.pes.output == DMX_OUT_TS_TAP) &&
		    (filter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL ||
		    dvb_filter_external_buffer_only(dmxdev, filter)))
			return -ENOMEM;

		mem = vmalloc_user(filter->buffer.size);
		if (!mem)
			return -ENOMEM;
		spin_lock_irq(&filter->dev->lock);
		filter->buffer.data = mem;
		spin_unlock_irq(&filter->dev->lock);
	} else if ((filter->buffer_mode == DMX_BUFFER_MODE_INTERNAL) &&
		   dvb_filter_external_buffer_only(dmxdev, filter)) {
		return -ENOMEM;
	}

	filter->eos_state = 0;

	/* Drop any output left over from a previous run */
	spin_lock_irq(&filter->dev->lock);
	dvb_dmxdev_flush_output(&filter->buffer, &filter->events);
	spin_unlock_irq(&filter->dev->lock);

	switch (filter->type) {
	case DMXDEV_TYPE_SEC:
	{
		struct dmx_sct_filter_params *para = &filter->params.sec;
		struct dmx_section_filter **secfilter = &filter->filter.sec;
		struct dmx_section_feed **secfeed = &filter->feed.sec.feed;

		*secfilter = NULL;
		*secfeed = NULL;

		/* find active filter/feed with same PID */
		for (i = 0; i < dmxdev->filternum; i++) {
			if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
			    dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
			    dmxdev->filter[i].params.sec.pid == para->pid) {
				*secfeed = dmxdev->filter[i].feed.sec.feed;
				break;
			}
		}

		/* if no feed found, try to allocate new one */
		if (!*secfeed) {
			ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
						secfeed,
						dvb_dmxdev_section_callback);
			if (ret < 0) {
				pr_err("DVB (%s): could not alloc feed\n",
				       __func__);
				return ret;
			}

			/* data_ready_cb is an optional demux capability */
			if ((*secfeed)->data_ready_cb) {
				ret = (*secfeed)->data_ready_cb(
						*secfeed,
						dvb_dmxdev_section_event_cb);

				if (ret < 0) {
					pr_err(
						"DVB (%s): could not set event cb\n",
						__func__);
					dvb_dmxdev_feed_restart(filter);
					return ret;
				}
			}

			ret = (*secfeed)->set(*secfeed, para->pid, 32768,
					      (para->flags & DMX_CHECK_CRC) ? 1 : 0);
			if (ret < 0) {
				pr_err("DVB (%s): could not set feed\n",
					__func__);
				dvb_dmxdev_feed_restart(filter);
				return ret;
			}

			if ((*secfeed)->set_secure_mode)
				(*secfeed)->set_secure_mode(*secfeed,
					&filter->sec_mode);

			if ((*secfeed)->set_cipher_ops)
				(*secfeed)->set_cipher_ops(*secfeed,
					&filter->feed.sec.cipher_ops);
		} else {
			/* Reusing a running feed: pause it while we attach */
			dvb_dmxdev_feed_stop(filter);
		}

		ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
		if (ret < 0) {
			dvb_dmxdev_feed_restart(filter);
			filter->feed.sec.feed->start_filtering(*secfeed);
			pr_debug("could not get filter\n");
			return ret;
		}

		(*secfilter)->priv = filter;
		(*secfilter)->buffer.ringbuff = &filter->buffer;
		(*secfilter)->buffer.priv_handle = filter->priv_buff_handle;

		/*
		 * Byte 0 of the user filter matches the table_id; bytes
		 * 1-2 cover the section length field and are never
		 * matched, so user bytes 1+ map to filter bytes 3+.
		 */
		memcpy(&((*secfilter)->filter_value[3]),
		       &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
		memcpy(&(*secfilter)->filter_mask[3],
		       &para->filter.mask[1], DMX_FILTER_SIZE - 1);
		memcpy(&(*secfilter)->filter_mode[3],
		       &para->filter.mode[1], DMX_FILTER_SIZE - 1);

		(*secfilter)->filter_value[0] = para->filter.filter[0];
		(*secfilter)->filter_mask[0] = para->filter.mask[0];
		(*secfilter)->filter_mode[0] = para->filter.mode[0];
		(*secfilter)->filter_mask[1] = 0;
		(*secfilter)->filter_mask[2] = 0;

		filter->todo = 0;
		filter->events.data_read_event_masked =
			filter->events.event_mask.disable_mask &
			DMX_EVENT_NEW_SECTION;

		ret = filter->feed.sec.feed->start_filtering(
				filter->feed.sec.feed);
		if (ret < 0)
			return ret;

		/* Arm the section timeout timer if one was requested */
		dvb_dmxdev_filter_timer(filter);
		break;
	}
	case DMXDEV_TYPE_PES:
		/* Clamp the recording chunk size into a sane range */
		if (filter->params.pes.rec_chunk_size <
		    DMX_REC_BUFF_CHUNK_MIN_SIZE)
			filter->params.pes.rec_chunk_size =
				DMX_REC_BUFF_CHUNK_MIN_SIZE;

		if (filter->params.pes.rec_chunk_size >=
		    filter->buffer.size)
			filter->params.pes.rec_chunk_size =
				filter->buffer.size >> 2;

		/* Align rec-chunk based on output format */
		if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188)
			tsp_size = 188;
		else
			tsp_size = 192;

		filter->params.pes.rec_chunk_size /= tsp_size;
		filter->params.pes.rec_chunk_size *= tsp_size;

		/* Mask the data-read event matching this output type */
		if (filter->params.pes.output == DMX_OUT_TS_TAP)
			dmxdev->dvr_output_events.data_read_event_masked =
			 dmxdev->dvr_output_events.event_mask.disable_mask &
			 DMX_EVENT_NEW_REC_CHUNK;
		else if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP)
			filter->events.data_read_event_masked =
				filter->events.event_mask.disable_mask &
				DMX_EVENT_NEW_REC_CHUNK;
		else if (filter->params.pes.output == DMX_OUT_TAP)
			filter->events.data_read_event_masked =
				filter->events.event_mask.disable_mask &
				DMX_EVENT_NEW_PES;
		else
			filter->events.data_read_event_masked = 1;

		ret = 0;
		list_for_each_entry(feed, &filter->feed.ts, next) {
			ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
			if (ret)
				break;
		}

		if (!ret)
			break;

		/* cleanup feeds that were started before the failure */
		list_for_each_entry(feed, &filter->feed.ts, next) {
			if (!feed->ts)
				continue;
			feed->ts->stop_filtering(feed->ts);
			dmxdev->demux->release_ts_feed(dmxdev->demux, feed->ts);
			feed->ts = NULL;

			if (filter->params.pes.output == DMX_OUT_TS_TAP) {
				filter->dev->dvr_feeds_count--;
				if (!filter->dev->dvr_feeds_count)
					filter->dev->dvr_feed = NULL;
			}
		}
		return ret;

	default:
		return -EINVAL;
	}

	dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);

	/* Re-arm any TS insertion buffers queued on this filter */
	if ((filter->type == DMXDEV_TYPE_PES) &&
	    !list_empty(&filter->insertion_buffers)) {
		struct ts_insertion_buffer *ts_buffer;

		feed = list_first_entry(&filter->feed.ts,
			struct dmxdev_feed, next);

		ret = 0;
		if (feed->ts->ts_insertion_init)
			ret = feed->ts->ts_insertion_init(feed->ts);
		if (!ret) {
			list_for_each_entry(ts_buffer,
				&filter->insertion_buffers, next)
				dvb_dmxdev_queue_ts_insertion(
					ts_buffer);
		} else {
			pr_err("%s: ts_insertion_init failed, err %d\n",
				__func__, ret);
		}
	}

	return 0;
}
3654
3655static int dvb_demux_open(struct inode *inode, struct file *file)
3656{
Johannes Stezenbach0c53c702005-05-16 21:54:24 -07003657 struct dvb_device *dvbdev = file->private_data;
3658 struct dmxdev *dmxdev = dvbdev->priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003659 int i;
3660 struct dmxdev_filter *dmxdevfilter;
3661
3662 if (!dmxdev->filter)
3663 return -EINVAL;
3664
Ingo Molnar3593cab2006-02-07 06:49:14 -02003665 if (mutex_lock_interruptible(&dmxdev->mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 return -ERESTARTSYS;
3667
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003668 for (i = 0; i < dmxdev->filternum; i++)
3669 if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 break;
3671
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003672 if (i == dmxdev->filternum) {
Ingo Molnar3593cab2006-02-07 06:49:14 -02003673 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 return -EMFILE;
3675 }
3676
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003677 dmxdevfilter = &dmxdev->filter[i];
Ingo Molnar3593cab2006-02-07 06:49:14 -02003678 mutex_init(&dmxdevfilter->mutex);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003679 file->private_data = dmxdevfilter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303681 memset(&dmxdevfilter->decoder_buffers,
3682 0,
3683 sizeof(dmxdevfilter->decoder_buffers));
3684 dmxdevfilter->decoder_buffers.buffers_size =
3685 DMX_DEFAULT_DECODER_BUFFER_SIZE;
3686 dmxdevfilter->buffer_mode = DMX_BUFFER_MODE_INTERNAL;
3687 dmxdevfilter->priv_buff_handle = NULL;
Andreas Oberritter34731df2006-03-14 17:31:01 -03003688 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303689 dvb_dmxdev_flush_events(&dmxdevfilter->events);
3690 dmxdevfilter->events.event_mask.disable_mask = DMX_EVENT_NEW_ES_DATA;
3691 dmxdevfilter->events.event_mask.no_wakeup_mask = 0;
3692 dmxdevfilter->events.event_mask.wakeup_threshold = 1;
3693
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003694 dmxdevfilter->type = DMXDEV_TYPE_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003696 init_timer(&dmxdevfilter->timer);
3697
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303698 dmxdevfilter->sec_mode.is_secured = 0;
3699
3700 INIT_LIST_HEAD(&dmxdevfilter->insertion_buffers);
3701
3702 dmxdevfilter->dmx_tsp_format = DMX_TSP_FORMAT_188;
Markus Rechberger57861b42007-04-14 10:19:18 -03003703 dvbdev->users++;
3704
Ingo Molnar3593cab2006-02-07 06:49:14 -02003705 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706 return 0;
3707}
3708
/*
 * dvb_dmxdev_filter_free - tear down a filter and release all its resources.
 *
 * Called with no locks held; takes dmxdev->mutex then the filter's own
 * mutex (this outer-then-inner order is the locking convention used
 * throughout this file).  Stops and resets the filter, frees any pending
 * TS-insertion buffers, releases the output ring buffer, unmaps an
 * external buffer handle if one was attached, and finally marks the
 * filter FREE and wakes any sleepers still blocked on the buffer queue.
 *
 * Always returns 0.
 */
static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
				  struct dmxdev_filter *dmxdevfilter)
{
	struct ts_insertion_buffer *ts_buffer, *tmp;

	mutex_lock(&dmxdev->mutex);
	mutex_lock(&dmxdevfilter->mutex);

	dvb_dmxdev_filter_stop(dmxdevfilter);
	dvb_dmxdev_filter_reset(dmxdevfilter);

	/* Drop any TS-insertion buffers still queued on this filter. */
	list_for_each_entry_safe(ts_buffer, tmp,
			&dmxdevfilter->insertion_buffers, next) {
		list_del(&ts_buffer->next);
		vfree(ts_buffer->buffer);
		vfree(ts_buffer);
	}

	if (dmxdevfilter->buffer.data) {
		void *mem = dmxdevfilter->buffer.data;

		/*
		 * Detach the data pointer under the demux spinlock so the
		 * demux callbacks never see a half-freed buffer; only then
		 * is it safe to free the memory outside the lock.
		 */
		spin_lock_irq(&dmxdev->lock);
		dmxdevfilter->buffer.data = NULL;
		spin_unlock_irq(&dmxdev->lock);
		/* Only internally allocated buffers are owned (and freed) here. */
		if (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL)
			vfree(mem);
	}

	/* External buffers were mapped by the demux driver; unmap them there. */
	if ((dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL) &&
		dmxdevfilter->priv_buff_handle) {
		dmxdev->demux->unmap_buffer(dmxdev->demux,
			dmxdevfilter->priv_buff_handle);
		dmxdevfilter->priv_buff_handle = NULL;
	}

	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
	/* Wake readers/pollers so they notice the filter went away. */
	wake_up_all(&dmxdevfilter->buffer.queue);
	mutex_unlock(&dmxdevfilter->mutex);
	mutex_unlock(&dmxdev->mutex);
	return 0;
}
3750
3751static inline void invert_mode(dmx_filter_t *filter)
3752{
3753 int i;
3754
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003755 for (i = 0; i < DMX_FILTER_SIZE; i++)
3756 filter->mode[i] ^= 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757}
3758
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003759static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev,
3760 struct dmxdev_filter *filter, u16 pid)
3761{
3762 struct dmxdev_feed *feed;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303763 int ret = 0;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003764
3765 if ((filter->type != DMXDEV_TYPE_PES) ||
3766 (filter->state < DMXDEV_STATE_SET))
3767 return -EINVAL;
3768
3769 /* only TS packet filters may have multiple PIDs */
3770 if ((filter->params.pes.output != DMX_OUT_TSDEMUX_TAP) &&
3771 (!list_empty(&filter->feed.ts)))
3772 return -EINVAL;
3773
3774 feed = kzalloc(sizeof(struct dmxdev_feed), GFP_KERNEL);
3775 if (feed == NULL)
3776 return -ENOMEM;
3777
3778 feed->pid = pid;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303779 feed->cipher_ops.operations_count = 0;
3780 feed->idx_params.enable = 0;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003781
3782 if (filter->state >= DMXDEV_STATE_GO)
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303783 ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003784
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303785 if (!ret)
3786 list_add(&feed->next, &filter->feed.ts);
3787 else
3788 kfree(feed);
3789
3790 return ret;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003791}
3792
3793static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev,
3794 struct dmxdev_filter *filter, u16 pid)
3795{
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303796 int feed_count;
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003797 struct dmxdev_feed *feed, *tmp;
3798
3799 if ((filter->type != DMXDEV_TYPE_PES) ||
3800 (filter->state < DMXDEV_STATE_SET))
3801 return -EINVAL;
3802
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303803 feed_count = 0;
3804 list_for_each_entry(tmp, &filter->feed.ts, next)
3805 feed_count++;
3806
3807 if (feed_count <= 1)
3808 return -EINVAL;
3809
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003810 list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303811 if (feed->pid == pid) {
3812 if (feed->ts != NULL) {
3813 feed->ts->stop_filtering(feed->ts);
3814 filter->dev->demux->release_ts_feed(
3815 filter->dev->demux,
3816 feed->ts);
3817 }
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003818 list_del(&feed->next);
3819 kfree(feed);
3820 }
3821 }
3822
3823 return 0;
3824}
3825
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003827 struct dmxdev_filter *dmxdevfilter,
3828 struct dmx_sct_filter_params *params)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829{
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303830 pr_debug("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
Mauro Carvalho Chehab17e67d42013-03-01 15:20:25 -03003831 __func__, params->pid, params->flags, params->timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832
3833 dvb_dmxdev_filter_stop(dmxdevfilter);
3834
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003835 dmxdevfilter->type = DMXDEV_TYPE_SEC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836 memcpy(&dmxdevfilter->params.sec,
3837 params, sizeof(struct dmx_sct_filter_params));
3838 invert_mode(&dmxdevfilter->params.sec.filter);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303839 dmxdevfilter->feed.sec.cipher_ops.operations_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
3841
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003842 if (params->flags & DMX_IMMEDIATE_START)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003843 return dvb_dmxdev_filter_start(dmxdevfilter);
3844
3845 return 0;
3846}
3847
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05303848static int dvb_dmxdev_set_secure_mode(
3849 struct dmxdev *dmxdev,
3850 struct dmxdev_filter *filter,
3851 struct dmx_secure_mode *sec_mode)
3852{
3853 if (!dmxdev || !filter || !sec_mode)
3854 return -EINVAL;
3855
3856 if (filter->state == DMXDEV_STATE_GO) {
3857 pr_err("%s: invalid filter state\n", __func__);
3858 return -EBUSY;
3859 }
3860
3861 pr_debug("%s: secure=%d\n", __func__, sec_mode->is_secured);
3862
3863 filter->sec_mode = *sec_mode;
3864
3865 return 0;
3866}
3867
/*
 * dvb_dmxdev_set_cipher - attach cipher (descrambling) operations to the
 * feed that carries cipher_ops->pid.
 *
 * Validates the request against the demux capabilities, requires the
 * filter to be between SET and GO, and requires secure mode to already be
 * enabled whenever a non-empty operation list is supplied.  For PES
 * filters the matching TS feed is located in the feed list; for section
 * filters the single section feed is used.  The operations are stored on
 * the feed and, if the filter is already running, also pushed down to the
 * demux driver via its set_cipher_ops hook.
 *
 * Returns 0 on success; -EINVAL on bad arguments, capability overflow, an
 * unknown filter type or a PID not owned by this filter; -EPERM on a bad
 * filter state or when secure mode is required but off.
 */
static int dvb_dmxdev_set_cipher(struct dmxdev *dmxdev,
			struct dmxdev_filter *filter,
			struct dmx_cipher_operations *cipher_ops)
{
	struct dmxdev_feed *feed;
	struct dmxdev_feed *ts_feed = NULL;
	struct dmxdev_sec_feed *sec_feed = NULL;
	struct dmx_caps caps;

	if (!dmxdev || !dmxdev->demux->get_caps)
		return -EINVAL;

	dmxdev->demux->get_caps(dmxdev->demux, &caps);

	/* Operation count is bounded both by hardware and by the API limit. */
	if (!filter || !cipher_ops ||
		(cipher_ops->operations_count > caps.num_cipher_ops) ||
		(cipher_ops->operations_count >
		 DMX_MAX_CIPHER_OPERATIONS_COUNT))
		return -EINVAL;

	pr_debug("%s: pid=%d, operations=%d\n", __func__,
		cipher_ops->pid, cipher_ops->operations_count);

	if (filter->state < DMXDEV_STATE_SET ||
		filter->state > DMXDEV_STATE_GO) {
		pr_err("%s: invalid filter state\n", __func__);
		return -EPERM;
	}

	if (!filter->sec_mode.is_secured && cipher_ops->operations_count) {
		pr_err("%s: secure mode must be enabled to set cipher ops\n",
			__func__);
		return -EPERM;
	}

	switch (filter->type) {
	case DMXDEV_TYPE_PES:
		/* Find the TS feed carrying this PID among the filter's feeds. */
		list_for_each_entry(feed, &filter->feed.ts, next) {
			if (feed->pid == cipher_ops->pid) {
				ts_feed = feed;
				ts_feed->cipher_ops = *cipher_ops;
				/* Running filter: apply to the demux now. */
				if (filter->state == DMXDEV_STATE_GO &&
					ts_feed->ts->set_cipher_ops)
					ts_feed->ts->set_cipher_ops(
						ts_feed->ts, cipher_ops);
				break;
			}
		}
		break;
	case DMXDEV_TYPE_SEC:
		if (filter->params.sec.pid == cipher_ops->pid) {
			sec_feed = &filter->feed.sec;
			sec_feed->cipher_ops = *cipher_ops;
			if (filter->state == DMXDEV_STATE_GO &&
				sec_feed->feed->set_cipher_ops)
				sec_feed->feed->set_cipher_ops(sec_feed->feed,
						cipher_ops);
		}
		break;

	default:
		return -EINVAL;
	}

	/* Neither branch matched the PID: it does not belong to this filter. */
	if (!ts_feed && !sec_feed) {
		pr_err("%s: pid %d is undefined for this filter\n",
			__func__, cipher_ops->pid);
		return -EINVAL;
	}

	return 0;
}
3940
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003942 struct dmxdev_filter *dmxdevfilter,
3943 struct dmx_pes_filter_params *params)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944{
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003945 int ret;
3946
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 dvb_dmxdev_filter_stop(dmxdevfilter);
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003948 dvb_dmxdev_filter_reset(dmxdevfilter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949
Mauro Carvalho Chehab31becf02012-10-27 15:30:47 -03003950 if ((unsigned)params->pes_type > DMX_PES_OTHER)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 return -EINVAL;
3952
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003953 dmxdevfilter->type = DMXDEV_TYPE_PES;
3954 memcpy(&dmxdevfilter->params, params,
3955 sizeof(struct dmx_pes_filter_params));
Francesco Lavra691c9ae2010-02-07 09:49:58 -03003956 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957
3958 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
3959
Andreas Oberritter1cb662a2009-07-14 20:28:50 -03003960 ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter,
3961 dmxdevfilter->params.pes.pid);
3962 if (ret < 0)
3963 return ret;
3964
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03003965 if (params->flags & DMX_IMMEDIATE_START)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 return dvb_dmxdev_filter_start(dmxdevfilter);
3967
3968 return 0;
3969}
3970
/*
 * dvb_dmxdev_set_decoder_buffer - validate and store decoder buffer setup.
 *
 * buffs->buffers_num == 0 selects internal allocation mode (the demux
 * allocates); any other value selects external mode where userspace
 * supplies buffer handles.  Requests are validated against the demux
 * decoder capabilities (max size, alignment, linear-group support,
 * internal/external support, max buffer count).
 *
 * NOTE(review): in internal mode nothing is copied into
 * filter->decoder_buffers here — presumably the defaults set at open time
 * (or DMX_SET_DECODER_BUFFER_SIZE) are used instead; confirm against the
 * demux driver before relying on buffs->buffers_size in that mode.
 *
 * Returns 0 on success, -EINVAL on any validation failure.
 */
static int dvb_dmxdev_set_decoder_buffer(struct dmxdev *dmxdev,
		struct dmxdev_filter *filter,
		struct dmx_decoder_buffers *buffs)
{
	int i;
	struct dmx_decoder_buffers *dec_buffs;
	struct dmx_caps caps;

	if (!dmxdev || !filter || !buffs)
		return -EINVAL;

	dec_buffs = &filter->decoder_buffers;
	if (!dmxdev->demux->get_caps)
		return -EINVAL;

	dmxdev->demux->get_caps(dmxdev->demux, &caps);
	/* Size must respect the decoder's maximum and alignment rules. */
	if (!dvb_dmxdev_verify_buffer_size(buffs->buffers_size,
		caps.decoder.max_size, caps.decoder.size_alignment))
		return -EINVAL;

	/* Linear grouping needs 2..DMX_MAX_DECODER_BUFFER_NUM buffers. */
	if ((buffs->buffers_size == 0) ||
		(buffs->is_linear &&
		 ((buffs->buffers_num <= 1) ||
		  (buffs->buffers_num > DMX_MAX_DECODER_BUFFER_NUM))))
		return -EINVAL;

	if (buffs->buffers_num == 0) {
		/* Internal mode - linear buffers not supported in this mode */
		if (!(caps.decoder.flags & DMX_BUFFER_INTERNAL_SUPPORT) ||
			buffs->is_linear)
			return -EINVAL;
	} else {
		/* External buffer(s) mode */
		if ((!(caps.decoder.flags & DMX_BUFFER_LINEAR_GROUP_SUPPORT) &&
			buffs->buffers_num > 1) ||
			!(caps.decoder.flags & DMX_BUFFER_EXTERNAL_SUPPORT) ||
			buffs->buffers_num > caps.decoder.max_buffer_num)
			return -EINVAL;

		dec_buffs->is_linear = buffs->is_linear;
		dec_buffs->buffers_num = buffs->buffers_num;
		dec_buffs->buffers_size = buffs->buffers_size;
		for (i = 0; i < dec_buffs->buffers_num; i++)
			dec_buffs->handles[i] = buffs->handles[i];
	}

	return 0;
}
4019
/*
 * dvb_dmxdev_read_sec - read() path for section filters.
 *
 * A section is delivered to userspace in two phases tracked by
 * dfil->todo:
 *   todo <= 0  : still collecting the 3-byte section header; todo counts
 *                (negatively) how many header bytes have been copied so
 *                far.  Once all 3 header bytes are out, the 12-bit section
 *                length is parsed from bytes 1..2 and becomes the new todo.
 *   todo > 0   : remaining payload bytes of the current section.
 *
 * NOTE(review): the header bytes are recovered with copy_from_user() from
 * the userspace buffer that was just written — this assumes userspace has
 * not modified the buffer between the two calls; verify against later
 * upstream reworks of this function.
 *
 * Returns the number of bytes copied, 0, or a negative errno from
 * dvb_dmxdev_buffer_read()/-EFAULT.
 */
static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
				   struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	int result, hcount;
	int done = 0;

	if (dfil->todo <= 0) {
		/* Header phase: at most the 3 - (-todo) remaining header bytes. */
		hcount = 3 + dfil->todo;
		if (hcount > count)
			hcount = count;
		if (hcount == 0)
			return done;

		result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
						file->f_flags & O_NONBLOCK,
						buf, hcount, ppos);
		if (result < 0) {
			dfil->todo = 0;
			return result;
		}
		/* Mirror the header bytes back into secheader for length parsing. */
		if (copy_from_user(dfil->secheader - dfil->todo, buf, result))
			return -EFAULT;
		buf += result;
		done = result;
		count -= result;
		dfil->todo -= result;
		/* Header incomplete (fewer than 3 bytes out): wait for more. */
		if (dfil->todo > -3)
			return done;
		/* 12-bit section_length from header bytes 1..2. */
		dfil->todo = ((dfil->secheader[1] << 8) | dfil->secheader[2]) & 0xfff;
		if (!count)
			return done;
	}
	/* Payload phase: never read past the end of the current section. */
	if (count > dfil->todo)
		count = dfil->todo;
	result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
					file->f_flags & O_NONBLOCK,
					buf, count, ppos);
	if (result < 0)
		return result;
	dfil->todo -= result;
	return (result + done);
}
4063
/*
 * dvb_demux_read - read() entry point for a demux filter device.
 *
 * Serialized per-filter by dmxdevfilter->mutex.  A filter that reached
 * end-of-stream with an empty ring buffer reads as EOF (0).  Section
 * filters go through the header/payload state machine in
 * dvb_dmxdev_read_sec(); everything else reads raw from the ring buffer.
 * On successful reads the event bookkeeping is updated under the demux
 * spinlock; in PULL playback mode waiters are woken since the producer
 * may be stalled on buffer space.  An -EOVERFLOW result triggers an
 * automatic buffer flush (see the overflow_auto_flush module parameter).
 *
 * Returns bytes read, 0 on EOF, or a negative errno.
 */
static ssize_t
dvb_demux_read(struct file *file, char __user *buf, size_t count,
	       loff_t *ppos)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	int ret;

	if (mutex_lock_interruptible(&dmxdevfilter->mutex))
		return -ERESTARTSYS;

	/* Drained past end-of-stream: report EOF. */
	if (dmxdevfilter->eos_state &&
		dvb_ringbuffer_empty(&dmxdevfilter->buffer)) {
		mutex_unlock(&dmxdevfilter->mutex);
		return 0;
	}

	if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
		ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
	else
		ret = dvb_dmxdev_buffer_read(dmxdevfilter,
					&dmxdevfilter->buffer,
					file->f_flags & O_NONBLOCK,
					buf, count, ppos);

	if (ret > 0) {
		dvb_dmxdev_notify_data_read(dmxdevfilter, ret);
		spin_lock_irq(&dmxdevfilter->dev->lock);
		dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
		spin_unlock_irq(&dmxdevfilter->dev->lock);

		/*
		 * in PULL mode, we might be stalling on
		 * event queue, so need to wake-up waiters
		 */
		if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL)
			wake_up_all(&dmxdevfilter->buffer.queue);
	} else if (ret == -EOVERFLOW) {
		dvb_dmxdev_auto_flush_buffer(dmxdevfilter,
			&dmxdevfilter->buffer);
	}

	mutex_unlock(&dmxdevfilter->mutex);
	return ret;
}
4108
/*
 * dvb_demux_do_ioctl - dispatch a demux ioctl after dvb_usercopy has
 * copied the argument into kernel space (parg).
 *
 * The device-wide dmxdev->mutex is held across the whole switch; most
 * commands additionally take the per-filter mutex.  Two unlock styles
 * coexist below: some cases return -ERESTARTSYS directly (unlocking
 * dmxdev->mutex themselves first), others set ret and break so the
 * common unlock at the end runs — both are balanced, just inconsistent.
 *
 * Returns 0 or a negative errno; -ENOIOCTLCMD for unknown commands.
 */
static int dvb_demux_do_ioctl(struct file *file,
			      unsigned int cmd, void *parg)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	struct dmxdev *dmxdev = dmxdevfilter->dev;
	unsigned long arg = (unsigned long)parg;
	int ret = 0;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case DMX_START:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		/* Cannot start a filter that was never configured. */
		if (dmxdevfilter->state < DMXDEV_STATE_SET)
			ret = -EINVAL;
		else
			ret = dvb_dmxdev_filter_start(dmxdevfilter);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_STOP:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_filter_stop(dmxdevfilter);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_FILTER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_PES_FILTER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_BUFFER_SIZE:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		/* arg is the raw integer argument, not a pointer. */
		ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_BUFFER_MODE:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_set_buffer_mode(dmxdevfilter,
				*(enum dmx_buffer_mode *)parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_BUFFER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_set_buffer(dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_GET_BUFFER_STATUS:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_get_buffer_status(dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_RELEASE_DATA:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_release_data(dmxdevfilter, arg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_GET_PES_PIDS:
		/* Optional demux hook; absent means unsupported. */
		if (!dmxdev->demux->get_pes_pids) {
			ret = -EINVAL;
			break;
		}
		dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
		break;

	case DMX_GET_CAPS:
		if (!dmxdev->demux->get_caps) {
			ret = -EINVAL;
			break;
		}
		ret = dmxdev->demux->get_caps(dmxdev->demux, parg);
		break;

	case DMX_SET_SOURCE:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_set_source(dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_TS_PACKET_FORMAT:
		if (!dmxdev->demux->set_tsp_format) {
			ret = -EINVAL;
			break;
		}

		/* Packet format is fixed once the filter is running. */
		if (dmxdevfilter->state >= DMXDEV_STATE_GO) {
			ret = -EBUSY;
			break;
		}
		ret = dmxdev->demux->set_tsp_format(
				dmxdev->demux,
				*(enum dmx_tsp_format_t *)parg);
		break;

	case DMX_SET_TS_OUT_FORMAT:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}

		ret = dvb_dmxdev_set_tsp_out_format(dmxdevfilter,
				*(enum dmx_tsp_format_t *)parg);

		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_DECODER_BUFFER_SIZE:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}

		ret = dvb_dmxdev_set_decoder_buffer_size(dmxdevfilter, arg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_PLAYBACK_MODE:
		/* Device-wide setting: only dmxdev->mutex is needed. */
		ret = dvb_dmxdev_set_playback_mode(
				dmxdevfilter,
				*(enum dmx_playback_mode_t *)parg);
		break;

	case DMX_GET_EVENT:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_get_event(dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_GET_STC:
		if (!dmxdev->demux->get_stc) {
			ret = -EINVAL;
			break;
		}
		ret = dmxdev->demux->get_stc(dmxdev->demux,
				((struct dmx_stc *)parg)->num,
				&((struct dmx_stc *)parg)->stc,
				&((struct dmx_stc *)parg)->base);
		break;

	case DMX_ADD_PID:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			ret = -ERESTARTSYS;
			break;
		}
		ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_REMOVE_PID:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			ret = -ERESTARTSYS;
			break;
		}
		ret = dvb_dmxdev_remove_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_DECODER_BUFFER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			ret = -ERESTARTSYS;
			break;
		}
		ret = dvb_dmxdev_set_decoder_buffer(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_SECURE_MODE:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			ret = -ERESTARTSYS;
			break;
		}
		ret = dvb_dmxdev_set_secure_mode(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_CIPHER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			ret = -ERESTARTSYS;
			break;
		}
		ret = dvb_dmxdev_set_cipher(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_REUSE_DECODER_BUFFER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_reuse_decoder_buf(dmxdevfilter, arg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_EVENTS_MASK:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_set_event_mask(dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_GET_EVENTS_MASK:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_get_event_mask(dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_INDEXING_PARAMS:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_set_indexing_params(dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_TS_INSERTION:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_set_ts_insertion(dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_ABORT_TS_INSERTION:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_abort_ts_insertion(dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_GET_SCRAMBLING_BITS:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_get_scrambling_bits(dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_FLUSH_BUFFER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_flush_buffer(dmxdevfilter);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	default:
		pr_err("%s: unknown ioctl code (0x%x)\n",
			__func__, cmd);
		ret = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dmxdev->mutex);
	return ret;
}
4420
/*
 * dvb_demux_ioctl - unlocked_ioctl entry point; dvb_usercopy handles the
 * user<->kernel argument copy and delegates to dvb_demux_do_ioctl().
 */
static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
}
4426
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304427#ifdef CONFIG_COMPAT
4428
/* 32-bit layout of struct dmx_set_ts_insertion for compat ioctls. */
struct dmx_set_ts_insertion32 {
	__u32 identifier;	/* caller-chosen ID of the insertion buffer */
	__u32 repetition_time;	/* insertion period (units per native API) */
	compat_uptr_t ts_packets;	/* 32-bit user pointer to TS packet data */
	compat_size_t size;	/* size of the ts_packets buffer in bytes */
};
4435
4436static long dmx_set_ts_insertion32_wrapper(struct file *file, unsigned int cmd,
4437 unsigned long arg)
4438{
4439 int ret;
4440 struct dmx_set_ts_insertion32 dmx_ts_insert32;
4441 struct dmx_set_ts_insertion dmx_ts_insert;
4442
4443 ret = copy_from_user(&dmx_ts_insert32, (void __user *)arg,
4444 sizeof(dmx_ts_insert32));
4445 if (ret) {
4446 pr_err(
4447 "%s: copy dmx_set_ts_insertion32 from user failed, ret=%d\n",
4448 __func__, ret);
4449 return -EFAULT;
4450 }
4451
4452 memset(&dmx_ts_insert, 0, sizeof(dmx_ts_insert));
4453 dmx_ts_insert.identifier = dmx_ts_insert32.identifier;
4454 dmx_ts_insert.repetition_time = dmx_ts_insert32.repetition_time;
4455 dmx_ts_insert.ts_packets = compat_ptr(dmx_ts_insert32.ts_packets);
4456 dmx_ts_insert.size = dmx_ts_insert32.size;
4457
4458 ret = dvb_demux_do_ioctl(file, DMX_SET_TS_INSERTION, &dmx_ts_insert);
4459
4460 return ret;
4461}
4462
4463#define DMX_SET_TS_INSERTION32 _IOW('o', 70, struct dmx_set_ts_insertion32)
4464
4465/*
4466 * compat ioctl is called whenever compatibility is required, i.e when a 32bit
4467 * process calls an ioctl for a 64bit kernel.
4468 */
4469static long dvb_demux_compat_ioctl(struct file *file, unsigned int cmd,
4470 unsigned long arg)
4471{
4472 long ret = 0;
4473
4474 switch (cmd) {
4475 case DMX_SET_TS_INSERTION32:
4476 ret = dmx_set_ts_insertion32_wrapper(file, cmd, arg);
4477 break;
4478 case DMX_SET_TS_INSERTION:
4479 pr_err("%s: 64bit ioctl code (0x%lx) used by 32bit userspace\n",
4480 __func__, DMX_SET_TS_INSERTION);
4481 ret = -ENOIOCTLCMD;
4482 break;
4483 default:
4484 /* use regular ioctl */
4485 ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
4486 }
4487
4488 return ret;
4489}
4490#endif
4491
/*
 * dvb_demux_poll - poll() entry point for a demux filter device.
 *
 * Readable (POLLIN|POLLRDNORM) when data or a sticky buffer error is
 * pending; POLLERR on any buffer error; POLLPRI on overflow or when
 * enough events have accumulated to cross the configured wakeup
 * threshold.  Filters not in GO/DONE/TIMEDOUT report no events.
 *
 * NOTE(review): returning -EINVAL from a function whose return type is an
 * unsigned poll mask hands the VFS a bogus all-bits mask; POLLERR would
 * be the conventional value — confirm before changing.
 */
static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	unsigned int mask = 0;

	if (!dmxdevfilter)
		return -EINVAL;

	poll_wait(file, &dmxdevfilter->buffer.queue, wait);

	if (dmxdevfilter->state != DMXDEV_STATE_GO &&
	    dmxdevfilter->state != DMXDEV_STATE_DONE &&
	    dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
		return 0;

	if (dmxdevfilter->buffer.error) {
		/* Error is readable too, so read() can return it to userspace. */
		mask |= (POLLIN | POLLRDNORM | POLLERR);
		if (dmxdevfilter->buffer.error == -EOVERFLOW)
			mask |= POLLPRI;
	}

	if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
		mask |= (POLLIN | POLLRDNORM);

	/* Enough queued events to wake event consumers. */
	if (dmxdevfilter->events.wakeup_events_counter >=
		dmxdevfilter->events.event_mask.wakeup_threshold)
		mask |= POLLPRI;

	return mask;
}
4522
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304523static int dvb_demux_mmap(struct file *filp, struct vm_area_struct *vma)
4524{
4525 struct dmxdev_filter *dmxdevfilter = filp->private_data;
4526 struct dmxdev *dmxdev = dmxdevfilter->dev;
4527 int ret;
4528 int vma_size;
4529 int buffer_size;
4530
4531 vma_size = vma->vm_end - vma->vm_start;
4532
4533 if (vma->vm_flags & VM_WRITE)
4534 return -EINVAL;
4535
4536 if (mutex_lock_interruptible(&dmxdev->mutex))
4537 return -ERESTARTSYS;
4538
4539 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
4540 mutex_unlock(&dmxdev->mutex);
4541 return -ERESTARTSYS;
4542 }
4543
4544 if ((!dmxdevfilter->buffer.data) ||
4545 (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL)) {
4546 mutex_unlock(&dmxdevfilter->mutex);
4547 mutex_unlock(&dmxdev->mutex);
4548 return -EINVAL;
4549 }
4550
4551 /* Make sure requested mapping is not larger than buffer size */
4552 buffer_size = dmxdevfilter->buffer.size + (PAGE_SIZE-1);
4553 buffer_size = buffer_size & ~(PAGE_SIZE-1);
4554
4555 if (vma_size != buffer_size) {
4556 mutex_unlock(&dmxdevfilter->mutex);
4557 mutex_unlock(&dmxdev->mutex);
4558 return -EINVAL;
4559 }
4560
4561 ret = remap_vmalloc_range(vma, dmxdevfilter->buffer.data, 0);
4562 if (ret) {
4563 mutex_unlock(&dmxdevfilter->mutex);
4564 mutex_unlock(&dmxdev->mutex);
4565 return ret;
4566 }
4567
4568 vma->vm_flags |= VM_DONTDUMP;
4569 vma->vm_flags |= VM_DONTEXPAND;
4570
4571 mutex_unlock(&dmxdevfilter->mutex);
4572 mutex_unlock(&dmxdev->mutex);
4573
4574 return 0;
4575}
4576
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577static int dvb_demux_release(struct inode *inode, struct file *file)
4578{
Peter Beutner3ec4a302005-07-07 17:57:39 -07004579 struct dmxdev_filter *dmxdevfilter = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580 struct dmxdev *dmxdev = dmxdevfilter->dev;
Markus Rechberger57861b42007-04-14 10:19:18 -03004581 int ret;
4582
4583 ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);
4584
4585 mutex_lock(&dmxdev->mutex);
4586 dmxdev->dvbdev->users--;
4587 if(dmxdev->dvbdev->users==1 && dmxdev->exit==1) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304588 fops_put(file->f_op);
4589 file->f_op = NULL;
Markus Rechberger57861b42007-04-14 10:19:18 -03004590 mutex_unlock(&dmxdev->mutex);
4591 wake_up(&dmxdev->dvbdev->wait_queue);
4592 } else
4593 mutex_unlock(&dmxdev->mutex);
4594
4595 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596}
4597
Jan Engelhardt784e29d2009-01-11 06:12:43 -03004598static const struct file_operations dvb_demux_fops = {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004599 .owner = THIS_MODULE,
4600 .read = dvb_demux_read,
Arnd Bergmann16ef8de2010-04-27 00:24:00 +02004601 .unlocked_ioctl = dvb_demux_ioctl,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004602 .open = dvb_demux_open,
4603 .release = dvb_demux_release,
4604 .poll = dvb_demux_poll,
Arnd Bergmann6038f372010-08-15 18:52:59 +02004605 .llseek = default_llseek,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304606 .mmap = dvb_demux_mmap,
4607#ifdef CONFIG_COMPAT
4608 .compat_ioctl = dvb_demux_compat_ioctl,
4609#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610};
4611
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004612static const struct dvb_device dvbdev_demux = {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004613 .priv = NULL,
4614 .users = 1,
4615 .writers = 1,
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004616#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
Mauro Carvalho Chehabe4fd3bc2015-02-18 12:09:27 -03004617 .name = "dvb-demux",
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004618#endif
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004619 .fops = &dvb_demux_fops
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620};
4621
Arnd Bergmann16ef8de2010-04-27 00:24:00 +02004622static int dvb_dvr_do_ioctl(struct file *file,
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004623 unsigned int cmd, void *parg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004624{
Johannes Stezenbach0c53c702005-05-16 21:54:24 -07004625 struct dvb_device *dvbdev = file->private_data;
4626 struct dmxdev *dmxdev = dvbdev->priv;
Andrea Odettia095be42008-04-20 19:14:51 -03004627 unsigned long arg = (unsigned long)parg;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004628 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004629
Ingo Molnar3593cab2006-02-07 06:49:14 -02004630 if (mutex_lock_interruptible(&dmxdev->mutex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004631 return -ERESTARTSYS;
4632
4633 switch (cmd) {
4634 case DMX_SET_BUFFER_SIZE:
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304635 ret = dvb_dvr_set_buffer_size(dmxdev, file->f_flags, arg);
4636 break;
4637
4638 case DMX_SET_BUFFER_MODE:
4639 ret = dvb_dvr_set_buffer_mode(dmxdev, file->f_flags,
4640 *(enum dmx_buffer_mode *)parg);
4641 break;
4642
4643 case DMX_SET_BUFFER:
4644 ret = dvb_dvr_set_buffer(dmxdev, file->f_flags, parg);
4645 break;
4646
4647 case DMX_GET_BUFFER_STATUS:
4648 ret = dvb_dvr_get_buffer_status(dmxdev, file->f_flags, parg);
4649 break;
4650
4651 case DMX_RELEASE_DATA:
4652 ret = dvb_dvr_release_data(dmxdev, file->f_flags, arg);
4653 break;
4654
4655 case DMX_FEED_DATA:
4656 ret = dvb_dvr_feed_data(dmxdev, file->f_flags, arg);
4657 break;
4658
4659 case DMX_GET_EVENT:
4660 ret = dvb_dvr_get_event(dmxdev, file->f_flags, parg);
4661 break;
4662
4663 case DMX_PUSH_OOB_COMMAND:
4664 ret = dvb_dvr_push_oob_cmd(dmxdev, file->f_flags, parg);
4665 break;
4666
4667 case DMX_FLUSH_BUFFER:
4668 ret = dvb_dvr_flush_buffer(dmxdev, file->f_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669 break;
4670
4671 default:
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304672 ret = -ENOIOCTLCMD;
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004673 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004674 }
Ingo Molnar3593cab2006-02-07 06:49:14 -02004675 mutex_unlock(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676 return ret;
4677}
4678
Arnd Bergmann16ef8de2010-04-27 00:24:00 +02004679static long dvb_dvr_ioctl(struct file *file,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304680 unsigned int cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004681{
Arnd Bergmann72024f12010-09-11 19:56:45 +02004682 return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004683}
4684
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304685#ifdef CONFIG_COMPAT
4686static long dvb_dvr_compat_ioctl(struct file *file, unsigned int cmd,
4687 unsigned long arg)
4688{
4689 return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
4690}
4691#endif
4692
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004693static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694{
Johannes Stezenbach0c53c702005-05-16 21:54:24 -07004695 struct dvb_device *dvbdev = file->private_data;
4696 struct dmxdev *dmxdev = dvbdev->priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697 unsigned int mask = 0;
4698
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304699 pr_debug("function : %s\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004701 if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304702 poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
4703
4704 if (dmxdev->dvr_buffer.error) {
4705 mask |= (POLLIN | POLLRDNORM | POLLERR);
4706 if (dmxdev->dvr_buffer.error == -EOVERFLOW)
4707 mask |= POLLPRI;
4708 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004709
Andreas Oberritter34731df2006-03-14 17:31:01 -03004710 if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304711 mask |= (POLLIN | POLLRDNORM);
4712
4713 if (dmxdev->dvr_output_events.wakeup_events_counter >=
4714 dmxdev->dvr_output_events.event_mask.wakeup_threshold)
4715 mask |= POLLPRI;
4716 } else {
4717 poll_wait(file, &dmxdev->dvr_input_buffer.queue, wait);
4718 if (dmxdev->dvr_input_buffer.error)
4719 mask |= (POLLOUT | POLLRDNORM | POLLPRI | POLLERR);
4720
4721 if (dvb_ringbuffer_free(&dmxdev->dvr_input_buffer))
4722 mask |= (POLLOUT | POLLRDNORM | POLLPRI);
4723 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004724
4725 return mask;
4726}
4727
Alexey Dobriyan828c0952009-10-01 15:43:56 -07004728static const struct file_operations dvb_dvr_fops = {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004729 .owner = THIS_MODULE,
4730 .read = dvb_dvr_read,
4731 .write = dvb_dvr_write,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304732 .mmap = dvb_dvr_mmap,
Arnd Bergmann16ef8de2010-04-27 00:24:00 +02004733 .unlocked_ioctl = dvb_dvr_ioctl,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304734#ifdef CONFIG_COMPAT
4735 .compat_ioctl = dvb_dvr_compat_ioctl,
4736#endif
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004737 .open = dvb_dvr_open,
4738 .release = dvb_dvr_release,
4739 .poll = dvb_dvr_poll,
Arnd Bergmann6038f372010-08-15 18:52:59 +02004740 .llseek = default_llseek,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741};
4742
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004743static const struct dvb_device dvbdev_dvr = {
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004744 .priv = NULL,
Trent Piepho5e85bd02006-03-30 15:53:32 -03004745 .readers = 1,
Markus Rechberger57861b42007-04-14 10:19:18 -03004746 .users = 1,
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004747#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
Mauro Carvalho Chehabe4fd3bc2015-02-18 12:09:27 -03004748 .name = "dvb-dvr",
Mauro Carvalho Chehab8afd52e2015-01-02 22:28:53 -03004749#endif
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004750 .fops = &dvb_dvr_fops
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751};
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304752
4753
4754/**
4755 * debugfs service to print active filters information.
4756 */
4757static int dvb_dmxdev_dbgfs_print(struct seq_file *s, void *p)
4758{
4759 int i;
4760 struct dmxdev *dmxdev = s->private;
4761 struct dmxdev_filter *filter;
4762 int active_count = 0;
4763 struct dmx_buffer_status buffer_status;
4764 struct dmx_scrambling_bits scrambling_bits;
4765 static const char * const pes_feeds[] = {"DEC", "PES", "DVR", "REC"};
4766 int ret;
4767
4768 if (!dmxdev)
4769 return 0;
4770
4771 for (i = 0; i < dmxdev->filternum; i++) {
4772 filter = &dmxdev->filter[i];
4773 if (filter->state >= DMXDEV_STATE_GO) {
4774 active_count++;
4775
4776 seq_printf(s, "filter_%02d - ", i);
4777
4778 if (filter->type == DMXDEV_TYPE_SEC) {
4779 seq_puts(s, "type: SEC, ");
4780 seq_printf(s, "PID %04d ",
4781 filter->params.sec.pid);
4782 scrambling_bits.pid = filter->params.sec.pid;
4783 } else {
4784 seq_printf(s, "type: %s, ",
4785 pes_feeds[filter->params.pes.output]);
4786 seq_printf(s, "PID: %04d ",
4787 filter->params.pes.pid);
4788 scrambling_bits.pid = filter->params.pes.pid;
4789 }
4790
4791 dvb_dmxdev_get_scrambling_bits(filter,
4792 &scrambling_bits);
4793
4794 if (filter->type == DMXDEV_TYPE_PES &&
4795 filter->params.pes.output == DMX_OUT_TS_TAP)
4796 ret = dvb_dvr_get_buffer_status(dmxdev,
4797 O_RDONLY, &buffer_status);
4798 else
4799 ret = dvb_dmxdev_get_buffer_status(filter,
4800 &buffer_status);
4801 if (!ret) {
4802 seq_printf(s, "size: %08d, ",
4803 buffer_status.size);
4804 seq_printf(s, "fullness: %08d, ",
4805 buffer_status.fullness);
4806 seq_printf(s, "error: %d, ",
4807 buffer_status.error);
4808 }
4809
4810 seq_printf(s, "scramble: %d, ",
4811 scrambling_bits.value);
4812 seq_printf(s, "secured: %d\n",
4813 filter->sec_mode.is_secured);
4814 }
4815 }
4816
4817 if (!active_count)
4818 seq_puts(s, "No active filters\n");
4819
4820 return 0;
4821}
4822
/* debugfs open: bind the seq_file printer to this dmxdev instance */
static int dvb_dmxdev_dbgfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dvb_dmxdev_dbgfs_print, inode->i_private);
}
4827
/* File operations for the debugfs "filters" entry (standard seq_file glue) */
static const struct file_operations dbgfs_filters_fops = {
	.open = dvb_dmxdev_dbgfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
4835
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004836int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004837{
4838 int i;
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304839 struct dmx_caps caps;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004840
4841 if (dmxdev->demux->open(dmxdev->demux) < 0)
4842 return -EUSERS;
4843
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004844 dmxdev->filter = vmalloc(dmxdev->filternum * sizeof(struct dmxdev_filter));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004845 if (!dmxdev->filter)
4846 return -ENOMEM;
4847
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304848 dmxdev->playback_mode = DMX_PB_MODE_PUSH;
4849 dmxdev->demux->dvr_input_protected = 0;
4850
Ingo Molnar3593cab2006-02-07 06:49:14 -02004851 mutex_init(&dmxdev->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852 spin_lock_init(&dmxdev->lock);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304853 spin_lock_init(&dmxdev->dvr_in_lock);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004854 for (i = 0; i < dmxdev->filternum; i++) {
4855 dmxdev->filter[i].dev = dmxdev;
4856 dmxdev->filter[i].buffer.data = NULL;
4857 dvb_dmxdev_filter_state_set(&dmxdev->filter[i],
4858 DMXDEV_STATE_FREE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859 }
4860
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004861 dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304862 DVB_DEVICE_DEMUX, 0);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004863 dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304864 dmxdev, DVB_DEVICE_DVR, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865
Andreas Oberritter34731df2006-03-14 17:31:01 -03004866 dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);
Udaya Bhaskara Reddy Mallavarapuba29a3e82017-06-12 14:57:05 +05304867 dvb_ringbuffer_init(&dmxdev->dvr_input_buffer, NULL, 8192);
4868
4869 /* Disable auto buffer flushing if plugin does not allow it */
4870 if (dmxdev->demux->get_caps) {
4871 dmxdev->demux->get_caps(dmxdev->demux, &caps);
4872 if (!(caps.caps & DMX_CAP_AUTO_BUFFER_FLUSH))
4873 overflow_auto_flush = 0;
4874 }
4875
4876 if (dmxdev->demux->debugfs_demux_dir)
4877 debugfs_create_file("filters", 0444,
4878 dmxdev->demux->debugfs_demux_dir, dmxdev,
4879 &dbgfs_filters_fops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004880
4881 return 0;
4882}
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004883
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884EXPORT_SYMBOL(dvb_dmxdev_init);
4885
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004886void dvb_dmxdev_release(struct dmxdev *dmxdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887{
Markus Rechberger57861b42007-04-14 10:19:18 -03004888 dmxdev->exit=1;
4889 if (dmxdev->dvbdev->users > 1) {
4890 wait_event(dmxdev->dvbdev->wait_queue,
4891 dmxdev->dvbdev->users==1);
4892 }
4893 if (dmxdev->dvr_dvbdev->users > 1) {
4894 wait_event(dmxdev->dvr_dvbdev->wait_queue,
4895 dmxdev->dvr_dvbdev->users==1);
4896 }
4897
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898 dvb_unregister_device(dmxdev->dvbdev);
4899 dvb_unregister_device(dmxdev->dvr_dvbdev);
4900
4901 vfree(dmxdev->filter);
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004902 dmxdev->filter = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903 dmxdev->demux->close(dmxdev->demux);
4904}
Andreas Oberritterf705e6e2006-03-10 15:22:31 -03004905
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906EXPORT_SYMBOL(dvb_dmxdev_release);