/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
        /* Any kernel allocation (gem_create_object hook) before it
         * gets another type set.
         */
        VC4_BO_TYPE_KERNEL,
        VC4_BO_TYPE_V3D,
        VC4_BO_TYPE_V3D_SHADER,
        VC4_BO_TYPE_DUMB,
        VC4_BO_TYPE_BIN,
        VC4_BO_TYPE_RCL,
        VC4_BO_TYPE_BCL,
        VC4_BO_TYPE_KERNEL_CACHE,
        VC4_BO_TYPE_COUNT
};
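
/* These types also serve as BO labels: struct vc4_bo::label holds one of
 * them (or VC4_BO_TYPE_COUNT + i for user-allocated labels), and the
 * per-label allocation stats live in vc4_dev::bo_labels below.
 */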

struct vc4_dev {
        struct drm_device *dev;

        struct vc4_hdmi *hdmi;
        struct vc4_hvs *hvs;
        struct vc4_v3d *v3d;
        struct vc4_dpi *dpi;
        struct vc4_dsi *dsi1;
        struct vc4_vec *vec;

        struct drm_fbdev_cma *fbdev;

        struct vc4_hang_state *hang_state;

        /* The kernel-space BO cache.  Tracks buffers that have been
         * unreferenced by all other users (refcounts of 0!) but not
         * yet freed, so we can do cheap allocations.
         */
        struct vc4_bo_cache {
                /* Array of list heads for entries in the BO cache,
                 * based on number of pages, so we can do O(1) lookups
                 * in the cache when allocating.
                 */
                struct list_head *size_list;
                uint32_t size_list_size;

                /* List of all BOs in the cache, ordered by age, so we
                 * can do O(1) lookups when trying to free old
                 * buffers.
                 */
                struct list_head time_list;
                struct work_struct time_work;
                struct timer_list time_timer;
        } bo_cache;

        u32 num_labels;
        struct vc4_label {
                const char *name;
                u32 num_allocated;
                u32 size_allocated;
        } *bo_labels;

        /* Protects bo_cache and bo_labels. */
        struct mutex bo_lock;

        /* Purgeable BO pool. All BOs in this pool can have their memory
         * reclaimed if the driver is unable to allocate new BOs. We also
         * keep stats related to the purge mechanism here.
         */
        struct {
                struct list_head list;
                unsigned int num;
                size_t size;
                unsigned int purged_num;
                size_t purged_size;
                struct mutex lock;
        } purgeable;

        uint64_t dma_fence_context;

        /* Sequence number for the last job queued in bin_job_list.
         * Starts at 0 (no jobs emitted).
         */
        uint64_t emit_seqno;

        /* Sequence number for the last completed job on the GPU.
         * Starts at 0 (no jobs completed).
         */
        uint64_t finished_seqno;

        /* List of all struct vc4_exec_info for jobs to be executed in
         * the binner.  The first job in the list is the one currently
         * programmed into ct0ca for execution.
         */
        struct list_head bin_job_list;

        /* List of all struct vc4_exec_info for jobs that have
         * completed binning and are ready for rendering.  The first
         * job in the list is the one currently programmed into ct1ca
         * for execution.
         */
        struct list_head render_job_list;

        /* List of the finished vc4_exec_infos waiting to be freed by
         * job_done_work.
         */
        struct list_head job_done_list;
        /* Spinlock used to synchronize the job_list and seqno
         * accesses between the IRQ handler and GEM ioctls.
         */
        spinlock_t job_lock;
        wait_queue_head_t job_wait_queue;
        struct work_struct job_done_work;

        /* List of struct vc4_seqno_cb for callbacks to be made from a
         * workqueue when the given seqno is passed.
         */
        struct list_head seqno_cb_list;

        /* The memory used for storing binner tile alloc, tile state,
         * and overflow memory allocations.  This is freed when V3D
         * powers down.
         */
        struct vc4_bo *bin_bo;

        /* Size of blocks allocated within bin_bo. */
        uint32_t bin_alloc_size;

        /* Bitmask of the bin_alloc_size chunks in bin_bo that are
         * used.
         */
        uint32_t bin_alloc_used;

        /* Bitmask of the current bin_alloc used for overflow memory. */
        uint32_t bin_alloc_overflow;

        struct work_struct overflow_mem_work;

        int power_refcount;

        /* Mutex controlling the power refcount. */
        struct mutex power_lock;

        struct {
                struct timer_list timer;
                struct work_struct reset_work;
        } hangcheck;

        struct semaphore async_modeset;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
        return (struct vc4_dev *)dev->dev_private;
}

struct vc4_bo {
        struct drm_gem_cma_object base;

        /* seqno of the last job to render using this BO. */
        uint64_t seqno;

        /* seqno of the last job to use the RCL to write to this BO.
         *
         * Note that this doesn't include binner overflow memory
         * writes.
         */
        uint64_t write_seqno;

        bool t_format;

        /* List entry for the BO's position in either
         * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
         */
        struct list_head unref_head;

        /* Time in jiffies when the BO was put in vc4->bo_cache. */
        unsigned long free_time;

        /* List entry for the BO's position in vc4_dev->bo_cache.size_list */
        struct list_head size_head;

        /* Struct for shader validation state, if created by
         * DRM_IOCTL_VC4_CREATE_SHADER_BO.
         */
        struct vc4_validated_shader_info *validated_shader;

        /* Normally (resv == &_resv), except for imported BOs. */
        struct reservation_object *resv;
        struct reservation_object _resv;

        /* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
         * for user-allocated labels.
         */
        int label;

        /* Count the number of active users. This is needed to determine
         * whether we can move the BO to the purgeable list or not (when the BO
         * is used by the GPU or the display engine we can't purge it).
         */
        refcount_t usecnt;

        /* Store purgeable/purged state here */
        u32 madv;
        struct mutex madv_lock;
};

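/* The cast in to_vc4_bo() (and in the other cast-style helpers below) is
 * valid because the embedded base object is the first member of the
 * wrapping struct, so both share the same address.
 */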
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
        return (struct vc4_bo *)bo;
}

struct vc4_fence {
        struct dma_fence base;
        struct drm_device *dev;
        /* vc4 seqno for signaled() test */
        uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
        return (struct vc4_fence *)fence;
}

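/* A vc4_seqno_cb is registered with vc4_queue_seqno_cb(); its func is
 * then called from a workqueue once the requested seqno has been passed
 * (see seqno_cb_list in struct vc4_dev above).
 */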
struct vc4_seqno_cb {
        struct work_struct work;
        uint64_t seqno;
        void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
        struct vc4_dev *vc4;
        struct platform_device *pdev;
        void __iomem *regs;
        struct clk *clk;
};

struct vc4_hvs {
        struct platform_device *pdev;
        void __iomem *regs;
        u32 __iomem *dlist;

        /* Memory manager for CRTCs to allocate space in the display
         * list.  Units are dwords.
         */
        struct drm_mm dlist_mm;
        /* Memory manager for the LBM memory used by HVS scaling. */
        struct drm_mm lbm_mm;
        spinlock_t mm_lock;

        struct drm_mm_node mitchell_netravali_filter;
};

struct vc4_plane {
        struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
        return (struct vc4_plane *)plane;
}

enum vc4_encoder_type {
        VC4_ENCODER_TYPE_NONE,
        VC4_ENCODER_TYPE_HDMI,
        VC4_ENCODER_TYPE_VEC,
        VC4_ENCODER_TYPE_DSI0,
        VC4_ENCODER_TYPE_DSI1,
        VC4_ENCODER_TYPE_SMI,
        VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
        struct drm_encoder base;
        enum vc4_encoder_type type;
        u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
        return container_of(encoder, struct vc4_encoder, base);
}

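/* Register access helpers for the V3D and HVS blocks; they assume a
 * local "struct vc4_dev *vc4" is in scope at the call site.
 */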
#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)

struct vc4_exec_info {
        /* Sequence number for this bin/render job. */
        uint64_t seqno;

        /* Latest write_seqno of any BO that binning depends on. */
        uint64_t bin_dep_seqno;

        struct dma_fence *fence;

        /* Last current addresses the hardware was processing when the
         * hangcheck timer checked on us.
         */
        uint32_t last_ct0ca, last_ct1ca;

        /* Kernel-space copy of the ioctl arguments */
        struct drm_vc4_submit_cl *args;

        /* This is the array of BOs that were looked up at the start of exec.
         * Command validation will use indices into this array.
         */
        struct drm_gem_cma_object **bo;
        uint32_t bo_count;

        /* List of BOs that are being written by the RCL.  Other than
         * the binner temporary storage, this is all the BOs written
         * by the job.
         */
        struct drm_gem_cma_object *rcl_write_bo[4];
        uint32_t rcl_write_bo_count;

        /* Pointers for our position in vc4->job_list */
        struct list_head head;

        /* List of other BOs used in the job that need to be released
         * once the job is complete.
         */
        struct list_head unref_list;

        /* Current unvalidated indices into @bo loaded by the non-hardware
         * VC4_PACKET_GEM_HANDLES.
         */
        uint32_t bo_index[2];

        /* This is the BO where we store the validated command lists, shader
         * records, and uniforms.
         */
        struct drm_gem_cma_object *exec_bo;

        /**
         * This tracks the per-shader-record state (packet 64) that
         * determines the length of the shader record and the offset
         * it's expected to be found at.  It gets read in from the
         * command lists.
         */
        struct vc4_shader_state {
                uint32_t addr;
                /* Maximum vertex index referenced by any primitive using this
                 * shader state.
                 */
                uint32_t max_index;
        } *shader_state;

        /** How many shader states the user declared they were using. */
        uint32_t shader_state_size;
        /** How many shader state records the validator has seen. */
        uint32_t shader_state_count;

        bool found_tile_binning_mode_config_packet;
        bool found_start_tile_binning_packet;
        bool found_increment_semaphore_packet;
        bool found_flush;
        uint8_t bin_tiles_x, bin_tiles_y;
        /* Physical address of the start of the tile alloc array
         * (where each tile's binned CL will start)
         */
        uint32_t tile_alloc_offset;
        /* Bitmask of which binner slots are freed when this job completes. */
        uint32_t bin_slots;

        /**
         * Computed addresses pointing into exec_bo where we start the
         * bin thread (ct0) and render thread (ct1).
         */
        uint32_t ct0ca, ct0ea;
        uint32_t ct1ca, ct1ea;

        /* Pointer to the unvalidated bin CL (if present). */
        void *bin_u;

        /* Pointers to the shader recs.  The paddr gets incremented as CL
         * packets are relocated in validate_gl_shader_state, and the vaddrs
         * (u and v) get incremented and the size decremented as the shader
         * recs themselves are validated.
         */
        void *shader_rec_u;
        void *shader_rec_v;
        uint32_t shader_rec_p;
        uint32_t shader_rec_size;

        /* Pointers to the uniform data.  These pointers are incremented, and
         * size decremented, as each batch of uniforms is uploaded.
         */
        void *uniforms_u;
        void *uniforms_v;
        uint32_t uniforms_p;
        uint32_t uniforms_size;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
        return list_first_entry_or_null(&vc4->bin_job_list,
                                        struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
        return list_first_entry_or_null(&vc4->render_job_list,
                                        struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
        if (list_empty(&vc4->render_job_list))
                return NULL;
        return list_last_entry(&vc4->render_job_list,
                               struct vc4_exec_info, head);
}

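/* Rough flow sketch (the actual scheduling lives in vc4_gem.c and
 * vc4_irq.c): the first entry of bin_job_list / render_job_list is the
 * job whose command list is programmed into ct0ca / ct1ca, and a job
 * moves from the former to the latter via vc4_move_job_to_render()
 * once its binning work is done.
 */
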
/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
        bool is_direct;
        uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
        uint32_t uniforms_size;
        uint32_t uniforms_src_size;
        uint32_t num_texture_samples;
        struct vc4_texture_sample_info *texture_samples;

        uint32_t num_uniform_addr_offsets;
        uint32_t *uniform_addr_offsets;

        bool is_threaded;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts.  Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
        int ret__ = 0; \
        while (!(COND)) { \
                if (time_after(jiffies, timeout__)) { \
                        if (!(COND)) \
                                ret__ = -ETIMEDOUT; \
                        break; \
                } \
                if (W && drm_can_sleep()) { \
                        msleep(W); \
                } else { \
                        cpu_relax(); \
                } \
        } \
        ret__; \
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
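
/* Usage sketch (REG and REG_DONE are illustrative placeholders, not real
 * vc4 registers): wait up to 100ms, sleeping ~1ms between polls, and get
 * -ETIMEDOUT back if the condition never became true:
 *
 *      if (wait_for(HVS_READ(REG) & REG_DONE, 100))
 *              DRM_ERROR("timed out waiting for REG_DONE\n");
 */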

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
                             bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
                                 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
int vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
                                                 struct dma_buf_attachment *attach,
                                                 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
                             bool in_vblank_irq, int *vpos, int *hpos,
                             ktime_t *stime, ktime_t *etime,
                             const struct drm_display_mode *mode);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
                       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
                       struct vc4_seqno_cb *cb, uint64_t seqno,
                       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
                                 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
                            struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
                    void *validated,
                    void *unvalidated,
                    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
                                      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
                        struct drm_gem_cma_object *fbo,
                        uint32_t offset, uint8_t tiling_format,
                        uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);