/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_PRIV_H_
#define FREEDRENO_PRIV_H_

#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include <stdio.h>
#include <assert.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "xf86atomic.h"

#include "util_double_list.h"
#include "util_math.h"

#include "freedreno_drmif.h"
#include "freedreno_ringbuffer.h"
#include "drm.h"

#ifndef TRUE
# define TRUE 1
#endif
#ifndef FALSE
# define FALSE 0
#endif

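/*
 * Per-backend device vtable: the kernel-interface backend that creates the
 * fd_device fills these in (bo allocation/import, pipe creation, teardown).
 */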
struct fd_device_funcs {
	int (*bo_new_handle)(struct fd_device *dev, uint32_t size,
			uint32_t flags, uint32_t *handle);
	struct fd_bo * (*bo_from_handle)(struct fd_device *dev,
			uint32_t size, uint32_t handle);
	struct fd_pipe * (*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
			unsigned prio);
	void (*destroy)(struct fd_device *dev);
};

struct fd_bo_bucket {
	uint32_t size;
	struct list_head list;
};

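/*
 * Cache of recently freed bo's, grouped into buckets by size: a bo whose
 * bo_reuse flag is set gets parked on a bucket list when freed (recording
 * free_time) so fd_bo_cache_alloc() can recycle it, and stale entries are
 * expired via fd_bo_cache_cleanup().
 */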
struct fd_bo_cache {
	struct fd_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;
};

struct fd_device {
	int fd;
	enum fd_version version;
	atomic_t refcnt;

	/* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
	 *
	 *   handle_table: maps handle to fd_bo
	 *   name_table: maps flink name to fd_bo
	 *
	 * We end up needing two tables because DRM_IOCTL_GEM_OPEN always
	 * returns a new handle, so we first need to figure out whether the bo
	 * is already open in this process before calling gem-open (see the
	 * lookup sketch below this struct).
	 */
	void *handle_table, *name_table;

	const struct fd_device_funcs *funcs;

	struct fd_bo_cache bo_cache;
	struct fd_bo_cache ring_cache;

	int closefd; /* call close(fd) upon destruction */

	/* just for valgrind: */
	int bo_size;
};
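
/*
 * Illustrative sketch (not the actual implementation, which lives in
 * freedreno_bo.c) of how handle_table is meant to be used to avoid the
 * "evil-twin" problem described above: before wrapping a GEM handle in a
 * new fd_bo, look it up and reuse the existing object if there is one.
 * Assumes the drmHash* helpers from xf86drm.h and that table_lock is held:
 *
 *    static struct fd_bo * lookup_bo_sketch(void *tbl, uint32_t key)
 *    {
 *        struct fd_bo *bo = NULL;
 *        if (!drmHashLookup(tbl, key, (void **)&bo))
 *            bo = fd_bo_ref(bo);     // already open in this process
 *        return bo;
 *    }
 */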

drm_private void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
drm_private void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
drm_private struct fd_bo * fd_bo_cache_alloc(struct fd_bo_cache *cache,
		uint32_t *size, uint32_t flags);
drm_private int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);
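
/*
 * Rough sketch of the intended allocation flow (hypothetical helper; the
 * real logic lives in freedreno_bo.c): recycle a cached bo when one fits,
 * otherwise fall back to the backend's bo_new_handle().  Note that
 * fd_bo_cache_alloc() takes the size by pointer so it can round the
 * request up to the bucket size it actually returns.
 *
 *    static int bo_handle_sketch(struct fd_device *dev, uint32_t *size,
 *            uint32_t flags, uint32_t *handle, struct fd_bo **bo)
 *    {
 *        *bo = fd_bo_cache_alloc(&dev->bo_cache, size, flags);
 *        if (*bo)
 *            return 0;
 *        return dev->funcs->bo_new_handle(dev, *size, flags, handle);
 *    }
 */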

/* for paths where @table_lock is already held: */
drm_private void fd_device_del_locked(struct fd_device *dev);

struct fd_pipe_funcs {
	struct fd_ringbuffer * (*ringbuffer_new)(struct fd_pipe *pipe, uint32_t size,
			enum fd_ringbuffer_flags flags);
	int (*get_param)(struct fd_pipe *pipe, enum fd_param_id param, uint64_t *value);
	int (*wait)(struct fd_pipe *pipe, uint32_t timestamp, uint64_t timeout);
	void (*destroy)(struct fd_pipe *pipe);
};

struct fd_pipe {
	struct fd_device *dev;
	enum fd_pipe_id id;
	uint32_t gpu_id;
	atomic_t refcnt;
	const struct fd_pipe_funcs *funcs;
};
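
/*
 * A pipe corresponds to one hw ring/engine (enum fd_pipe_id, e.g. 3d).
 * Illustrative usage via the public API from freedreno_drmif.h:
 *
 *    struct fd_pipe *pipe = fd_pipe_new(dev, FD_PIPE_3D);
 *    uint64_t gpu_id = 0;
 *    fd_pipe_get_param(pipe, FD_GPU_ID, &gpu_id);
 *    ...
 *    fd_pipe_del(pipe);
 */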

struct fd_ringbuffer_funcs {
	void * (*hostptr)(struct fd_ringbuffer *ring);
	int (*flush)(struct fd_ringbuffer *ring, uint32_t *last_start,
			int in_fence_fd, int *out_fence_fd);
	void (*grow)(struct fd_ringbuffer *ring, uint32_t size);
	void (*reset)(struct fd_ringbuffer *ring);
	void (*emit_reloc)(struct fd_ringbuffer *ring,
			const struct fd_reloc *reloc);
	uint32_t (*emit_reloc_ring)(struct fd_ringbuffer *ring,
			struct fd_ringbuffer *target, uint32_t cmd_idx);
	uint32_t (*cmd_count)(struct fd_ringbuffer *ring);
	void (*destroy)(struct fd_ringbuffer *ring);
};
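
/*
 * Note: emit_reloc() records a bo-address patch point in the command
 * stream, while emit_reloc_ring() emits a reference to one of another
 * ringbuffer's command buffers (selected by cmd_idx).  A minimal usage
 * sketch via the public API from freedreno_ringbuffer.h:
 *
 *    struct fd_ringbuffer *ring = fd_ringbuffer_new(pipe, 0x1000);
 *    fd_ringbuffer_emit(ring, 0x00000000);   // raw dword, for illustration
 *    fd_ringbuffer_flush(ring);
 *    fd_ringbuffer_del(ring);
 */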

struct fd_bo_funcs {
	int (*offset)(struct fd_bo *bo, uint64_t *offset);
	int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
	void (*cpu_fini)(struct fd_bo *bo);
	int (*madvise)(struct fd_bo *bo, int willneed);
	uint64_t (*iova)(struct fd_bo *bo);
	void (*destroy)(struct fd_bo *bo);
};

struct fd_bo {
	struct fd_device *dev;
	uint32_t size;
	uint32_t handle;
	uint32_t name;
	void *map;
	atomic_t refcnt;
	const struct fd_bo_funcs *funcs;

	enum {
		NO_CACHE = 0,
		BO_CACHE = 1,
		RING_CACHE = 2,
	} bo_reuse;

	struct list_head list; /* bucket-list entry */
	time_t free_time; /* time when added to bucket-list */
};
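
/*
 * Illustrative CPU-access sketch using only the public bo API from
 * freedreno_drmif.h (the cpu_prep/cpu_fini funcs above back
 * fd_bo_cpu_prep()/fd_bo_cpu_fini()); values here are placeholders, not a
 * recipe:
 *
 *    struct fd_bo *bo = fd_bo_new(dev, 0x1000, 0);
 *    uint32_t *ptr = fd_bo_map(bo);
 *    if (!fd_bo_cpu_prep(bo, pipe, DRM_FREEDRENO_PREP_WRITE)) {
 *        ptr[0] = 0xdeadbeef;        // safe to write while prep'd
 *        fd_bo_cpu_fini(bo);
 *    }
 *    fd_bo_del(bo);
 */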

drm_private struct fd_bo *fd_bo_new_ring(struct fd_device *dev,
		uint32_t size, uint32_t flags);

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

#define enable_debug 0 /* TODO make dynamic */

#define INFO_MSG(fmt, ...) \
		do { drmMsg("[I] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define DEBUG_MSG(fmt, ...) \
		do if (enable_debug) { drmMsg("[D] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define WARN_MSG(fmt, ...) \
		do { drmMsg("[W] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define ERROR_MSG(fmt, ...) \
		do { drmMsg("[E] " fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)

#define U642VOID(x) ((void *)(unsigned long)(x))
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
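
/* These convert between pointers and the u64 fields used in drm ioctl
 * payloads, e.g. (hypothetical submit struct):
 *
 *    req.bos = VOID2U64(bo_list);
 */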

static inline uint32_t
offset_bytes(void *end, void *start)
{
	return ((char *)end) - ((char *)start);
}

#if HAVE_VALGRIND
# include <memcheck.h>

/*
 * For tracking the backing memory (if valgrind is enabled, we force an
 * mmap for the purposes of tracking).
 */
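/*
 * VG_BO_ALLOC()/VG_BO_FREE() bracket the real lifetime of a bo's backing
 * memory, while VG_BO_RELEASE()/VG_BO_OBTAIN() below bracket the time a
 * bo spends parked in (and recycled from) the bo cache.
 */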
static inline void VG_BO_ALLOC(struct fd_bo *bo)
{
	if (bo && RUNNING_ON_VALGRIND) {
		VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
	}
}

static inline void VG_BO_FREE(struct fd_bo *bo)
{
	VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}

/*
 * For tracking bo structs that are in the buffer-cache, so that valgrind
 * doesn't attribute ownership to the first one to allocate the recycled
 * bo.
 *
 * Note that the list_head in fd_bo is used to track the buffers in cache,
 * so error reporting is disabled on that range while they are in cache,
 * so that valgrind doesn't squawk about the list traversal.
 */
static inline void VG_BO_RELEASE(struct fd_bo *bo)
{
	if (RUNNING_ON_VALGRIND) {
		VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
		VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
		VALGRIND_FREELIKE_BLOCK(bo->map, 0);
	}
}
static inline void VG_BO_OBTAIN(struct fd_bo *bo)
{
	if (RUNNING_ON_VALGRIND) {
		VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
		VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
		VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
	}
}
#else
static inline void VG_BO_ALLOC(struct fd_bo *bo) {}
static inline void VG_BO_FREE(struct fd_bo *bo) {}
static inline void VG_BO_RELEASE(struct fd_bo *bo) {}
static inline void VG_BO_OBTAIN(struct fd_bo *bo) {}
#endif


#endif /* FREEDRENO_PRIV_H_ */