/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _AMDGPU_INTERNAL_H_
#define _AMDGPU_INTERNAL_H_

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <pthread.h>
#include "xf86atomic.h"
#include "amdgpu.h"
#include "util_double_list.h"

#define AMDGPU_CS_MAX_RINGS 8
/* do not use the macros below if y is not a power-of-two value */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define ROUND_UP(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))
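
/*
 * Worked example (illustrative values, not from the code above): with
 * x = 13 and y = 8, __round_mask(13, 8) == 7, so
 *   ROUND_UP(13, 8)   == ((12 | 7) + 1) == 16
 *   ROUND_DOWN(13, 8) == (13 & ~7)      == 8
 * With a non-power-of-two y such as 6, the mask 5 (0b101) is not
 * contiguous and ROUND_DOWN(13, 6) yields 8 instead of 12, which is
 * why the warning above applies.
 */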

#define AMDGPU_INVALID_VA_ADDRESS	0xffffffffffffffff

struct amdgpu_bo_va_hole {
	struct list_head list;
	uint64_t offset;
	uint64_t size;
};

struct amdgpu_bo_va_mgr {
	atomic_t refcount;
	/* the start virtual address */
	uint64_t va_offset;
	uint64_t va_max;
	struct list_head va_holes;
	pthread_mutex_t bo_va_mutex;
	uint32_t va_alignment;
};
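
/*
 * Illustrative sketch of how the manager is meant to work, not the
 * actual implementation: allocations are carved out of a recycled hole
 * on va_holes when one is big enough, otherwise taken from the end of
 * the managed range by bumping va_offset, all under bo_va_mutex.
 *
 *   pthread_mutex_lock(&mgr->bo_va_mutex);
 *   LIST_FOR_EACH_ENTRY(hole, &mgr->va_holes, list) {
 *       if (hole->size >= size) {
 *           // first fit; alignment handling omitted for brevity
 *           ...take the range from this hole...
 *       }
 *   }
 *   // no hole fits: allocate at the current end of the range
 *   va = ROUND_UP(mgr->va_offset, alignment);
 *   mgr->va_offset = va + size;
 *   pthread_mutex_unlock(&mgr->bo_va_mutex);
 */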

struct amdgpu_va {
	amdgpu_device_handle dev;
	uint64_t address;
	uint64_t size;
	enum amdgpu_gpu_va_range range;
};

struct amdgpu_device {
	atomic_t refcount;
	int fd;
	int flink_fd;
	unsigned major_version;
	unsigned minor_version;

	/** List of buffer handles. Protected by bo_table_mutex. */
	struct util_hash_table *bo_handles;
	/** List of buffer GEM flink names. Protected by bo_table_mutex. */
	struct util_hash_table *bo_flink_names;
	/** This protects all hash tables. */
	pthread_mutex_t bo_table_mutex;
	struct drm_amdgpu_info_device dev_info;
	struct amdgpu_gpu_info info;
	struct amdgpu_bo_va_mgr *vamgr;
};

struct amdgpu_bo {
	atomic_t refcount;
	struct amdgpu_device *dev;

	uint64_t alloc_size;
	uint64_t virtual_mc_base_address;

	uint32_t handle;
	uint32_t flink_name;

	pthread_mutex_t cpu_access_mutex;
	void *cpu_ptr;
	int cpu_map_count;
};
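
/*
 * Sketch of the assumed locking pattern for CPU access (illustrative,
 * not the actual implementation): cpu_map_count lets nested
 * amdgpu_bo_cpu_map()/amdgpu_bo_cpu_unmap() calls share one mapping.
 *
 *   pthread_mutex_lock(&bo->cpu_access_mutex);
 *   if (bo->cpu_ptr)
 *       bo->cpu_map_count++;          // reuse the existing mapping
 *   else {
 *       ...mmap the buffer and store the result in bo->cpu_ptr...
 *       bo->cpu_map_count = 1;
 *   }
 *   pthread_mutex_unlock(&bo->cpu_access_mutex);
 */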

struct amdgpu_bo_list {
	struct amdgpu_device *dev;

	uint32_t handle;
};

struct amdgpu_context {
	struct amdgpu_device *dev;
	/** Mutex for accessing fences and for keeping command
	    submissions in order. */
	pthread_mutex_t sequence_mutex;
	/** Buffer for user fences */
	struct amdgpu_bo *fence_bo;
	void *fence_cpu;
	/** The newest expired fence for the ring of the ip blocks. */
	uint64_t expired_fences[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
	/* context id */
	uint32_t id;
};
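
/*
 * Illustrative lookup (hypothetical values): expired_fences is indexed
 * by IP type, IP instance and ring, so the newest expired fence on
 * ring 0 of the first GFX instance would be read, under sequence_mutex,
 * as:
 *
 *   uint64_t seq = ctx->expired_fences[AMDGPU_HW_IP_GFX][0][0];
 */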

/**
 * Functions.
 */

void amdgpu_device_free_internal(amdgpu_device_handle dev);

void amdgpu_bo_free_internal(amdgpu_bo_handle bo);

struct amdgpu_bo_va_mgr* amdgpu_vamgr_get_global(struct amdgpu_device *dev);

void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst, struct amdgpu_bo_va_mgr *src);

uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
			      uint64_t alignment, uint64_t base_required);

void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va,
			  uint64_t size);
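
/*
 * Illustrative pairing (hypothetical values): a successful
 * amdgpu_vamgr_find_va() is expected to be matched by a free of the
 * same range, e.g.
 *
 *   uint64_t va = amdgpu_vamgr_find_va(mgr, size, 4096, 0);
 *   if (va != AMDGPU_INVALID_VA_ADDRESS) {
 *       ...use [va, va + size)...
 *       amdgpu_vamgr_free_va(mgr, va, size);
 *   }
 */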

int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);

uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout);

/**
 * Inline functions.
 */

/**
 * Increment src and decrement dst as if we were updating references
 * for an assignment between two object pointers.
 *
 * \return true if dst reached 0 and the object holding it should be freed
 */
static inline bool update_references(atomic_t *dst, atomic_t *src)
{
	if (dst != src) {
		/* bump src first */
		if (src) {
			assert(atomic_read(src) > 0);
			atomic_inc(src);
		}
		if (dst) {
			assert(atomic_read(dst) > 0);
			return atomic_dec_and_test(dst);
		}
	}
	return false;
}
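
/*
 * Illustrative use (mirrors amdgpu_bo_reference() below; free_object()
 * is a hypothetical destructor): move a reference from one object to
 * another and destroy the old one once its count drops to zero.
 *
 *   if (update_references(&old_obj->refcount, &new_obj->refcount))
 *       free_object(old_obj);
 *   old_obj = new_obj;
 */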

/**
 * Assignment between two amdgpu_bo pointers with reference counting.
 *
 * Usage:
 *    struct amdgpu_bo *dst = ... , *src = ...;
 *
 *    dst = src;
 *    // No reference counting. Only use this when you need to move
 *    // a reference from one pointer to another.
 *
 *    amdgpu_bo_reference(&dst, src);
 *    // Reference counters are updated. dst is decremented and src is
 *    // incremented. dst is freed if its reference counter is 0.
 */
static inline void amdgpu_bo_reference(struct amdgpu_bo **dst,
				       struct amdgpu_bo *src)
{
	if (update_references(&(*dst)->refcount, &src->refcount))
		amdgpu_bo_free_internal(*dst);
	*dst = src;
}

/**
 * Assignment between two amdgpu_device pointers with reference counting.
 *
 * Usage:
 *    struct amdgpu_device *dst = ... , *src = ...;
 *
 *    dst = src;
 *    // No reference counting. Only use this when you need to move
 *    // a reference from one pointer to another.
 *
 *    amdgpu_device_reference(&dst, src);
 *    // Reference counters are updated. dst is decremented and src is
 *    // incremented. dst is freed if its reference counter is 0.
 */
void amdgpu_device_reference(struct amdgpu_device **dst,
			     struct amdgpu_device *src);
#endif
207#endif