| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1 | /* | 
 | 2 |  * Copyright 2008 Jerome Glisse. | 
 | 3 |  * All Rights Reserved. | 
 | 4 |  * | 
 | 5 |  * Permission is hereby granted, free of charge, to any person obtaining a | 
 | 6 |  * copy of this software and associated documentation files (the "Software"), | 
 | 7 |  * to deal in the Software without restriction, including without limitation | 
 | 8 |  * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 
 | 9 |  * and/or sell copies of the Software, and to permit persons to whom the | 
 | 10 |  * Software is furnished to do so, subject to the following conditions: | 
 | 11 |  * | 
 | 12 |  * The above copyright notice and this permission notice (including the next | 
 | 13 |  * paragraph) shall be included in all copies or substantial portions of the | 
 | 14 |  * Software. | 
 | 15 |  * | 
 | 16 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 
 | 17 |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 
 | 18 |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | 
 | 19 |  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | 
 | 20 |  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 
 | 21 |  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 
 | 22 |  * DEALINGS IN THE SOFTWARE. | 
 | 23 |  * | 
 | 24 |  * Authors: | 
 | 25 |  *    Jerome Glisse <glisse@freedesktop.org> | 
 | 26 |  */ | 
| Stephen Rothwell | 568d7c7 | 2016-03-17 15:30:49 +1100 | [diff] [blame] | 27 | #include <linux/pagemap.h> | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 28 | #include <drm/drmP.h> | 
 | 29 | #include <drm/amdgpu_drm.h> | 
 | 30 | #include "amdgpu.h" | 
 | 31 | #include "amdgpu_trace.h" | 
 | 32 |  | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 33 | int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | 
 | 34 | 		       u32 ip_instance, u32 ring, | 
 | 35 | 		       struct amdgpu_ring **out_ring) | 
 | 36 | { | 
 | 37 | 	/* Right now all IPs have only one instance - multiple rings. */ | 
 | 38 | 	if (ip_instance != 0) { | 
 | 39 | 		DRM_ERROR("invalid ip instance: %d\n", ip_instance); | 
 | 40 | 		return -EINVAL; | 
 | 41 | 	} | 
 | 42 |  | 
 | 43 | 	switch (ip_type) { | 
 | 44 | 	default: | 
 | 45 | 		DRM_ERROR("unknown ip type: %d\n", ip_type); | 
 | 46 | 		return -EINVAL; | 
 | 47 | 	case AMDGPU_HW_IP_GFX: | 
 | 48 | 		if (ring < adev->gfx.num_gfx_rings) { | 
 | 49 | 			*out_ring = &adev->gfx.gfx_ring[ring]; | 
 | 50 | 		} else { | 
 | 51 | 			DRM_ERROR("only %d gfx rings are supported now\n", | 
 | 52 | 				  adev->gfx.num_gfx_rings); | 
 | 53 | 			return -EINVAL; | 
 | 54 | 		} | 
 | 55 | 		break; | 
 | 56 | 	case AMDGPU_HW_IP_COMPUTE: | 
 | 57 | 		if (ring < adev->gfx.num_compute_rings) { | 
 | 58 | 			*out_ring = &adev->gfx.compute_ring[ring]; | 
 | 59 | 		} else { | 
 | 60 | 			DRM_ERROR("only %d compute rings are supported now\n", | 
 | 61 | 				  adev->gfx.num_compute_rings); | 
 | 62 | 			return -EINVAL; | 
 | 63 | 		} | 
 | 64 | 		break; | 
 | 65 | 	case AMDGPU_HW_IP_DMA: | 
| Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 66 | 		if (ring < adev->sdma.num_instances) { | 
 | 67 | 			*out_ring = &adev->sdma.instance[ring].ring; | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 68 | 		} else { | 
| Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 69 | 			DRM_ERROR("only %d SDMA rings are supported\n", | 
 | 70 | 				  adev->sdma.num_instances); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 71 | 			return -EINVAL; | 
 | 72 | 		} | 
 | 73 | 		break; | 
 | 74 | 	case AMDGPU_HW_IP_UVD: | 
 | 75 | 		*out_ring = &adev->uvd.ring; | 
 | 76 | 		break; | 
 | 77 | 	case AMDGPU_HW_IP_VCE: | 
 | 78 | 		if (ring < 2){ | 
 | 79 | 			*out_ring = &adev->vce.ring[ring]; | 
 | 80 | 		} else { | 
 | 81 | 			DRM_ERROR("only two VCE rings are supported\n"); | 
 | 82 | 			return -EINVAL; | 
 | 83 | 		} | 
 | 84 | 		break; | 
 | 85 | 	} | 
 | 86 | 	return 0; | 
 | 87 | } | 
 | 88 |  | 
| Christian König | 91acbeb | 2015-12-14 16:42:31 +0100 | [diff] [blame] | 89 | static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, | 
| Christian König | 758ac17 | 2016-05-06 22:14:00 +0200 | [diff] [blame] | 90 | 				      struct drm_amdgpu_cs_chunk_fence *data, | 
 | 91 | 				      uint32_t *offset) | 
| Christian König | 91acbeb | 2015-12-14 16:42:31 +0100 | [diff] [blame] | 92 | { | 
 | 93 | 	struct drm_gem_object *gobj; | 
| Christian König | 91acbeb | 2015-12-14 16:42:31 +0100 | [diff] [blame] | 94 |  | 
| Chris Wilson | a8ad0bd | 2016-05-09 11:04:54 +0100 | [diff] [blame] | 95 | 	gobj = drm_gem_object_lookup(p->filp, data->handle); | 
| Christian König | 91acbeb | 2015-12-14 16:42:31 +0100 | [diff] [blame] | 96 | 	if (gobj == NULL) | 
 | 97 | 		return -EINVAL; | 
 | 98 |  | 
| Christian König | 758ac17 | 2016-05-06 22:14:00 +0200 | [diff] [blame] | 99 | 	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); | 
| Christian König | 91acbeb | 2015-12-14 16:42:31 +0100 | [diff] [blame] | 100 | 	p->uf_entry.priority = 0; | 
 | 101 | 	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; | 
 | 102 | 	p->uf_entry.tv.shared = true; | 
| Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 103 | 	p->uf_entry.user_pages = NULL; | 
| Christian König | 758ac17 | 2016-05-06 22:14:00 +0200 | [diff] [blame] | 104 | 	*offset = data->offset; | 
| Christian König | 91acbeb | 2015-12-14 16:42:31 +0100 | [diff] [blame] | 105 |  | 
 | 106 | 	drm_gem_object_unreference_unlocked(gobj); | 
| Christian König | 758ac17 | 2016-05-06 22:14:00 +0200 | [diff] [blame] | 107 |  | 
 | 108 | 	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { | 
 | 109 | 		amdgpu_bo_unref(&p->uf_entry.robj); | 
 | 110 | 		return -EINVAL; | 
 | 111 | 	} | 
 | 112 |  | 
| Christian König | 91acbeb | 2015-12-14 16:42:31 +0100 | [diff] [blame] | 113 | 	return 0; | 
 | 114 | } | 
 | 115 |  | 
/**
 * amdgpu_cs_parser_init - set up a CS parser from the CS ioctl arguments
 * @p: parser to initialize (p->filp and p->adev set up by the caller)
 * @data: union drm_amdgpu_cs passed in from userspace
 *
 * Copies in the chunk pointer array and each chunk's data, counts the
 * IB chunks, handles the optional user fence chunk and allocates the
 * job.  Returns 0 on success (or for an empty submission) or a
 * negative error code, with everything cleaned up on failure.
 */
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	/* An empty submission is trivially successful. */
	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto put_ctx;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
			    GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto put_ctx;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			/* chunk i has no kdata yet, only free 0..i-1 */
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			/* allocation for chunk i failed, only free 0..i-1 */
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			/* chunk must be large enough to hold the fence data */
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
			/* parsed later; nothing to do here */
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	/* For now uf_addr only holds the offset inside the fence BO; the
	 * BO's GPU offset is added after validation.
	 */
	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);
	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		drm_free_large(p->chunks[i].kdata);
	kfree(p->chunks);
put_ctx:
	amdgpu_ctx_put(p->ctx);
free_chunk:
	kfree(chunk_array);

	return ret;
}
 | 237 |  | 
 | 238 | /* Returns how many bytes TTM can move per IB. | 
 | 239 |  */ | 
 | 240 | static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) | 
 | 241 | { | 
 | 242 | 	u64 real_vram_size = adev->mc.real_vram_size; | 
 | 243 | 	u64 vram_usage = atomic64_read(&adev->vram_usage); | 
 | 244 |  | 
 | 245 | 	/* This function is based on the current VRAM usage. | 
 | 246 | 	 * | 
 | 247 | 	 * - If all of VRAM is free, allow relocating the number of bytes that | 
 | 248 | 	 *   is equal to 1/4 of the size of VRAM for this IB. | 
 | 249 |  | 
 | 250 | 	 * - If more than one half of VRAM is occupied, only allow relocating | 
 | 251 | 	 *   1 MB of data for this IB. | 
 | 252 | 	 * | 
 | 253 | 	 * - From 0 to one half of used VRAM, the threshold decreases | 
 | 254 | 	 *   linearly. | 
 | 255 | 	 *         __________________ | 
 | 256 | 	 * 1/4 of -|\               | | 
 | 257 | 	 * VRAM    | \              | | 
 | 258 | 	 *         |  \             | | 
 | 259 | 	 *         |   \            | | 
 | 260 | 	 *         |    \           | | 
 | 261 | 	 *         |     \          | | 
 | 262 | 	 *         |      \         | | 
 | 263 | 	 *         |       \________|1 MB | 
 | 264 | 	 *         |----------------| | 
 | 265 | 	 *    VRAM 0 %             100 % | 
 | 266 | 	 *         used            used | 
 | 267 | 	 * | 
 | 268 | 	 * Note: It's a threshold, not a limit. The threshold must be crossed | 
 | 269 | 	 * for buffer relocations to stop, so any buffer of an arbitrary size | 
 | 270 | 	 * can be moved as long as the threshold isn't crossed before | 
 | 271 | 	 * the relocation takes place. We don't want to disable buffer | 
 | 272 | 	 * relocations completely. | 
 | 273 | 	 * | 
 | 274 | 	 * The idea is that buffers should be placed in VRAM at creation time | 
 | 275 | 	 * and TTM should only do a minimum number of relocations during | 
 | 276 | 	 * command submission. In practice, you need to submit at least | 
 | 277 | 	 * a dozen IBs to move all buffers to VRAM if they are in GTT. | 
 | 278 | 	 * | 
 | 279 | 	 * Also, things can get pretty crazy under memory pressure and actual | 
 | 280 | 	 * VRAM usage can change a lot, so playing safe even at 50% does | 
 | 281 | 	 * consistently increase performance. | 
 | 282 | 	 */ | 
 | 283 |  | 
 | 284 | 	u64 half_vram = real_vram_size >> 1; | 
 | 285 | 	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage; | 
 | 286 | 	u64 bytes_moved_threshold = half_free_vram >> 1; | 
 | 287 | 	return max(bytes_moved_threshold, 1024*1024ull); | 
 | 288 | } | 
 | 289 |  | 
/**
 * amdgpu_cs_list_validate - place a list of BOs for this command submission
 * @p: the CS parser; p->bytes_moved_threshold and p->bytes_moved must be
 *     initialized before the first call
 * @validated: list of amdgpu_bo_list_entry to validate
 *
 * Validates (places) every BO on @validated, preferring each BO's
 * preferred domains while the per-IB byte-move budget lasts, and binds
 * freshly acquired user pages for userptr BOs.  Accumulates the number
 * of bytes actually moved into p->bytes_moved.
 *
 * Returns 0 on success or a negative error code.
 */
int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
			    struct list_head *validated)
{
	struct amdgpu_bo_list_entry *lobj;
	u64 initial_bytes_moved;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;
		uint32_t domain;

		/* A userptr BO may only be used by the process whose
		 * address space backs it.
		 */
		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
			size_t size = sizeof(struct page *);

			size *= bo->tbo.ttm->num_pages;
			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
			binding_userptr = true;
		}

		/* Pinned BOs cannot move, nothing to validate. */
		if (bo->pin_count)
			continue;

		/* Avoid moving this one if we have moved too many buffers
		 * for this IB already.
		 *
		 * Note that this allows moving at least one buffer of
		 * any size, because it doesn't take the current "bo"
		 * into account. We don't want to disallow buffer moves
		 * completely.
		 */
		if (p->bytes_moved <= p->bytes_moved_threshold)
			domain = bo->prefered_domains;
		else
			domain = bo->allowed_domains;

	retry:
		amdgpu_ttm_placement_from_domain(bo, domain);
		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		/* charge only the bytes this validation actually moved */
		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
			       initial_bytes_moved;

		if (unlikely(r)) {
			/* retry once with the less restrictive allowed
			 * domains before giving up
			 */
			if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
				domain = bo->allowed_domains;
				goto retry;
			}
			return r;
		}

		/* pages are now bound to the TTM object; drop our copy */
		if (binding_userptr) {
			drm_free_large(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}
 | 354 |  | 
/**
 * amdgpu_cs_parser_bos - reserve and validate all BOs needed by the CS
 * @p: the CS parser
 * @cs: the CS ioctl arguments (supplies the BO list handle)
 *
 * Builds p->validated from the BO list plus the page directory and the
 * optional user fence BO, reserves everything, acquires pages for
 * userptr BOs (retrying when they get invalidated concurrently) and
 * validates all buffers.  On success the buffers remain reserved.
 *
 * Returns 0 on success or a negative error code with all reservations
 * backed off.
 */
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	bool need_mmap_lock = false;
	unsigned i, tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (p->bo_list) {
		/* mmap_sem is only needed if the list contains userptr BOs */
		need_mmap_lock = p->bo_list->first_userptr !=
			p->bo_list->num_entries;
		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	}

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj)
		list_add(&p->uf_entry.tv.head, &p->validated);

	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	while (1) {
		struct list_head need_pages;
		unsigned i;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0))
			goto error_free_pages;

		/* Without a BO list we don't have userptr BOs */
		if (!p->bo_list)
			break;

		INIT_LIST_HEAD(&need_pages);
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {

			e = &p->bo_list->array[i];

			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      e->robj->tbo.ttm->num_pages,
					      false);
				drm_free_large(e->user_pages);
				e->user_pages = NULL;
			}

			/* Userptr BO without pages yet: move it over to
			 * need_pages and unreserve it, so its pages can
			 * be faulted in without holding reservations.
			 */
			if (e->robj->tbo.ttm->state != tt_bound &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too often, just abort */
		if (!--tries) {
			r = -EDEADLK;
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = drm_calloc_large(ttm->num_pages,
							 sizeof(struct page*));
			if (!e->user_pages) {
				r = -ENOMEM;
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				drm_free_large(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);

	/* set up the per-IB byte-move budget for validation */
	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
	p->bytes_moved = 0;

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	/* remember the eviction state so later evictions can be detected */
	fpriv->vm.last_eviction_counter =
		atomic64_read(&p->adev->num_evictions);

	if (p->bo_list) {
		struct amdgpu_bo *gds = p->bo_list->gds_obj;
		struct amdgpu_bo *gws = p->bo_list->gws_obj;
		struct amdgpu_bo *oa = p->bo_list->oa_obj;
		struct amdgpu_vm *vm = &fpriv->vm;
		unsigned i;

		/* look up the VM mapping of every BO in the list */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
		}

		if (gds) {
			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
			p->job->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
			p->job->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
			p->job->oa_size = amdgpu_bo_size(oa);
		}
	}

	/* the fence BO is validated now, so the GPU offset can be added to
	 * the offset stored in uf_addr
	 */
	if (p->uf_entry.robj)
		p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);

error_validate:
	if (r) {
		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
	}

error_free_pages:

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	if (p->bo_list) {
		/* drop any user page arrays that were not consumed */
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			e = &p->bo_list->array[i];

			if (!e->user_pages)
				continue;

			release_pages(e->user_pages,
				      e->robj->tbo.ttm->num_pages,
				      false);
			drm_free_large(e->user_pages);
		}
	}

	return r;
}
 | 533 |  | 
 | 534 | static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) | 
 | 535 | { | 
 | 536 | 	struct amdgpu_bo_list_entry *e; | 
 | 537 | 	int r; | 
 | 538 |  | 
 | 539 | 	list_for_each_entry(e, &p->validated, tv.head) { | 
 | 540 | 		struct reservation_object *resv = e->robj->tbo.resv; | 
| Christian König | e86f9ce | 2016-02-08 12:13:05 +0100 | [diff] [blame] | 541 | 		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 542 |  | 
 | 543 | 		if (r) | 
 | 544 | 			return r; | 
 | 545 | 	} | 
 | 546 | 	return 0; | 
 | 547 | } | 
 | 548 |  | 
| Christian König | 984810f | 2015-11-14 21:05:35 +0100 | [diff] [blame] | 549 | /** | 
 | 550 |  * cs_parser_fini() - clean parser states | 
 | 551 |  * @parser:	parser structure holding parsing context. | 
 | 552 |  * @error:	error number | 
 | 553 |  * | 
 | 554 |  * If error is set than unvalidate buffer, otherwise just free memory | 
 | 555 |  * used by parsing context. | 
 | 556 |  **/ | 
 | 557 | static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) | 
| Chunming Zhou | 049fc52 | 2015-07-21 14:36:51 +0800 | [diff] [blame] | 558 | { | 
| Christian König | eceb8a1 | 2016-01-11 15:35:21 +0100 | [diff] [blame] | 559 | 	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; | 
| Christian König | 984810f | 2015-11-14 21:05:35 +0100 | [diff] [blame] | 560 | 	unsigned i; | 
 | 561 |  | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 562 | 	if (!error) { | 
| Nicolai Hähnle | 28b8d66 | 2016-01-27 11:04:19 -0500 | [diff] [blame] | 563 | 		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); | 
 | 564 |  | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 565 | 		ttm_eu_fence_buffer_objects(&parser->ticket, | 
| Christian König | 984810f | 2015-11-14 21:05:35 +0100 | [diff] [blame] | 566 | 					    &parser->validated, | 
 | 567 | 					    parser->fence); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 568 | 	} else if (backoff) { | 
 | 569 | 		ttm_eu_backoff_reservation(&parser->ticket, | 
 | 570 | 					   &parser->validated); | 
 | 571 | 	} | 
| Christian König | 984810f | 2015-11-14 21:05:35 +0100 | [diff] [blame] | 572 | 	fence_put(parser->fence); | 
| Christian König | 7e52a81 | 2015-11-04 15:44:39 +0100 | [diff] [blame] | 573 |  | 
| Christian König | 3cb485f | 2015-05-11 15:34:59 +0200 | [diff] [blame] | 574 | 	if (parser->ctx) | 
 | 575 | 		amdgpu_ctx_put(parser->ctx); | 
| Chunming Zhou | a3348bb | 2015-08-18 16:25:46 +0800 | [diff] [blame] | 576 | 	if (parser->bo_list) | 
 | 577 | 		amdgpu_bo_list_put(parser->bo_list); | 
 | 578 |  | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 579 | 	for (i = 0; i < parser->nchunks; i++) | 
 | 580 | 		drm_free_large(parser->chunks[i].kdata); | 
 | 581 | 	kfree(parser->chunks); | 
| Christian König | 50838c8 | 2016-02-03 13:44:52 +0100 | [diff] [blame] | 582 | 	if (parser->job) | 
 | 583 | 		amdgpu_job_free(parser->job); | 
| Christian König | 91acbeb | 2015-12-14 16:42:31 +0100 | [diff] [blame] | 584 | 	amdgpu_bo_unref(&parser->uf_entry.robj); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 585 | } | 
 | 586 |  | 
/*
 * amdgpu_bo_vm_update_pte - bring the VM page tables up to date for this CS
 *
 * @p:  parser context holding the BO list and the job to sync with
 * @vm: VM of the submitting client
 *
 * Updates the page directory, makes the job wait for that update, clears
 * freed mappings, then updates the page table entries of every BO in the
 * list and adds each update fence to the job's sync object.  Finally
 * clears invalidated entries.  Ordering of these steps matters.
 *
 * Returns 0 on success or a negative error code.
 */
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
				   struct amdgpu_vm *vm)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_page_directory(adev, vm);
	if (r)
		return r;

	/* the job must not run before the page directory update finished */
	r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm);
	if (r)
		return r;

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
			if (r)
				return r;

			/* wait for this BO's PTE update before running the job */
			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->job->sync, f);
			if (r)
				return r;
		}

	}

	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo);
		}
	}

	return r;
}
 | 648 |  | 
 | 649 | static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, | 
| Christian König | b07c60c | 2016-01-31 12:29:04 +0100 | [diff] [blame] | 650 | 				 struct amdgpu_cs_parser *p) | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 651 | { | 
| Christian König | b07c60c | 2016-01-31 12:29:04 +0100 | [diff] [blame] | 652 | 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 653 | 	struct amdgpu_vm *vm = &fpriv->vm; | 
| Christian König | b07c60c | 2016-01-31 12:29:04 +0100 | [diff] [blame] | 654 | 	struct amdgpu_ring *ring = p->job->ring; | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 655 | 	int i, r; | 
 | 656 |  | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 657 | 	/* Only for UVD/VCE VM emulation */ | 
| Christian König | b07c60c | 2016-01-31 12:29:04 +0100 | [diff] [blame] | 658 | 	if (ring->funcs->parse_cs) { | 
| Christian König | 9a79588 | 2016-06-22 14:25:55 +0200 | [diff] [blame] | 659 | 		p->job->vm = NULL; | 
| Christian König | b07c60c | 2016-01-31 12:29:04 +0100 | [diff] [blame] | 660 | 		for (i = 0; i < p->job->num_ibs; i++) { | 
 | 661 | 			r = amdgpu_ring_parse_cs(ring, p, i); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 662 | 			if (r) | 
 | 663 | 				return r; | 
 | 664 | 		} | 
| Christian König | 9a79588 | 2016-06-22 14:25:55 +0200 | [diff] [blame] | 665 | 	} else { | 
 | 666 | 		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); | 
 | 667 |  | 
 | 668 | 		r = amdgpu_bo_vm_update_pte(p, vm); | 
 | 669 | 		if (r) | 
 | 670 | 			return r; | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 671 | 	} | 
 | 672 |  | 
| Christian König | 9a79588 | 2016-06-22 14:25:55 +0200 | [diff] [blame] | 673 | 	return amdgpu_cs_sync_rings(p); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 674 | } | 
 | 675 |  | 
 | 676 | static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r) | 
 | 677 | { | 
 | 678 | 	if (r == -EDEADLK) { | 
 | 679 | 		r = amdgpu_gpu_reset(adev); | 
 | 680 | 		if (!r) | 
 | 681 | 			r = -EAGAIN; | 
 | 682 | 	} | 
 | 683 | 	return r; | 
 | 684 | } | 
 | 685 |  | 
/*
 * amdgpu_cs_ib_fill - fill the job's IBs from the parser's IB chunks
 *
 * Walks all chunks, and for every AMDGPU_CHUNK_ID_IB chunk resolves the
 * target ring and allocates/fills one IB in the job.  All IB chunks of a
 * single submission must target the same ring.  For rings with CS
 * parsing (UVD/VCE) the IB contents are copied out of the mapped BO so
 * they can be patched later; otherwise the IB just references the VM
 * address directly.
 *
 * Returns 0 on success or a negative error code.
 */
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r;

	/* i walks the chunks, j counts the IBs actually filled */
	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
				       chunk_ib->ip_instance, chunk_ib->ring,
				       &ring);
		if (r)
			return r;

		/* all IBs of one submission must go to the same ring */
		if (parser->job->ring && parser->job->ring != ring)
			return -EINVAL;

		parser->job->ring = ring;

		if (ring->funcs->parse_cs) {
			/* UVD/VCE: copy the IB out of the BO for later patching */
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			uint64_t offset;
			uint8_t *kptr;

			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
						   &aobj);
			if (!aobj) {
				DRM_ERROR("IB va_start is invalid\n");
				return -EINVAL;
			}

			/* the whole IB must fit inside the found mapping */
			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r) {
				return r;
			}

			/* translate the VM address into an offset inside the map */
			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			r =  amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);
		} else {
			/* normal ring: IB executes directly from the VM address */
			r =  amdgpu_ib_get(adev, vm, 0, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			ib->gpu_addr = chunk_ib->va_start;
		}

		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;
		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_addr && (
	    parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
	    parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return 0;
}
 | 777 |  | 
| Christian König | 2b48d32 | 2015-06-19 17:31:29 +0200 | [diff] [blame] | 778 | static int amdgpu_cs_dependencies(struct amdgpu_device *adev, | 
 | 779 | 				  struct amdgpu_cs_parser *p) | 
 | 780 | { | 
| Christian König | 76a1ea6 | 2015-07-06 19:42:10 +0200 | [diff] [blame] | 781 | 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | 
| Christian König | 2b48d32 | 2015-06-19 17:31:29 +0200 | [diff] [blame] | 782 | 	int i, j, r; | 
 | 783 |  | 
| Christian König | 2b48d32 | 2015-06-19 17:31:29 +0200 | [diff] [blame] | 784 | 	for (i = 0; i < p->nchunks; ++i) { | 
 | 785 | 		struct drm_amdgpu_cs_chunk_dep *deps; | 
 | 786 | 		struct amdgpu_cs_chunk *chunk; | 
 | 787 | 		unsigned num_deps; | 
 | 788 |  | 
 | 789 | 		chunk = &p->chunks[i]; | 
 | 790 |  | 
 | 791 | 		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES) | 
 | 792 | 			continue; | 
 | 793 |  | 
 | 794 | 		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata; | 
 | 795 | 		num_deps = chunk->length_dw * 4 / | 
 | 796 | 			sizeof(struct drm_amdgpu_cs_chunk_dep); | 
 | 797 |  | 
 | 798 | 		for (j = 0; j < num_deps; ++j) { | 
| Christian König | 2b48d32 | 2015-06-19 17:31:29 +0200 | [diff] [blame] | 799 | 			struct amdgpu_ring *ring; | 
| Christian König | 76a1ea6 | 2015-07-06 19:42:10 +0200 | [diff] [blame] | 800 | 			struct amdgpu_ctx *ctx; | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 801 | 			struct fence *fence; | 
| Christian König | 2b48d32 | 2015-06-19 17:31:29 +0200 | [diff] [blame] | 802 |  | 
 | 803 | 			r = amdgpu_cs_get_ring(adev, deps[j].ip_type, | 
 | 804 | 					       deps[j].ip_instance, | 
 | 805 | 					       deps[j].ring, &ring); | 
 | 806 | 			if (r) | 
 | 807 | 				return r; | 
 | 808 |  | 
| Christian König | 76a1ea6 | 2015-07-06 19:42:10 +0200 | [diff] [blame] | 809 | 			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id); | 
 | 810 | 			if (ctx == NULL) | 
 | 811 | 				return -EINVAL; | 
 | 812 |  | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 813 | 			fence = amdgpu_ctx_get_fence(ctx, ring, | 
 | 814 | 						     deps[j].handle); | 
 | 815 | 			if (IS_ERR(fence)) { | 
 | 816 | 				r = PTR_ERR(fence); | 
| Christian König | 76a1ea6 | 2015-07-06 19:42:10 +0200 | [diff] [blame] | 817 | 				amdgpu_ctx_put(ctx); | 
| Christian König | 2b48d32 | 2015-06-19 17:31:29 +0200 | [diff] [blame] | 818 | 				return r; | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 819 |  | 
 | 820 | 			} else if (fence) { | 
| Christian König | e86f9ce | 2016-02-08 12:13:05 +0100 | [diff] [blame] | 821 | 				r = amdgpu_sync_fence(adev, &p->job->sync, | 
 | 822 | 						      fence); | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 823 | 				fence_put(fence); | 
 | 824 | 				amdgpu_ctx_put(ctx); | 
 | 825 | 				if (r) | 
 | 826 | 					return r; | 
| Christian König | 76a1ea6 | 2015-07-06 19:42:10 +0200 | [diff] [blame] | 827 | 			} | 
| Christian König | 2b48d32 | 2015-06-19 17:31:29 +0200 | [diff] [blame] | 828 | 		} | 
 | 829 | 	} | 
 | 830 |  | 
 | 831 | 	return 0; | 
 | 832 | } | 
 | 833 |  | 
| Christian König | cd75dc6 | 2016-01-31 11:30:55 +0100 | [diff] [blame] | 834 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | 
 | 835 | 			    union drm_amdgpu_cs *cs) | 
 | 836 | { | 
| Christian König | b07c60c | 2016-01-31 12:29:04 +0100 | [diff] [blame] | 837 | 	struct amdgpu_ring *ring = p->job->ring; | 
| Christian König | 92f2509 | 2016-05-06 15:57:42 +0200 | [diff] [blame] | 838 | 	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; | 
| Monk Liu | e686941 | 2016-03-07 12:49:55 +0800 | [diff] [blame] | 839 | 	struct fence *fence; | 
| Christian König | cd75dc6 | 2016-01-31 11:30:55 +0100 | [diff] [blame] | 840 | 	struct amdgpu_job *job; | 
| Monk Liu | e686941 | 2016-03-07 12:49:55 +0800 | [diff] [blame] | 841 | 	int r; | 
| Christian König | cd75dc6 | 2016-01-31 11:30:55 +0100 | [diff] [blame] | 842 |  | 
| Christian König | 50838c8 | 2016-02-03 13:44:52 +0100 | [diff] [blame] | 843 | 	job = p->job; | 
 | 844 | 	p->job = NULL; | 
| Christian König | cd75dc6 | 2016-01-31 11:30:55 +0100 | [diff] [blame] | 845 |  | 
| Monk Liu | e686941 | 2016-03-07 12:49:55 +0800 | [diff] [blame] | 846 | 	r = amd_sched_job_init(&job->base, &ring->sched, | 
| Christian König | c5f74f7 | 2016-05-19 09:54:15 +0200 | [diff] [blame] | 847 | 			       entity, p->filp, &fence); | 
| Monk Liu | e686941 | 2016-03-07 12:49:55 +0800 | [diff] [blame] | 848 | 	if (r) { | 
| Christian König | d71518b | 2016-02-01 12:20:25 +0100 | [diff] [blame] | 849 | 		amdgpu_job_free(job); | 
| Monk Liu | e686941 | 2016-03-07 12:49:55 +0800 | [diff] [blame] | 850 | 		return r; | 
| Christian König | cd75dc6 | 2016-01-31 11:30:55 +0100 | [diff] [blame] | 851 | 	} | 
 | 852 |  | 
| Monk Liu | e686941 | 2016-03-07 12:49:55 +0800 | [diff] [blame] | 853 | 	job->owner = p->filp; | 
| Christian König | 92f2509 | 2016-05-06 15:57:42 +0200 | [diff] [blame] | 854 | 	job->ctx = entity->fence_context; | 
| Monk Liu | e686941 | 2016-03-07 12:49:55 +0800 | [diff] [blame] | 855 | 	p->fence = fence_get(fence); | 
 | 856 | 	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence); | 
| Christian König | 758ac17 | 2016-05-06 22:14:00 +0200 | [diff] [blame] | 857 | 	job->uf_sequence = cs->out.handle; | 
| Christian König | a5fb4ec | 2016-06-29 15:10:31 +0200 | [diff] [blame^] | 858 | 	amdgpu_job_free_resources(job); | 
| Christian König | cd75dc6 | 2016-01-31 11:30:55 +0100 | [diff] [blame] | 859 |  | 
 | 860 | 	trace_amdgpu_cs_ioctl(job); | 
 | 861 | 	amd_sched_entity_push_job(&job->base); | 
 | 862 |  | 
 | 863 | 	return 0; | 
 | 864 | } | 
 | 865 |  | 
| Chunming Zhou | 049fc52 | 2015-07-21 14:36:51 +0800 | [diff] [blame] | 866 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | 
 | 867 | { | 
 | 868 | 	struct amdgpu_device *adev = dev->dev_private; | 
 | 869 | 	union drm_amdgpu_cs *cs = data; | 
| Christian König | 7e52a81 | 2015-11-04 15:44:39 +0100 | [diff] [blame] | 870 | 	struct amdgpu_cs_parser parser = {}; | 
| Christian König | 26a6980 | 2015-08-18 21:09:33 +0200 | [diff] [blame] | 871 | 	bool reserved_buffers = false; | 
 | 872 | 	int i, r; | 
| Chunming Zhou | 049fc52 | 2015-07-21 14:36:51 +0800 | [diff] [blame] | 873 |  | 
| Christian König | 0c418f1 | 2015-09-01 15:13:53 +0200 | [diff] [blame] | 874 | 	if (!adev->accel_working) | 
| Chunming Zhou | 049fc52 | 2015-07-21 14:36:51 +0800 | [diff] [blame] | 875 | 		return -EBUSY; | 
| Chunming Zhou | 049fc52 | 2015-07-21 14:36:51 +0800 | [diff] [blame] | 876 |  | 
| Christian König | 7e52a81 | 2015-11-04 15:44:39 +0100 | [diff] [blame] | 877 | 	parser.adev = adev; | 
 | 878 | 	parser.filp = filp; | 
 | 879 |  | 
 | 880 | 	r = amdgpu_cs_parser_init(&parser, data); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 881 | 	if (r) { | 
| Chunming Zhou | 049fc52 | 2015-07-21 14:36:51 +0800 | [diff] [blame] | 882 | 		DRM_ERROR("Failed to initialize parser !\n"); | 
| Christian König | 7e52a81 | 2015-11-04 15:44:39 +0100 | [diff] [blame] | 883 | 		amdgpu_cs_parser_fini(&parser, r, false); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 884 | 		r = amdgpu_cs_handle_lockup(adev, r); | 
 | 885 | 		return r; | 
 | 886 | 	} | 
| Christian König | 2a7d9bd | 2015-12-18 20:33:52 +0100 | [diff] [blame] | 887 | 	r = amdgpu_cs_parser_bos(&parser, data); | 
| Christian König | 26a6980 | 2015-08-18 21:09:33 +0200 | [diff] [blame] | 888 | 	if (r == -ENOMEM) | 
 | 889 | 		DRM_ERROR("Not enough memory for command submission!\n"); | 
 | 890 | 	else if (r && r != -ERESTARTSYS) | 
 | 891 | 		DRM_ERROR("Failed to process the buffer list %d!\n", r); | 
 | 892 | 	else if (!r) { | 
 | 893 | 		reserved_buffers = true; | 
| Christian König | 7e52a81 | 2015-11-04 15:44:39 +0100 | [diff] [blame] | 894 | 		r = amdgpu_cs_ib_fill(adev, &parser); | 
| Christian König | 26a6980 | 2015-08-18 21:09:33 +0200 | [diff] [blame] | 895 | 	} | 
 | 896 |  | 
 | 897 | 	if (!r) { | 
| Christian König | 7e52a81 | 2015-11-04 15:44:39 +0100 | [diff] [blame] | 898 | 		r = amdgpu_cs_dependencies(adev, &parser); | 
| Christian König | 26a6980 | 2015-08-18 21:09:33 +0200 | [diff] [blame] | 899 | 		if (r) | 
 | 900 | 			DRM_ERROR("Failed in the dependencies handling %d!\n", r); | 
 | 901 | 	} | 
 | 902 |  | 
 | 903 | 	if (r) | 
 | 904 | 		goto out; | 
 | 905 |  | 
| Christian König | 50838c8 | 2016-02-03 13:44:52 +0100 | [diff] [blame] | 906 | 	for (i = 0; i < parser.job->num_ibs; i++) | 
| Christian König | 7e52a81 | 2015-11-04 15:44:39 +0100 | [diff] [blame] | 907 | 		trace_amdgpu_cs(&parser, i); | 
| Christian König | 26a6980 | 2015-08-18 21:09:33 +0200 | [diff] [blame] | 908 |  | 
| Christian König | 7e52a81 | 2015-11-04 15:44:39 +0100 | [diff] [blame] | 909 | 	r = amdgpu_cs_ib_vm_chunk(adev, &parser); | 
| Chunming Zhou | 4fe6311 | 2015-08-18 16:12:15 +0800 | [diff] [blame] | 910 | 	if (r) | 
 | 911 | 		goto out; | 
 | 912 |  | 
| Christian König | 4acabfe | 2016-01-31 11:32:04 +0100 | [diff] [blame] | 913 | 	r = amdgpu_cs_submit(&parser, cs); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 914 |  | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 915 | out: | 
| Christian König | 7e52a81 | 2015-11-04 15:44:39 +0100 | [diff] [blame] | 916 | 	amdgpu_cs_parser_fini(&parser, r, reserved_buffers); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 917 | 	r = amdgpu_cs_handle_lockup(adev, r); | 
 | 918 | 	return r; | 
 | 919 | } | 
 | 920 |  | 
 | 921 | /** | 
 | 922 |  * amdgpu_cs_wait_ioctl - wait for a command submission to finish | 
 | 923 |  * | 
 | 924 |  * @dev: drm device | 
 | 925 |  * @data: data from userspace | 
 | 926 |  * @filp: file private | 
 | 927 |  * | 
 | 928 |  * Wait for the command submission identified by handle to finish. | 
 | 929 |  */ | 
 | 930 | int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, | 
 | 931 | 			 struct drm_file *filp) | 
 | 932 | { | 
 | 933 | 	union drm_amdgpu_wait_cs *wait = data; | 
 | 934 | 	struct amdgpu_device *adev = dev->dev_private; | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 935 | 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); | 
| Christian König | 03507c4 | 2015-06-19 17:00:19 +0200 | [diff] [blame] | 936 | 	struct amdgpu_ring *ring = NULL; | 
| Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 937 | 	struct amdgpu_ctx *ctx; | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 938 | 	struct fence *fence; | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 939 | 	long r; | 
 | 940 |  | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 941 | 	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, | 
 | 942 | 			       wait->in.ring, &ring); | 
 | 943 | 	if (r) | 
 | 944 | 		return r; | 
 | 945 |  | 
| Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 946 | 	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); | 
 | 947 | 	if (ctx == NULL) | 
 | 948 | 		return -EINVAL; | 
| Chunming Zhou | 4b559c9 | 2015-07-21 15:53:04 +0800 | [diff] [blame] | 949 |  | 
 | 950 | 	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle); | 
 | 951 | 	if (IS_ERR(fence)) | 
 | 952 | 		r = PTR_ERR(fence); | 
 | 953 | 	else if (fence) { | 
 | 954 | 		r = fence_wait_timeout(fence, true, timeout); | 
 | 955 | 		fence_put(fence); | 
 | 956 | 	} else | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 957 | 		r = 1; | 
 | 958 |  | 
| Jammy Zhou | 66b3cf2 | 2015-05-08 17:29:40 +0800 | [diff] [blame] | 959 | 	amdgpu_ctx_put(ctx); | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 960 | 	if (r < 0) | 
 | 961 | 		return r; | 
 | 962 |  | 
 | 963 | 	memset(wait, 0, sizeof(*wait)); | 
 | 964 | 	wait->out.status = (r == 0); | 
 | 965 |  | 
 | 966 | 	return 0; | 
 | 967 | } | 
 | 968 |  | 
 | 969 | /** | 
 | 970 |  * amdgpu_cs_find_bo_va - find bo_va for VM address | 
 | 971 |  * | 
 | 972 |  * @parser: command submission parser context | 
 | 973 |  * @addr: VM address | 
 | 974 |  * @bo: resulting BO of the mapping found | 
 | 975 |  * | 
 | 976 |  * Search the buffer objects in the command submission context for a certain | 
 | 977 |  * virtual memory address. Returns allocation structure when found, NULL | 
 | 978 |  * otherwise. | 
 | 979 |  */ | 
 | 980 | struct amdgpu_bo_va_mapping * | 
 | 981 | amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, | 
 | 982 | 		       uint64_t addr, struct amdgpu_bo **bo) | 
 | 983 | { | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 984 | 	struct amdgpu_bo_va_mapping *mapping; | 
| Christian König | 15486fd2 | 2015-12-22 16:06:12 +0100 | [diff] [blame] | 985 | 	unsigned i; | 
 | 986 |  | 
 | 987 | 	if (!parser->bo_list) | 
 | 988 | 		return NULL; | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 989 |  | 
 | 990 | 	addr /= AMDGPU_GPU_PAGE_SIZE; | 
 | 991 |  | 
| Christian König | 15486fd2 | 2015-12-22 16:06:12 +0100 | [diff] [blame] | 992 | 	for (i = 0; i < parser->bo_list->num_entries; i++) { | 
 | 993 | 		struct amdgpu_bo_list_entry *lobj; | 
 | 994 |  | 
 | 995 | 		lobj = &parser->bo_list->array[i]; | 
 | 996 | 		if (!lobj->bo_va) | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 997 | 			continue; | 
 | 998 |  | 
| Christian König | 15486fd2 | 2015-12-22 16:06:12 +0100 | [diff] [blame] | 999 | 		list_for_each_entry(mapping, &lobj->bo_va->valids, list) { | 
| Christian König | 7fc1195 | 2015-07-30 11:53:42 +0200 | [diff] [blame] | 1000 | 			if (mapping->it.start > addr || | 
 | 1001 | 			    addr > mapping->it.last) | 
 | 1002 | 				continue; | 
 | 1003 |  | 
| Christian König | 15486fd2 | 2015-12-22 16:06:12 +0100 | [diff] [blame] | 1004 | 			*bo = lobj->bo_va->bo; | 
| Christian König | 7fc1195 | 2015-07-30 11:53:42 +0200 | [diff] [blame] | 1005 | 			return mapping; | 
 | 1006 | 		} | 
 | 1007 |  | 
| Christian König | 15486fd2 | 2015-12-22 16:06:12 +0100 | [diff] [blame] | 1008 | 		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) { | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1009 | 			if (mapping->it.start > addr || | 
 | 1010 | 			    addr > mapping->it.last) | 
 | 1011 | 				continue; | 
 | 1012 |  | 
| Christian König | 15486fd2 | 2015-12-22 16:06:12 +0100 | [diff] [blame] | 1013 | 			*bo = lobj->bo_va->bo; | 
| Alex Deucher | d38ceaf | 2015-04-20 16:55:21 -0400 | [diff] [blame] | 1014 | 			return mapping; | 
 | 1015 | 		} | 
 | 1016 | 	} | 
 | 1017 |  | 
 | 1018 | 	return NULL; | 
 | 1019 | } |