/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

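	/* each of the two async DMA engines has its own soft reset bit */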
	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (SI).
 */
void si_dma_vm_copy_pages(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe, uint64_t src,
			  unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
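		/* one copy packet can transfer at most 0xFFFFF bytes;
		 * clamp to the largest 8-byte aligned value below that
		 */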
		if (bytes > 0xFFFF8)
			bytes = 0xFFFF8;

		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
						      1, 0, 0, bytes);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
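		/* DMA addresses are 40 bits wide: only bits 39:32 belong
		 * in the upper dword
		 */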
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * si_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using the DMA (SI).
 */
void si_dma_vm_write_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
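		/* each PTE takes two dwords; one write packet carries at
		 * most 0xFFFFE dwords of payload
		 */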
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_pages(struct radeon_device *rdev,
			 struct radeon_ib *ib,
			 uint64_t pe,
			 uint64_t addr, unsigned count,
			 uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

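		/* one PTE_PDE packet has the hardware generate ndw/2
		 * contiguous PTEs, advancing the address by incr per entry
		 */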
		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

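/**
 * si_dma_vm_flush - flush the VM TLB using the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: VM context to flush
 * @pd_addr: page directory base address
 *
 * Update the page table base address for the requested VM context
 * and flush its TLB via SRBM writes on the DMA ring (SI).
 */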
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm_id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
	}
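	/* the page table base is programmed in units of 4KB pages */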
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-15 are the VM contexts0-15 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for invalidate to complete */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
	radeon_ring_write(ring, 0xff << 16); /* retry */
	radeon_ring_write(ring, 1 << vm_id); /* mask */
	radeon_ring_write(ring, 0); /* value */
	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
				 uint64_t src_offset, uint64_t dst_offset,
				 unsigned num_gpu_pages,
				 struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
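	/* each copy packet is 5 dwords; leave headroom for the sync
	 * and fence packets
	 */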
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}