/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     Alex Deucher <alexander.deucher@amd.com>
 */

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

#include "evergreend.h"
#include "evergreen_blit_shaders.h"

#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8                 0x1
#define FMT_5_6_5             0x8
#define FMT_8_8_8_8           0x1a
#define COLOR_8               0x1
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a

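/*
 * The blit path only uses two of these formats: byte-granular copies use
 * FMT_8/COLOR_8 (one texel per byte) and dword-aligned copies use
 * FMT_8_8_8_8/COLOR_8_8_8_8 (one texel per 32 bits); see
 * evergreen_kms_blit_copy() below.
 */
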
/* emits 17 */
static void
set_render_target(struct radeon_device *rdev, int format,
                  int w, int h, u64 gpu_addr)
{
        u32 cb_color_info;
        int pitch, slice;

        h = ALIGN(h, 8);
        if (h < 8)
                h = 8;

        cb_color_info = ((format << 2) | (1 << 24));
        pitch = (w / 8) - 1;
        slice = ((w * h) / 64) - 1;

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
        radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, gpu_addr >> 8);
        radeon_ring_write(rdev, pitch);
        radeon_ring_write(rdev, slice);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, cb_color_info);
        radeon_ring_write(rdev, (1 << 4));
        radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
}
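
/*
 * Worked example (hypothetical sizes): for an 8192-pixel wide, 8-line high
 * COLOR_8 target, pitch = 8192/8 - 1 = 1023 and slice = (8192*8)/64 - 1 =
 * 1023, matching the 8-pixel pitch and 64-pixel slice granularity implied
 * by the divisions above.
 */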

/* emits 5dw */
static void
cp_set_surface_sync(struct radeon_device *rdev,
                    u32 sync_type, u32 size,
                    u64 mc_addr)
{
        u32 cp_coher_size;

        if (size == 0xffffffff)
                cp_coher_size = 0xffffffff;
        else
                cp_coher_size = ((size + 255) >> 8);

        radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
        radeon_ring_write(rdev, sync_type);
        radeon_ring_write(rdev, cp_coher_size);
        radeon_ring_write(rdev, mc_addr >> 8);
        radeon_ring_write(rdev, 10); /* poll interval */
}
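
/*
 * The coherency size and base address above are programmed in 256-byte
 * units, hence the (size + 255) >> 8 round-up and the mc_addr >> 8 shift;
 * 0xffffffff is passed through unchanged as the "everything" value.
 */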

/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
        u64 gpu_addr;

        /* VS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
        radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, gpu_addr >> 8);
        radeon_ring_write(rdev, 2);
        radeon_ring_write(rdev, 0);

        /* PS */
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
        radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, gpu_addr >> 8);
        radeon_ring_write(rdev, 1);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 2);

        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
        cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}

/* emits 10 + 1 sync (5) = 15 */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
        u32 sq_vtx_constant_word2, sq_vtx_constant_word3;

        /* high addr, stride */
        sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
        /* xyzw swizzles */
        sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
        radeon_ring_write(rdev, 0x580);
        radeon_ring_write(rdev, gpu_addr & 0xffffffff);
        radeon_ring_write(rdev, 48 - 1); /* size */
        radeon_ring_write(rdev, sq_vtx_constant_word2);
        radeon_ring_write(rdev, sq_vtx_constant_word3);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);

        if ((rdev->family == CHIP_CEDAR) ||
            (rdev->family == CHIP_PALM) ||
            (rdev->family == CHIP_CAICOS))
                cp_set_surface_sync(rdev,
                                    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
        else
                cp_set_surface_sync(rdev,
                                    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
}
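
/*
 * The blit vertex buffer holds three vertices of four floats each
 * (position x/y followed by texcoord x/y), i.e. a 16-byte stride and
 * 48 bytes total, which is where the "48 - 1" size and the 16 << 8
 * stride above come from.
 */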

/* emits 10 */
static void
set_tex_resource(struct radeon_device *rdev,
                 int format, int w, int h, int pitch,
                 u64 gpu_addr)
{
        u32 sq_tex_resource_word0, sq_tex_resource_word1;
        u32 sq_tex_resource_word4, sq_tex_resource_word7;

        if (h < 1)
                h = 1;

        sq_tex_resource_word0 = (1 << 0); /* 2D */
        sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
                                  ((w - 1) << 18));
        sq_tex_resource_word1 = ((h - 1) << 0);
        /* xyzw swizzles */
        sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);

        sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30);

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, sq_tex_resource_word0);
        radeon_ring_write(rdev, sq_tex_resource_word1);
        radeon_ring_write(rdev, gpu_addr >> 8);
        radeon_ring_write(rdev, gpu_addr >> 8);
        radeon_ring_write(rdev, sq_tex_resource_word4);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, sq_tex_resource_word7);
}
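
/*
 * Note the texture pitch field is encoded in 8-texel units
 * ((pitch >> 3) - 1) while width and height are programmed as w - 1 and
 * h - 1; the 256-byte-aligned base address is written twice.
 */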

/* emits 12 */
static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
             int x2, int y2)
{
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
        radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
        radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
        radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
        radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
        radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}

/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(rdev, DI_PT_RECTLIST);

        radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
        radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);

        radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
        radeon_ring_write(rdev, 1);

        radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
        radeon_ring_write(rdev, 3);
        radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
}
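
/*
 * DRAW_INDEX_AUTO with a vertex count of 3 and DI_PT_RECTLIST draws a
 * single screen-aligned rectangle from the three vertices set up in
 * evergreen_kms_blit_copy(); indices are auto-generated, so no index
 * buffer is needed.
 */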

/* emits 36 */
static void
set_default_state(struct radeon_device *rdev)
{
        u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
        u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
        u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
        int num_ps_gprs, num_vs_gprs, num_temp_gprs;
        int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
        int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
        int num_hs_threads, num_ls_threads;
        int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
        int num_hs_stack_entries, num_ls_stack_entries;
        u64 gpu_addr;
        int dwords;

        switch (rdev->family) {
        case CHIP_CEDAR:
        default:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 96;
                num_vs_threads = 16;
                num_gs_threads = 16;
                num_es_threads = 16;
                num_hs_threads = 16;
                num_ls_threads = 16;
                num_ps_stack_entries = 42;
                num_vs_stack_entries = 42;
                num_gs_stack_entries = 42;
                num_es_stack_entries = 42;
                num_hs_stack_entries = 42;
                num_ls_stack_entries = 42;
                break;
        case CHIP_REDWOOD:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 128;
                num_vs_threads = 20;
                num_gs_threads = 20;
                num_es_threads = 20;
                num_hs_threads = 20;
                num_ls_threads = 20;
                num_ps_stack_entries = 42;
                num_vs_stack_entries = 42;
                num_gs_stack_entries = 42;
                num_es_stack_entries = 42;
                num_hs_stack_entries = 42;
                num_ls_stack_entries = 42;
                break;
        case CHIP_JUNIPER:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 128;
                num_vs_threads = 20;
                num_gs_threads = 20;
                num_es_threads = 20;
                num_hs_threads = 20;
                num_ls_threads = 20;
                num_ps_stack_entries = 85;
                num_vs_stack_entries = 85;
                num_gs_stack_entries = 85;
                num_es_stack_entries = 85;
                num_hs_stack_entries = 85;
                num_ls_stack_entries = 85;
                break;
        case CHIP_CYPRESS:
        case CHIP_HEMLOCK:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 128;
                num_vs_threads = 20;
                num_gs_threads = 20;
                num_es_threads = 20;
                num_hs_threads = 20;
                num_ls_threads = 20;
                num_ps_stack_entries = 85;
                num_vs_stack_entries = 85;
                num_gs_stack_entries = 85;
                num_es_stack_entries = 85;
                num_hs_stack_entries = 85;
                num_ls_stack_entries = 85;
                break;
        case CHIP_PALM:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 96;
                num_vs_threads = 16;
                num_gs_threads = 16;
                num_es_threads = 16;
                num_hs_threads = 16;
                num_ls_threads = 16;
                num_ps_stack_entries = 42;
                num_vs_stack_entries = 42;
                num_gs_stack_entries = 42;
                num_es_stack_entries = 42;
                num_hs_stack_entries = 42;
                num_ls_stack_entries = 42;
                break;
        case CHIP_BARTS:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 128;
                num_vs_threads = 20;
                num_gs_threads = 20;
                num_es_threads = 20;
                num_hs_threads = 20;
                num_ls_threads = 20;
                num_ps_stack_entries = 85;
                num_vs_stack_entries = 85;
                num_gs_stack_entries = 85;
                num_es_stack_entries = 85;
                num_hs_stack_entries = 85;
                num_ls_stack_entries = 85;
                break;
        case CHIP_TURKS:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 128;
                num_vs_threads = 20;
                num_gs_threads = 20;
                num_es_threads = 20;
                num_hs_threads = 20;
                num_ls_threads = 20;
                num_ps_stack_entries = 42;
                num_vs_stack_entries = 42;
                num_gs_stack_entries = 42;
                num_es_stack_entries = 42;
                num_hs_stack_entries = 42;
                num_ls_stack_entries = 42;
                break;
        case CHIP_CAICOS:
                num_ps_gprs = 93;
                num_vs_gprs = 46;
                num_temp_gprs = 4;
                num_gs_gprs = 31;
                num_es_gprs = 31;
                num_hs_gprs = 23;
                num_ls_gprs = 23;
                num_ps_threads = 128;
                num_vs_threads = 10;
                num_gs_threads = 10;
                num_es_threads = 10;
                num_hs_threads = 10;
                num_ls_threads = 10;
                num_ps_stack_entries = 42;
                num_vs_stack_entries = 42;
                num_gs_stack_entries = 42;
                num_es_stack_entries = 42;
                num_hs_stack_entries = 42;
                num_ls_stack_entries = 42;
                break;
        }

        if ((rdev->family == CHIP_CEDAR) ||
            (rdev->family == CHIP_PALM) ||
            (rdev->family == CHIP_CAICOS))
                sq_config = 0;
        else
                sq_config = VC_ENABLE;

        sq_config |= (EXPORT_SRC_C |
                      CS_PRIO(0) |
                      LS_PRIO(0) |
                      HS_PRIO(0) |
                      PS_PRIO(0) |
                      VS_PRIO(1) |
                      GS_PRIO(2) |
                      ES_PRIO(3));

        sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
                                  NUM_VS_GPRS(num_vs_gprs) |
                                  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
        sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
                                  NUM_ES_GPRS(num_es_gprs));
        sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
                                  NUM_LS_GPRS(num_ls_gprs));
        sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
                                   NUM_VS_THREADS(num_vs_threads) |
                                   NUM_GS_THREADS(num_gs_threads) |
                                   NUM_ES_THREADS(num_es_threads));
        sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
                                     NUM_LS_THREADS(num_ls_threads));
        sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
                                    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
        sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
                                    NUM_ES_STACK_ENTRIES(num_es_stack_entries));
        sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
                                    NUM_LS_STACK_ENTRIES(num_ls_stack_entries));

        /* set clear context state */
        radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
        radeon_ring_write(rdev, 0);

        /* disable dyn gprs */
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(rdev, 0);

        /* SQ config */
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
        radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(rdev, sq_config);
        radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
        radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
        radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, sq_thread_resource_mgmt);
        radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
        radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
        radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
        radeon_ring_write(rdev, sq_stack_resource_mgmt_3);

        /* CONTEXT_CONTROL */
        radeon_ring_write(rdev, 0xc0012800);
        radeon_ring_write(rdev, 0x80000000);
        radeon_ring_write(rdev, 0x80000000);

        /* SQ_VTX_BASE_VTX_LOC */
        radeon_ring_write(rdev, 0xc0026f00);
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000000);

        /* SET_SAMPLER */
        radeon_ring_write(rdev, 0xc0036e00);
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000012);
        radeon_ring_write(rdev, 0x00000000);
        radeon_ring_write(rdev, 0x00000000);

        /* set to DX10/11 mode */
        radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
        radeon_ring_write(rdev, 1);

        /* emit an IB pointing at default state */
        dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
        gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
        radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
        radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
        radeon_ring_write(rdev, dwords);
}
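
/*
 * Dword accounting for the "emits 36" note above: 2 (CLEAR_STATE) +
 * 3 (dyn GPR disable) + 13 (SQ config block) + 3 (CONTEXT_CONTROL) +
 * 4 (SQ_VTX_BASE_VTX_LOC) + 5 (SET_SAMPLER) + 2 (MODE_CONTROL) +
 * 4 (INDIRECT_BUFFER) = 36.
 */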

static inline uint32_t i2f(uint32_t input)
{
        u32 result, i, exponent, fraction;

        if ((input & 0x3fff) == 0)
                result = 0; /* 0 is a special case */
        else {
                exponent = 140; /* exponent biased by 127 */
                fraction = (input & 0x3fff) << 10; /* cheat and only
                                                      handle numbers below 2^14 */
                for (i = 0; i < 14; i++) {
                        if (fraction & 0x800000)
                                break;
                        else {
                                fraction = fraction << 1; /* keep
                                                             shifting left until top bit = 1 */
                                exponent = exponent - 1;
                        }
                }
                result = exponent << 23 | (fraction & 0x7fffff); /* mask
                                                                    off top bit; assumed 1 */
        }
        return result;
}
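
/*
 * Example conversions: i2f(0) = 0x00000000, i2f(1) = 0x3f800000 (1.0f)
 * and i2f(8192) = 0x46000000 (8192.0f), which covers the coordinate
 * range used by the blit rectangles below.
 */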

int evergreen_blit_init(struct radeon_device *rdev)
{
        u32 obj_size;
        int r, dwords;
        void *ptr;
        u32 packet2s[16];
        int num_packet2s = 0;

        /* pin copy shader into vram if already initialized */
        if (rdev->r600_blit.shader_obj)
                goto done;

        mutex_init(&rdev->r600_blit.mutex);
        rdev->r600_blit.state_offset = 0;

        rdev->r600_blit.state_len = evergreen_default_size;

        dwords = rdev->r600_blit.state_len;
        while (dwords & 0xf) {
                packet2s[num_packet2s++] = PACKET2(0);
                dwords++;
        }

        obj_size = dwords * 4;
        obj_size = ALIGN(obj_size, 256);

        rdev->r600_blit.vs_offset = obj_size;
        obj_size += evergreen_vs_size * 4;
        obj_size = ALIGN(obj_size, 256);

        rdev->r600_blit.ps_offset = obj_size;
        obj_size += evergreen_ps_size * 4;
        obj_size = ALIGN(obj_size, 256);

        r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             &rdev->r600_blit.shader_obj);
        if (r) {
                DRM_ERROR("evergreen failed to allocate shader\n");
                return r;
        }

        DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
                  obj_size,
                  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
        if (r) {
                DRM_ERROR("failed to map blit object %d\n", r);
                return r;
        }

        memcpy_toio(ptr + rdev->r600_blit.state_offset,
                    evergreen_default_state, rdev->r600_blit.state_len * 4);

        if (num_packet2s)
                memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
                            packet2s, num_packet2s * 4);
        memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
        memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
        radeon_bo_kunmap(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);

done:
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->r600_blit.shader_gpu_addr);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        if (r) {
                dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
                return r;
        }
        rdev->mc.active_vram_size = rdev->mc.real_vram_size;
        return 0;
}

void evergreen_blit_fini(struct radeon_device *rdev)
{
        int r;

        rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
        if (rdev->r600_blit.shader_obj == NULL)
                return;
        /* If we can't reserve the bo, unref should be enough to destroy
         * it when it becomes idle.
         */
        r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
        if (!r) {
                radeon_bo_unpin(rdev->r600_blit.shader_obj);
                radeon_bo_unreserve(rdev->r600_blit.shader_obj);
        }
        radeon_bo_unref(&rdev->r600_blit.shader_obj);
}

static int evergreen_vb_ib_get(struct radeon_device *rdev)
{
        int r;
        r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
        if (r) {
                DRM_ERROR("failed to get IB for vertex buffer\n");
                return r;
        }

        rdev->r600_blit.vb_total = 64*1024;
        rdev->r600_blit.vb_used = 0;
        return 0;
}
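
/*
 * Each blit rectangle consumes 48 bytes (12 dwords) of vertex data, so
 * the 64KB vertex buffer above covers roughly 1365 rectangles;
 * evergreen_kms_blit_copy() only WARNs if a copy would run past that.
 */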

static void evergreen_vb_ib_put(struct radeon_device *rdev)
{
        radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
        radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}

int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
        int r;
        int ring_size, line_size;
        int max_size;
        /* loops of emits + fence emit possible */
        int dwords_per_loop = 74, num_loops;

        r = evergreen_vb_ib_get(rdev);
        if (r)
                return r;

        /* 8 bpp vs 32 bpp for xfer unit */
        if (size_bytes & 3)
                line_size = 8192;
        else
                line_size = 8192 * 4;

        max_size = 8192 * line_size;

        /* major loops cover the max size transfer */
        num_loops = ((size_bytes + max_size) / max_size);
        /* minor loops cover the extra non aligned bits */
        num_loops += ((size_bytes % line_size) ? 1 : 0);
        /* calculate number of loops correctly */
        ring_size = num_loops * dwords_per_loop;
        /* set default + shaders */
        ring_size += 52; /* shaders + def state */
        ring_size += 10; /* fence emit for VB IB */
        ring_size += 5; /* done copy */
        ring_size += 10; /* fence emit for done copy */
        r = radeon_ring_lock(rdev, ring_size);
        if (r)
                return r;

        set_default_state(rdev); /* 36 */
        set_shaders(rdev); /* 16 */
        return 0;
}
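
/*
 * The 74 dwords per loop break down as: 10 (source texture) + 5 (TC sync) +
 * 17 (render target) + 12 (scissors) + 15 (vertex resource) + 10 (draw) +
 * 5 (CB sync) = 74; the fixed 52 above is the 36-dword default state plus
 * the 16-dword shader setup.
 */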

void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
        int r;

        if (rdev->r600_blit.vb_ib)
                evergreen_vb_ib_put(rdev);

        if (fence)
                r = radeon_fence_emit(rdev, fence);

        radeon_ring_unlock_commit(rdev);
}

void evergreen_kms_blit_copy(struct radeon_device *rdev,
                             u64 src_gpu_addr, u64 dst_gpu_addr,
                             int size_bytes)
{
        int max_bytes;
        u64 vb_gpu_addr;
        u32 *vb;

        DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
                  size_bytes, rdev->r600_blit.vb_used);
        vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
        if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
                max_bytes = 8192;

                while (size_bytes) {
                        int cur_size = size_bytes;
                        int src_x = src_gpu_addr & 255;
                        int dst_x = dst_gpu_addr & 255;
                        int h = 1;
                        src_gpu_addr = src_gpu_addr & ~255ULL;
                        dst_gpu_addr = dst_gpu_addr & ~255ULL;

                        if (!src_x && !dst_x) {
                                h = (cur_size / max_bytes);
                                if (h > 8192)
                                        h = 8192;
                                if (h == 0)
                                        h = 1;
                                else
                                        cur_size = max_bytes;
                        } else {
                                if (cur_size > max_bytes)
                                        cur_size = max_bytes;
                                if (cur_size > (max_bytes - dst_x))
                                        cur_size = (max_bytes - dst_x);
                                if (cur_size > (max_bytes - src_x))
                                        cur_size = (max_bytes - src_x);
                        }

                        if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
                                WARN_ON(1);
                        }

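                        /*
                         * Three vertices, four floats each: position x/y in
                         * destination pixels followed by texcoord x/y into
                         * the source texture, matching the 16-byte stride
                         * programmed in set_vtx_resource().
                         */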
                        vb[0] = i2f(dst_x);
                        vb[1] = 0;
                        vb[2] = i2f(src_x);
                        vb[3] = 0;

                        vb[4] = i2f(dst_x);
                        vb[5] = i2f(h);
                        vb[6] = i2f(src_x);
                        vb[7] = i2f(h);

                        vb[8] = i2f(dst_x + cur_size);
                        vb[9] = i2f(h);
                        vb[10] = i2f(src_x + cur_size);
                        vb[11] = i2f(h);

                        /* src 10 */
                        set_tex_resource(rdev, FMT_8,
                                         src_x + cur_size, h, src_x + cur_size,
                                         src_gpu_addr);

                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

                        /* dst 17 */
                        set_render_target(rdev, COLOR_8,
                                          dst_x + cur_size, h,
                                          dst_gpu_addr);

                        /* scissors 12 */
                        set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);

                        /* 15 */
                        vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
                        set_vtx_resource(rdev, vb_gpu_addr);

                        /* draw 10 */
                        draw_auto(rdev);

                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
                                            cur_size * h, dst_gpu_addr);

                        vb += 12;
                        rdev->r600_blit.vb_used += 12 * 4;

                        src_gpu_addr += cur_size * h;
                        dst_gpu_addr += cur_size * h;
                        size_bytes -= cur_size * h;
                }
        } else {
                max_bytes = 8192 * 4;

                while (size_bytes) {
                        int cur_size = size_bytes;
                        int src_x = (src_gpu_addr & 255);
                        int dst_x = (dst_gpu_addr & 255);
                        int h = 1;
                        src_gpu_addr = src_gpu_addr & ~255ULL;
                        dst_gpu_addr = dst_gpu_addr & ~255ULL;

                        if (!src_x && !dst_x) {
                                h = (cur_size / max_bytes);
                                if (h > 8192)
                                        h = 8192;
                                if (h == 0)
                                        h = 1;
                                else
                                        cur_size = max_bytes;
                        } else {
                                if (cur_size > max_bytes)
                                        cur_size = max_bytes;
                                if (cur_size > (max_bytes - dst_x))
                                        cur_size = (max_bytes - dst_x);
                                if (cur_size > (max_bytes - src_x))
                                        cur_size = (max_bytes - src_x);
                        }

                        if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
                                WARN_ON(1);
                        }

                        vb[0] = i2f(dst_x / 4);
                        vb[1] = 0;
                        vb[2] = i2f(src_x / 4);
                        vb[3] = 0;

                        vb[4] = i2f(dst_x / 4);
                        vb[5] = i2f(h);
                        vb[6] = i2f(src_x / 4);
                        vb[7] = i2f(h);

                        vb[8] = i2f((dst_x + cur_size) / 4);
                        vb[9] = i2f(h);
                        vb[10] = i2f((src_x + cur_size) / 4);
                        vb[11] = i2f(h);

                        /* src 10 */
                        set_tex_resource(rdev, FMT_8_8_8_8,
                                         (src_x + cur_size) / 4,
                                         h, (src_x + cur_size) / 4,
                                         src_gpu_addr);
                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

                        /* dst 17 */
                        set_render_target(rdev, COLOR_8_8_8_8,
                                          (dst_x + cur_size) / 4, h,
                                          dst_gpu_addr);

                        /* scissors 12 */
                        set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h);

                        /* Vertex buffer setup 15 */
                        vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
                        set_vtx_resource(rdev, vb_gpu_addr);

                        /* draw 10 */
                        draw_auto(rdev);

                        /* 5 */
                        cp_set_surface_sync(rdev,
                                            PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
                                            cur_size * h, dst_gpu_addr);

                        /* 74 ring dwords per loop */
                        vb += 12;
                        rdev->r600_blit.vb_used += 12 * 4;

                        src_gpu_addr += cur_size * h;
                        dst_gpu_addr += cur_size * h;
                        size_bytes -= cur_size * h;
                }
        }
}