/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <unistd.h>	/* close() */
#include <sys/stat.h>
#include <sys/time.h>
#include "drm.h"
#include "ioctl_wrappers.h"
#include "drmtest.h"
#include "intel_chipset.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_gpu_tools.h"

/*
 * Testcase: pwrite/pread consistency when touching partial cachelines
 *
 * Some fancy new pwrite/pread optimizations clflush in-line while
 * reading/writing. Check whether all required clflushes happen.
 *
 */
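/*
 * Method (as implemented below): fill the bo with a known value through
 * the blitter, pread/pwrite random byte ranges, and verify the result
 * against a copy inspected through a GTT mapping.
 */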

static drm_intel_bufmgr *bufmgr;
struct intel_batchbuffer *batch;

drm_intel_bo *scratch_bo;
drm_intel_bo *staging_bo;
#define BO_SIZE (4*4096)
uint32_t devid;
uint64_t mappable_gtt_limit;
int fd;

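/* Copy src to dst with the blitter, treating each bo as a
 * 1024x(BO_SIZE/4096) 32bpp surface with a 4096 byte pitch. */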
static void
copy_bo(drm_intel_bo *src, drm_intel_bo *dst)
{
	BLIT_COPY_BATCH_START(devid, 0);
	OUT_BATCH((3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  4096);
	OUT_BATCH(0 << 16 | 0);
	OUT_BATCH((BO_SIZE/4096) << 16 | 1024);
	OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
	BLIT_RELOC_UDW(devid);
	OUT_BATCH(0 << 16 | 0);
	OUT_BATCH(4096);
	OUT_RELOC_FENCED(src, I915_GEM_DOMAIN_RENDER, 0, 0);
	BLIT_RELOC_UDW(devid);
	ADVANCE_BATCH();

	intel_batchbuffer_flush(batch);
}

static void
blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, int val)
{
	uint8_t *gtt_ptr;
	int i;

	drm_intel_gem_bo_map_gtt(tmp_bo);
	gtt_ptr = tmp_bo->virtual;

	for (i = 0; i < BO_SIZE; i++)
		gtt_ptr[i] = val;

	drm_intel_gem_bo_unmap_gtt(tmp_bo);

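	/* Presumably this evicts bo from the mappable aperture so the
	 * pread/pwrite paths under test cannot reuse a warm GTT binding;
	 * the chipset check appears to match the platforms with such a
	 * fast path (an inference from the condition, not documented). */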
	if (bo->offset < mappable_gtt_limit &&
	    (IS_G33(devid) || intel_gen(devid) >= 4))
		igt_trash_aperture();

	copy_bo(tmp_bo, bo);
}

#define MAX_BLT_SIZE 128
#define ROUNDS 1000
uint8_t tmp[BO_SIZE];

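/* pread random subranges of a blitter-filled bo and check that every
 * byte read back matches the fill value. */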
static void test_partial_reads(void)
{
	int i, j;

	printf("checking partial reads\n");
	for (i = 0; i < ROUNDS; i++) {
		int start, len;
		int val = i % 256;

		blt_bo_fill(staging_bo, scratch_bo, i);

		start = random() % BO_SIZE;
		len = random() % (BO_SIZE-start) + 1;

		drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
		for (j = 0; j < len; j++) {
			igt_assert_f(tmp[j] == val,
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, tmp[j], val);
		}

		igt_progress("partial reads test: ", i, ROUNDS);
	}
}

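/* pwrite random subranges over a known background fill and verify,
 * through a blitted copy, that exactly the written bytes changed. */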
static void test_partial_writes(void)
{
	int i, j;
	uint8_t *gtt_ptr;

	printf("checking partial writes\n");
	for (i = 0; i < ROUNDS; i++) {
		int start, len;
		int val = i % 256;

		blt_bo_fill(staging_bo, scratch_bo, i);

		start = random() % BO_SIZE;
		len = random() % (BO_SIZE-start) + 1;

		memset(tmp, i + 63, BO_SIZE);

		drm_intel_bo_subdata(scratch_bo, start, len, tmp);

		copy_bo(scratch_bo, staging_bo);
		drm_intel_gem_bo_map_gtt(staging_bo);
		gtt_ptr = staging_bo->virtual;

		for (j = 0; j < start; j++) {
			igt_assert_f(gtt_ptr[j] == val,
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], val);
		}
		for (; j < start + len; j++) {
			igt_assert_f(gtt_ptr[j] == tmp[0],
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], tmp[0]);
		}
		for (; j < BO_SIZE; j++) {
			igt_assert_f(gtt_ptr[j] == val,
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], val);
		}
		drm_intel_gem_bo_unmap_gtt(staging_bo);

		igt_progress("partial writes test: ", i, ROUNDS);
	}
}

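/* Interleave a partial pread with a partial pwrite, refilling the bo
 * through the GTT in between so that any cachelines left over from the
 * pread are stale and must be clflushed again on the write side. */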
static void test_partial_read_writes(void)
{
	int i, j;
	uint8_t *gtt_ptr;

	printf("checking partial writes after partial reads\n");
	for (i = 0; i < ROUNDS; i++) {
		int start, len;
		int val = i % 256;

		blt_bo_fill(staging_bo, scratch_bo, i);

		/* partial read */
		start = random() % BO_SIZE;
		len = random() % (BO_SIZE-start) + 1;

		drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
		for (j = 0; j < len; j++) {
			igt_assert_f(tmp[j] == val,
				     "mismatch in read at %i, got: %i, expected: %i\n",
				     j, tmp[j], val);
		}

		/* Change contents through gtt to make the pread cachelines
		 * stale. */
		val = (i + 17) % 256;
		blt_bo_fill(staging_bo, scratch_bo, val);

		/* partial write */
		start = random() % BO_SIZE;
		len = random() % (BO_SIZE-start) + 1;

		memset(tmp, i + 63, BO_SIZE);

		drm_intel_bo_subdata(scratch_bo, start, len, tmp);

		copy_bo(scratch_bo, staging_bo);
		drm_intel_gem_bo_map_gtt(staging_bo);
		gtt_ptr = staging_bo->virtual;

		for (j = 0; j < start; j++) {
			igt_assert_f(gtt_ptr[j] == val,
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], val);
		}
		for (; j < start + len; j++) {
			igt_assert_f(gtt_ptr[j] == tmp[0],
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], tmp[0]);
		}
		for (; j < BO_SIZE; j++) {
			igt_assert_f(gtt_ptr[j] == val,
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], val);
		}
		drm_intel_gem_bo_unmap_gtt(staging_bo);

		igt_progress("partial read/writes test: ", i, ROUNDS);
	}
}

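/* Run all three tests at the given buffer caching level; -1 keeps the
 * kernel default, otherwise the level is set via gem_set_caching()
 * (0/1/2 matching the -uncached/-snoop/-display suffixes below). */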
static void do_tests(int cache_level, const char *suffix)
{
	igt_fixture {
		if (cache_level != -1)
			gem_set_caching(fd, scratch_bo->handle, cache_level);
	}

	igt_subtest_f("reads%s", suffix)
		test_partial_reads();

	igt_subtest_f("write%s", suffix)
		test_partial_writes();

	igt_subtest_f("writes-after-reads%s", suffix)
		test_partial_read_writes();
}

igt_main
{
	srandom(0xdeadbeef);

	igt_skip_on_simulation();

	igt_fixture {
		fd = drm_open_any();

		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		//drm_intel_bufmgr_gem_enable_reuse(bufmgr);
		devid = intel_get_drm_devid(fd);
		batch = intel_batchbuffer_alloc(bufmgr, devid);

		/* overallocate the buffers we're actually using because */
		scratch_bo = drm_intel_bo_alloc(bufmgr, "scratch bo", BO_SIZE, 4096);
		staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);

		igt_init_aperture_trashers(bufmgr);
		mappable_gtt_limit = gem_mappable_aperture_size();
	}

	do_tests(-1, "");

	/* Repeat the tests using different levels of snooping */
	do_tests(0, "-uncached");
	do_tests(1, "-snoop");
	do_tests(2, "-display");

	igt_fixture {
		igt_cleanup_aperture_trashers();
		drm_intel_bufmgr_destroy(bufmgr);

		close(fd);
	}
}