/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <unistd.h> /* close() */
#include <sys/stat.h>
#include <sys/time.h>

#include <drm.h>

#include "ioctl_wrappers.h"
#include "drmtest.h"
#include "intel_chipset.h"
#include "intel_io.h"
#include "igt_aux.h"

/*
 * Testcase: pwrite/pread consistency when touching partial cachelines
 *
 * Some fancy new pwrite/pread optimizations clflush in-line while
 * reading/writing. Check whether all required clflushes happen.
 */
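
/*
 * scratch_bo is the object exercised with pread/pwrite; staging_bo is used
 * to set up and inspect its contents through the blitter and GTT mmaps.
 */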
static drm_intel_bufmgr *bufmgr;
struct intel_batchbuffer *batch;

drm_intel_bo *scratch_bo;
drm_intel_bo *staging_bo;
#define BO_SIZE (4*4096)
uint32_t devid;
uint64_t mappable_gtt_limit;
int fd;
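
/*
 * Copy the full buffer with the blitter: BO_SIZE/4096 rows of 1024 pixels
 * at 32bpp (4096-byte stride), using the 0xcc (source copy) ROP.
 */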
static void
copy_bo(drm_intel_bo *src, drm_intel_bo *dst)
{
	BLIT_COPY_BATCH_START(devid, 0);
	OUT_BATCH((3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  4096);
	OUT_BATCH(0 << 16 | 0);
	OUT_BATCH((BO_SIZE/4096) << 16 | 1024);
	OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
	BLIT_RELOC_UDW(devid);
	OUT_BATCH(0 << 16 | 0);
	OUT_BATCH(4096);
	OUT_RELOC_FENCED(src, I915_GEM_DOMAIN_RENDER, 0, 0);
	BLIT_RELOC_UDW(devid);
	ADVANCE_BATCH();

	intel_batchbuffer_flush(batch);
}
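
/*
 * Fill bo with a constant byte without touching its CPU domain: write the
 * pattern through a GTT mapping of tmp_bo, trash the mappable aperture if
 * bo currently resides in it (to force it out), then blit tmp_bo into bo.
 */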
static void
blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, int val)
{
	uint8_t *gtt_ptr;
	int i;

	drm_intel_gem_bo_map_gtt(tmp_bo);
	gtt_ptr = tmp_bo->virtual;

	for (i = 0; i < BO_SIZE; i++)
		gtt_ptr[i] = val;

	drm_intel_gem_bo_unmap_gtt(tmp_bo);

	if (bo->offset < mappable_gtt_limit &&
	    (IS_G33(devid) || intel_gen(devid) >= 4))
		igt_trash_aperture();

	copy_bo(tmp_bo, bo);
}

#define MAX_BLT_SIZE 128
#define ROUNDS 1000
uint8_t tmp[BO_SIZE];
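
/*
 * Fill the bo through the blitter, pread back random (start, len) ranges
 * and check that every byte read matches the fill value.
 */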
static void test_partial_reads(void)
{
	int i, j;

	printf("checking partial reads\n");
	for (i = 0; i < ROUNDS; i++) {
		int start, len;
		int val = i % 256;

		blt_bo_fill(staging_bo, scratch_bo, i);

		start = random() % BO_SIZE;
		len = random() % (BO_SIZE-start) + 1;

		drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
		for (j = 0; j < len; j++) {
			igt_assert_f(tmp[j] == val,
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, tmp[j], val);
		}

		igt_progress("partial reads test: ", i, ROUNDS);
	}
}
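
/*
 * Fill the bo through the blitter, pwrite a random (start, len) range and
 * verify through a GTT mapping that only that range was overwritten.
 */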
static void test_partial_writes(void)
{
	int i, j;
	uint8_t *gtt_ptr;

	printf("checking partial writes\n");
	for (i = 0; i < ROUNDS; i++) {
		int start, len;
		int val = i % 256;

		blt_bo_fill(staging_bo, scratch_bo, i);

		start = random() % BO_SIZE;
		len = random() % (BO_SIZE-start) + 1;

		memset(tmp, i + 63, BO_SIZE);

		drm_intel_bo_subdata(scratch_bo, start, len, tmp);

		copy_bo(scratch_bo, staging_bo);
		drm_intel_gem_bo_map_gtt(staging_bo);
		gtt_ptr = staging_bo->virtual;

		for (j = 0; j < start; j++) {
			igt_assert_f(gtt_ptr[j] == val,
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], val);
		}
		for (; j < start + len; j++) {
			igt_assert_f(gtt_ptr[j] == tmp[0],
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], tmp[0]);
		}
		for (; j < BO_SIZE; j++) {
			igt_assert_f(gtt_ptr[j] == val,
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], val);
		}
		drm_intel_gem_bo_unmap_gtt(staging_bo);

		igt_progress("partial writes test: ", i, ROUNDS);
	}
}
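
/*
 * Combine both: pread a random range first to prime the CPU cachelines,
 * refill the bo through the blitter behind the CPU's back, then pwrite a
 * random range and verify that no stale cachelines leaked into the object.
 */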
static void test_partial_read_writes(void)
{
	int i, j;
	uint8_t *gtt_ptr;

	printf("checking partial writes after partial reads\n");
	for (i = 0; i < ROUNDS; i++) {
		int start, len;
		int val = i % 256;

		blt_bo_fill(staging_bo, scratch_bo, i);

		/* partial read */
		start = random() % BO_SIZE;
		len = random() % (BO_SIZE-start) + 1;

		drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
		for (j = 0; j < len; j++) {
			igt_assert_f(tmp[j] == val,
				     "mismatch in read at %i, got: %i, expected: %i\n",
				     j, tmp[j], val);
		}

		/* Change contents through gtt to make the pread cachelines
		 * stale. */
		val = (i + 17) % 256;
		blt_bo_fill(staging_bo, scratch_bo, val);

		/* partial write */
		start = random() % BO_SIZE;
		len = random() % (BO_SIZE-start) + 1;

		memset(tmp, i + 63, BO_SIZE);

		drm_intel_bo_subdata(scratch_bo, start, len, tmp);

		copy_bo(scratch_bo, staging_bo);
		drm_intel_gem_bo_map_gtt(staging_bo);
		gtt_ptr = staging_bo->virtual;

		for (j = 0; j < start; j++) {
			igt_assert_f(gtt_ptr[j] == val,
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], val);
		}
		for (; j < start + len; j++) {
			igt_assert_f(gtt_ptr[j] == tmp[0],
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], tmp[0]);
		}
		for (; j < BO_SIZE; j++) {
			igt_assert_f(gtt_ptr[j] == val,
				     "mismatch at %i, got: %i, expected: %i\n",
				     j, gtt_ptr[j], val);
		}
		drm_intel_gem_bo_unmap_gtt(staging_bo);

		igt_progress("partial read/writes test: ", i, ROUNDS);
	}
}
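
/*
 * Run the three subtests, optionally forcing the scratch bo to a specific
 * caching level first; cache_level == -1 leaves the default in place.
 */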
static void do_tests(int cache_level, const char *suffix)
{
	igt_fixture {
		if (cache_level != -1)
			gem_set_caching(fd, scratch_bo->handle, cache_level);
	}

	igt_subtest_f("reads%s", suffix)
		test_partial_reads();

	igt_subtest_f("write%s", suffix)
		test_partial_writes();

	igt_subtest_f("writes-after-reads%s", suffix)
		test_partial_read_writes();
}

igt_main
{
	srandom(0xdeadbeef);

	igt_skip_on_simulation();

	igt_fixture {
		fd = drm_open_any();

		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		//drm_intel_bufmgr_gem_enable_reuse(bufmgr);
		devid = intel_get_drm_devid(fd);
		batch = intel_batchbuffer_alloc(bufmgr, devid);

		/* overallocate the buffers we're actually using */
		scratch_bo = drm_intel_bo_alloc(bufmgr, "scratch bo", BO_SIZE, 4096);
		staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);

		igt_init_aperture_trashers(bufmgr);
		mappable_gtt_limit = gem_mappable_aperture_size();
	}

	do_tests(-1, "");

	/* Repeat the tests using different levels of snooping */
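	/* The values passed to gem_set_caching correspond to the kernel's
	 * I915_CACHING_NONE (0), I915_CACHING_CACHED (1) and
	 * I915_CACHING_DISPLAY (2) levels. */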
	do_tests(0, "-uncached");
	do_tests(1, "-snoop");
	do_tests(2, "-display");

	igt_fixture {
		igt_cleanup_aperture_trashers();
		drm_intel_bufmgr_destroy(bufmgr);

		close(fd);
	}
}