blob: bc63910b255acd35ceeebcef3d5385620c9aee78 [file] [log] [blame]
Daniel Vetter5a851b12011-12-04 21:42:31 +01001/*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Daniel Vetter <daniel.vetter@ffwll.ch>
25 *
26 */
27
Thomas Wood804e11f2015-08-17 17:57:43 +010028#include "igt.h"
Daniel Vetter5a851b12011-12-04 21:42:31 +010029#include <stdlib.h>
30#include <stdio.h>
31#include <string.h>
Daniel Vetter5a851b12011-12-04 21:42:31 +010032#include <fcntl.h>
33#include <inttypes.h>
34#include <errno.h>
35#include <sys/stat.h>
36#include <sys/time.h>
Daniel Vetterf5daeec2014-03-23 13:35:09 +010037
38#include <drm.h>
39
Daniel Vetter5a851b12011-12-04 21:42:31 +010040
Thomas Woodb2ac2642014-11-28 11:02:44 +000041IGT_TEST_DESCRIPTION("Test pwrite/pread consistency when touching partial"
42 " cachelines.");
43
Daniel Vetter5a851b12011-12-04 21:42:31 +010044/*
45 * Testcase: pwrite/pread consistency when touching partial cachelines
46 *
47 * Some fancy new pwrite/pread optimizations clflush in-line while
48 * reading/writing. Check whether all required clflushes happen.
49 *
50 */
51
52static drm_intel_bufmgr *bufmgr;
53struct intel_batchbuffer *batch;
54
55drm_intel_bo *scratch_bo;
56drm_intel_bo *staging_bo;
57#define BO_SIZE (4*4096)
58uint32_t devid;
Daniel Vetterd75d69d2012-01-15 18:32:11 +010059uint64_t mappable_gtt_limit;
Daniel Vetter5a851b12011-12-04 21:42:31 +010060int fd;
61
/*
 * Copy the full BO_SIZE payload from src to dst with the blitter.
 *
 * Going through the GPU (instead of a CPU memcpy) moves the data without
 * touching CPU caches, which is exactly what this test needs to detect
 * missing clflushes in the pread/pwrite paths.
 */
static void
copy_bo(drm_intel_bo *src, drm_intel_bo *dst)
{
	BLIT_COPY_BATCH_START(0);
	OUT_BATCH((3 << 24) | /* 32 bits */
		  (0xcc << 16) | /* copy ROP */
		  4096);		/* dst pitch in bytes */
	OUT_BATCH(0 << 16 | 0);		/* dst x1, y1 */
	OUT_BATCH((BO_SIZE/4096) << 16 | 1024); /* dst x2, y2: 1024 pixels * 32bpp = 4096 bytes per row */
	OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
	OUT_BATCH(0 << 16 | 0);		/* src x1, y1 */
	OUT_BATCH(4096);		/* src pitch in bytes */
	OUT_RELOC_FENCED(src, I915_GEM_DOMAIN_RENDER, 0, 0);
	ADVANCE_BATCH();

	intel_batchbuffer_flush(batch);
}
79
80static void
Chris Wilson47b61372016-01-08 10:40:33 +000081blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, uint8_t val)
Daniel Vetter5a851b12011-12-04 21:42:31 +010082{
83 uint8_t *gtt_ptr;
84 int i;
85
Chris Wilson47b61372016-01-08 10:40:33 +000086 do_or_die(drm_intel_gem_bo_map_gtt(tmp_bo));
Daniel Vetter5a851b12011-12-04 21:42:31 +010087 gtt_ptr = tmp_bo->virtual;
88
89 for (i = 0; i < BO_SIZE; i++)
90 gtt_ptr[i] = val;
91
92 drm_intel_gem_bo_unmap_gtt(tmp_bo);
93
Daniel Vetterd75d69d2012-01-15 18:32:11 +010094 if (bo->offset < mappable_gtt_limit &&
Daniel Vetterff409c52011-12-06 16:57:53 +010095 (IS_G33(devid) || intel_gen(devid) >= 4))
Daniel Vetter1caaf0a2013-08-12 12:17:35 +020096 igt_trash_aperture();
Daniel Vetter5a851b12011-12-04 21:42:31 +010097
98 copy_bo(tmp_bo, bo);
99}
100
#define MAX_BLT_SIZE 128
/* Iterations per subtest; each round uses a fresh fill value and range. */
#define ROUNDS 1000
/* Scratch area for pread results and pwrite source patterns. */
uint8_t tmp[BO_SIZE];
104
Chris Wilson47b61372016-01-08 10:40:33 +0000105static void get_range(int *start, int *len)
106{
107 *start = random() % (BO_SIZE - 1);
108 *len = random() % (BO_SIZE - *start - 1) + 1;
109}
110
Daniel Vetter1a55ca72012-11-28 12:47:26 +0100111static void test_partial_reads(void)
Daniel Vetter5a851b12011-12-04 21:42:31 +0100112{
113 int i, j;
Daniel Vetter5a851b12011-12-04 21:42:31 +0100114
Daniel Vettere624fa82014-05-14 00:36:04 +0200115 igt_info("checking partial reads\n");
Daniel Vetter36a40a52012-01-31 13:52:59 +0100116 for (i = 0; i < ROUNDS; i++) {
Chris Wilson47b61372016-01-08 10:40:33 +0000117 uint8_t val = i;
Daniel Vetter5a851b12011-12-04 21:42:31 +0100118 int start, len;
Daniel Vetter5a851b12011-12-04 21:42:31 +0100119
Chris Wilson47b61372016-01-08 10:40:33 +0000120 blt_bo_fill(staging_bo, scratch_bo, val);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100121
Chris Wilson47b61372016-01-08 10:40:33 +0000122 get_range(&start, &len);
123 do_or_die(drm_intel_bo_get_subdata(scratch_bo, start, len, tmp));
Daniel Vetter5a851b12011-12-04 21:42:31 +0100124 for (j = 0; j < len; j++) {
Daniel Vetterf3c54d02013-09-25 14:36:59 +0200125 igt_assert_f(tmp[j] == val,
Chris Wilson47b61372016-01-08 10:40:33 +0000126 "mismatch at %i [%i + %i], got: %i, expected: %i\n",
127 j, start, len, tmp[j], val);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100128 }
Daniel Vetter36a40a52012-01-31 13:52:59 +0100129
Daniel Vetter1caaf0a2013-08-12 12:17:35 +0200130 igt_progress("partial reads test: ", i, ROUNDS);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100131 }
Daniel Vetter1a55ca72012-11-28 12:47:26 +0100132}
133
134static void test_partial_writes(void)
135{
136 int i, j;
137 uint8_t *gtt_ptr;
138
Daniel Vettere624fa82014-05-14 00:36:04 +0200139 igt_info("checking partial writes\n");
Daniel Vetter36a40a52012-01-31 13:52:59 +0100140 for (i = 0; i < ROUNDS; i++) {
Chris Wilson47b61372016-01-08 10:40:33 +0000141 uint8_t val = i;
Daniel Vetter5a851b12011-12-04 21:42:31 +0100142 int start, len;
Daniel Vetter5a851b12011-12-04 21:42:31 +0100143
Chris Wilson47b61372016-01-08 10:40:33 +0000144 blt_bo_fill(staging_bo, scratch_bo, val);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100145
146 memset(tmp, i + 63, BO_SIZE);
147
Chris Wilson47b61372016-01-08 10:40:33 +0000148 get_range(&start, &len);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100149 drm_intel_bo_subdata(scratch_bo, start, len, tmp);
150
151 copy_bo(scratch_bo, staging_bo);
152 drm_intel_gem_bo_map_gtt(staging_bo);
153 gtt_ptr = staging_bo->virtual;
154
155 for (j = 0; j < start; j++) {
Daniel Vetterf3c54d02013-09-25 14:36:59 +0200156 igt_assert_f(gtt_ptr[j] == val,
Chris Wilson47b61372016-01-08 10:40:33 +0000157 "mismatch at %i (start=%i), got: %i, expected: %i\n",
158 j, start, tmp[j], val);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100159 }
160 for (; j < start + len; j++) {
Daniel Vetterf3c54d02013-09-25 14:36:59 +0200161 igt_assert_f(gtt_ptr[j] == tmp[0],
Chris Wilson47b61372016-01-08 10:40:33 +0000162 "mismatch at %i (%i/%i), got: %i, expected: %i\n",
163 j, j-start, len, tmp[j], i);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100164 }
165 for (; j < BO_SIZE; j++) {
Daniel Vetterf3c54d02013-09-25 14:36:59 +0200166 igt_assert_f(gtt_ptr[j] == val,
Chris Wilson47b61372016-01-08 10:40:33 +0000167 "mismatch at %i (end=%i), got: %i, expected: %i\n",
168 j, start+len, tmp[j], val);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100169 }
170 drm_intel_gem_bo_unmap_gtt(staging_bo);
Daniel Vetter36a40a52012-01-31 13:52:59 +0100171
Daniel Vetter1caaf0a2013-08-12 12:17:35 +0200172 igt_progress("partial writes test: ", i, ROUNDS);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100173 }
Daniel Vetter1a55ca72012-11-28 12:47:26 +0100174}
175
176static void test_partial_read_writes(void)
177{
178 int i, j;
179 uint8_t *gtt_ptr;
180
Daniel Vettere624fa82014-05-14 00:36:04 +0200181 igt_info("checking partial writes after partial reads\n");
Daniel Vetter36a40a52012-01-31 13:52:59 +0100182 for (i = 0; i < ROUNDS; i++) {
Chris Wilson47b61372016-01-08 10:40:33 +0000183 uint8_t val = i;
Daniel Vetter5a851b12011-12-04 21:42:31 +0100184 int start, len;
Daniel Vetter5a851b12011-12-04 21:42:31 +0100185
Chris Wilson47b61372016-01-08 10:40:33 +0000186 blt_bo_fill(staging_bo, scratch_bo, val);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100187
188 /* partial read */
Chris Wilson47b61372016-01-08 10:40:33 +0000189 get_range(&start, &len);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100190 drm_intel_bo_get_subdata(scratch_bo, start, len, tmp);
191 for (j = 0; j < len; j++) {
Daniel Vetterf3c54d02013-09-25 14:36:59 +0200192 igt_assert_f(tmp[j] == val,
Chris Wilson47b61372016-01-08 10:40:33 +0000193 "mismatch in read at %i [%i + %i], got: %i, expected: %i\n",
194 j, start, len, tmp[j], val);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100195 }
196
197 /* Change contents through gtt to make the pread cachelines
198 * stale. */
Chris Wilson47b61372016-01-08 10:40:33 +0000199 val += 17;
Daniel Vetter5a851b12011-12-04 21:42:31 +0100200 blt_bo_fill(staging_bo, scratch_bo, val);
201
202 /* partial write */
Daniel Vetter5a851b12011-12-04 21:42:31 +0100203 memset(tmp, i + 63, BO_SIZE);
204
Chris Wilson47b61372016-01-08 10:40:33 +0000205 get_range(&start, &len);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100206 drm_intel_bo_subdata(scratch_bo, start, len, tmp);
207
208 copy_bo(scratch_bo, staging_bo);
Chris Wilson47b61372016-01-08 10:40:33 +0000209 do_or_die(drm_intel_gem_bo_map_gtt(staging_bo));
Daniel Vetter5a851b12011-12-04 21:42:31 +0100210 gtt_ptr = staging_bo->virtual;
211
212 for (j = 0; j < start; j++) {
Daniel Vetterf3c54d02013-09-25 14:36:59 +0200213 igt_assert_f(gtt_ptr[j] == val,
Chris Wilson47b61372016-01-08 10:40:33 +0000214 "mismatch at %i (start=%i), got: %i, expected: %i\n",
215 j, start, tmp[j], val);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100216 }
217 for (; j < start + len; j++) {
Daniel Vetterf3c54d02013-09-25 14:36:59 +0200218 igt_assert_f(gtt_ptr[j] == tmp[0],
Chris Wilson47b61372016-01-08 10:40:33 +0000219 "mismatch at %i (%i/%i), got: %i, expected: %i\n",
220 j, j - start, len, tmp[j], tmp[0]);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100221 }
222 for (; j < BO_SIZE; j++) {
Daniel Vetterf3c54d02013-09-25 14:36:59 +0200223 igt_assert_f(gtt_ptr[j] == val,
Chris Wilson47b61372016-01-08 10:40:33 +0000224 "mismatch at %i (end=%i), got: %i, expected: %i\n",
225 j, start + len, tmp[j], val);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100226 }
227 drm_intel_gem_bo_unmap_gtt(staging_bo);
Daniel Vetter36a40a52012-01-31 13:52:59 +0100228
Daniel Vetter1caaf0a2013-08-12 12:17:35 +0200229 igt_progress("partial read/writes test: ", i, ROUNDS);
Daniel Vetter5a851b12011-12-04 21:42:31 +0100230 }
Daniel Vetter1a55ca72012-11-28 12:47:26 +0100231}
232
/*
 * Register the three subtests for one caching mode of the scratch bo.
 *
 * cache_level: argument for gem_set_caching(), or -1 to leave the bo's
 *              default caching mode untouched.
 * suffix: appended to the subtest names to distinguish the variants.
 *
 * Note: igt_fixture/igt_subtest_f only execute their bodies in the right
 * igt enumeration/run phases, so this function is safe to call at
 * igt_main scope.
 */
static void do_tests(int cache_level, const char *suffix)
{
	igt_fixture {
		if (cache_level != -1)
			gem_set_caching(fd, scratch_bo->handle, cache_level);
	}

	igt_subtest_f("reads%s", suffix)
		test_partial_reads();

	igt_subtest_f("write%s", suffix)
		test_partial_writes();

	igt_subtest_f("writes-after-reads%s", suffix)
		test_partial_read_writes();
}
249
igt_main
{
	/* Fixed seed so the random ranges are reproducible across runs. */
	srandom(0xdeadbeef);

	igt_skip_on_simulation();

	igt_fixture {
		fd = drm_open_driver(DRIVER_INTEL);

		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
		/* bo reuse deliberately left disabled so each bo gets fresh
		 * backing storage:
		 * drm_intel_bufmgr_gem_enable_reuse(bufmgr); */
		//drm_intel_bufmgr_gem_enable_reuse(bufmgr);
		devid = intel_get_drm_devid(fd);
		batch = intel_batchbuffer_alloc(bufmgr, devid);

		/* overallocate the buffers we're actually using because */
		scratch_bo = drm_intel_bo_alloc(bufmgr, "scratch bo", BO_SIZE, 4096);
		staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);

		igt_init_aperture_trashers(bufmgr);
		mappable_gtt_limit = gem_mappable_aperture_size();
	}

	/* Default caching first, then each explicit caching mode. */
	do_tests(-1, "");

	/* Repeat the tests using different levels of snooping */
	do_tests(0, "-uncached");
	do_tests(1, "-snoop");
	do_tests(2, "-display");

	igt_fixture {
		igt_cleanup_aperture_trashers();
		drm_intel_bufmgr_destroy(bufmgr);

		close(fd);
	}
}
Daniel Vetter5a851b12011-12-04 21:42:31 +0100285}