/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "linux/string.h"
#include "linux/bitops.h"
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/** @file i915_gem_tiling.c
 *
 * Support for managing tiling state of buffer objects.
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * The performance improvement from doing this on the back/depth buffer is on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled.  However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y.  So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics.  This
 * is called "Channel XOR Randomization" in the MCH documentation.  The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what address
 * swizzling it needs to do, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bits 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bits 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
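
/* A minimal illustrative sketch (not used by this driver) of the cumulative
 * CPU-side swizzle described above.  Under I915_BIT_6_SWIZZLE_9_10, bit 6 of
 * a byte offset into a tiled object must be XORed with bits 9 and 10 before
 * the CPU reads or writes the page, so that its view matches the GPU's; the
 * 9_10_11 variant would additionally XOR in bit 11.  The helper name is made
 * up for the example.
 */
static inline unsigned long
i915_swizzle_offset_9_10_example(unsigned long offset)
{
	/* XOR of bits 9 and 10 decides whether bit 6 flips. */
	unsigned long bit6 = ((offset >> 9) ^ (offset >> 10)) & 1;

	return offset ^ (bit6 << 6);
}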

/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (IS_GEN5(dev) || IS_GEN6(dev)) {
		/* On Ironlake and Sandybridge, the GPU always uses the
		 * same swizzling setup regardless of the DRAM
		 * configuration.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev)) {
		uint32_t dcc;

		/* On mobile 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC.  For single-channel, neither the CPU
		 * nor the GPU do swizzling.  For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled.  The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}
		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR.  "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration.  It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B    1-ch   2-ch
		 * 512  0    0    0     512    0     O
		 * 512  0    512  0     16     1008  X
		 * 512  0    0    512   16     1008  X
		 * 0    512  0    512   16     1008  X
		 * 1024 1024 1024 0     2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 */
		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}

/* Check pitch constraints for all chips & tiling formats */
bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
	int tile_width;

	/* Linear is always fine */
	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (IS_GEN2(dev) ||
	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
		tile_width = 128;
	else
		tile_width = 512;

	/* check maximum stride & object size */
	if (INTEL_INFO(dev)->gen >= 4) {
		/* i965 stores the end address of the gtt mapping in the fence
		 * reg, so don't bother to check the size */
		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
			return false;
	} else {
		if (stride > 8192)
			return false;

		if (IS_GEN3(dev)) {
			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
				return false;
		} else {
			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
				return false;
		}
	}

	/* 965+ just needs multiples of tile width */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (stride & (tile_width - 1))
			return false;
		return true;
	}

	/* Pre-965 needs power of two tile widths */
	if (stride < tile_width)
		return false;

	if (stride & (stride - 1))
		return false;

	return true;
}
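
/* A minimal sketch (illustrative only, not used in-tree) of how a caller
 * could derive a stride that i915_tiling_ok() will accept: round the surface
 * pitch up to the tile width, and on pre-965 parts also round up to a power
 * of two.  roundup() and roundup_pow_of_two() come from linux/kernel.h and
 * linux/log2.h; the helper name is made up.
 */
static inline int
i915_example_legal_stride(struct drm_device *dev, int pitch, int tiling_mode)
{
	int tile_width, stride;

	if (tiling_mode == I915_TILING_NONE)
		return pitch;

	/* Same tile-width selection as i915_tiling_ok() above. */
	if (IS_GEN2(dev) ||
	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
		tile_width = 128;
	else
		tile_width = 512;

	stride = roundup(pitch, tile_width);
	if (INTEL_INFO(dev)->gen < 4)
		stride = roundup_pow_of_two(stride);

	return stride;
}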

bool
i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (obj_priv->gtt_space == NULL)
		return true;

	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (INTEL_INFO(dev)->gen >= 4)
		return true;

	if (obj_priv->gtt_offset & (obj->size - 1))
		return false;

	if (IS_GEN3(dev)) {
		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
			return false;
	} else {
		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
			return false;
	}

	return true;
}

/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	if (obj_priv->pin_count) {
		drm_gem_object_unreference_unlocked(obj);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (args->tiling_mode != obj_priv->tiling_mode ||
	    args->stride != obj_priv->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode.  Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 */
		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
			ret = i915_gem_object_unbind(obj);
		else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
			ret = i915_gem_object_put_fence_reg(obj, true);
		else
			i915_gem_release_mmap(obj);

		if (ret != 0) {
			args->tiling_mode = obj_priv->tiling_mode;
			args->stride = obj_priv->stride;
			goto err;
		}

		obj_priv->tiling_mode = args->tiling_mode;
		obj_priv->stride = args->stride;
	}
err:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
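
/* Userspace-side sketch (illustrative, not part of this file) of driving the
 * ioctl above through libdrm.  It assumes an open DRM fd, a valid GEM handle,
 * and the usual userspace headers (<string.h>, <errno.h>, <xf86drm.h> and the
 * i915 uapi header); the function name is made up.  The kernel may
 * legitimately downgrade the request (e.g. to I915_TILING_NONE when swizzling
 * is unknown), so callers must read back args.tiling_mode rather than assume
 * the mode they asked for.
 */
static int
set_x_tiling_example(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.tiling_mode = I915_TILING_X;
	args.stride = stride;

	/* drmIoctl() restarts the ioctl if it is interrupted by a signal. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &args) != 0)
		return -errno;

	return args.tiling_mode;	/* may differ from the request */
}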

/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;
	obj_priv = to_intel_bo(obj);

	mutex_lock(&dev->struct_mutex);

	args->tiling_mode = obj_priv->tiling_mode;
	switch (obj_priv->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		break;
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	default:
		DRM_ERROR("unknown tiling mode\n");
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void
i915_gem_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}

void
i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count = obj->size >> PAGE_SHIFT;
	int i;

	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
		return;

	if (obj_priv->bit_17 == NULL)
		return;

	for (i = 0; i < page_count; i++) {
		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
		if ((new_bit_17 & 0x1) !=
		    (test_bit(i, obj_priv->bit_17) != 0)) {
			i915_gem_swizzle_page(obj_priv->pages[i]);
			set_page_dirty(obj_priv->pages[i]);
		}
	}
}

void
i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count = obj->size >> PAGE_SHIFT;
	int i;

	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
		return;

	if (obj_priv->bit_17 == NULL) {
		obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
					   sizeof(long), GFP_KERNEL);
		if (obj_priv->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

	for (i = 0; i < page_count; i++) {
		if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
			__set_bit(i, obj_priv->bit_17);
		else
			__clear_bit(i, obj_priv->bit_17);
	}
}