1 | /* |
2 | * Copyright © 2008 Intel Corporation |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. |
22 | * |
23 | * Authors: |
24 | * Eric Anholt <eric@anholt.net> |
25 | * |
26 | */ |
27 | |
28 | #include <linux/string.h> |
29 | #include <linux/bitops.h> |
30 | #include <drm/drmP.h> |
31 | #include <drm/i915_drm.h> |
32 | #include "i915_drv.h" |
33 | |
34 | /** @file i915_gem_tiling.c |
35 | * |
36 | * Support for managing tiling state of buffer objects. |
37 | * |
38 | * The idea behind tiling is to increase cache hit rates by rearranging |
39 | * pixel data so that a group of pixel accesses are in the same cacheline. |
 * The performance improvement from doing this on the back/depth buffer is
 * on the order of 30%.
42 | * |
43 | * Intel architectures make this somewhat more complicated, though, by |
44 | * adjustments made to addressing of data when the memory is in interleaved |
45 | * mode (matched pairs of DIMMS) to improve memory bandwidth. |
46 | * For interleaved memory, the CPU sends every sequential 64 bytes |
47 | * to an alternate memory channel so it can get the bandwidth from both. |
48 | * |
49 | * The GPU also rearranges its accesses for increased bandwidth to interleaved |
50 | * memory, and it matches what the CPU does for non-tiled. However, when tiled |
51 | * it does it a little differently, since one walks addresses not just in the |
52 | * X direction but also Y. So, along with alternating channels when bit |
53 | * 6 of the address flips, it also alternates when other bits flip -- Bits 9 |
54 | * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines) |
55 | * are common to both the 915 and 965-class hardware. |
56 | * |
 * The CPU also sometimes XORs in higher bits, to improve
58 | * bandwidth doing strided access like we do so frequently in graphics. This |
59 | * is called "Channel XOR Randomization" in the MCH documentation. The result |
60 | * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address |
61 | * decode. |
62 | * |
63 | * All of this bit 6 XORing has an effect on our memory management, |
64 | * as we need to make sure that the 3d driver can correctly address object |
65 | * contents. |
66 | * |
67 | * If we don't have interleaved memory, all tiling is safe and no swizzling is |
68 | * required. |
69 | * |
70 | * When bit 17 is XORed in, we simply refuse to tile at all. Bit |
 * 17 is not just a page offset, so as we page an object out and back in,
72 | * individual pages in it will have different bit 17 addresses, resulting in |
73 | * each 64 bytes being swapped with its neighbor! |
74 | * |
75 | * Otherwise, if interleaved, we have to tell the 3d driver what the address |
76 | * swizzling it needs to do is, since it's writing with the CPU to the pages |
77 | * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the |
78 | * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling |
79 | * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order |
80 | * to match what the GPU expects. |
81 | */ |
82 | |
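/*
 * Illustrative sketch (not part of the driver): for the common 9/10
 * swizzle mode, the CPU must swizzle the offset it uses for a byte in a
 * tiled object by XORing bits 9 and 10 into bit 6:
 *
 *	swizzled = offset ^ ((((offset >> 9) ^ (offset >> 10)) & 1) << 6);
 *
 * For the 9/10/11 mode, bit 11 joins the XOR as well.  The userspace 3d
 * driver applies this correction when accessing tiled objects with the
 * CPU, based on the swizzle mode reported by the get-tiling ioctl below.
 */
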
83 | /** |
84 | * Detects bit 6 swizzling of address lookup between IGD access and CPU |
85 | * access through main memory. |
86 | */ |
87 | void |
88 | i915_gem_detect_bit_6_swizzle(struct drm_device *dev) |
89 | { |
90 | struct drm_i915_private *dev_priv = dev->dev_private; |
91 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
92 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
93 | |
94 | if (IS_VALLEYVIEW(dev)) { |
95 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
96 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
97 | } else if (INTEL_INFO(dev)->gen >= 6) { |
98 | uint32_t dimm_c0, dimm_c1; |
99 | dimm_c0 = I915_READ(MAD_DIMM_C0); |
100 | dimm_c1 = I915_READ(MAD_DIMM_C1); |
101 | dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; |
102 | dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; |
103 | /* Enable swizzling when the channels are populated with |
104 | * identically sized dimms. We don't need to check the 3rd |
105 | * channel because no cpu with gpu attached ships in that |
106 | * configuration. Also, swizzling only makes sense for 2 |
107 | * channels anyway. */ |
108 | if (dimm_c0 == dimm_c1) { |
109 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
110 | swizzle_y = I915_BIT_6_SWIZZLE_9; |
111 | } else { |
112 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
113 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
114 | } |
115 | } else if (IS_GEN5(dev)) { |
		/* On Ironlake, whatever the DRAM config, the GPU always
		 * uses the same swizzling setup.
		 */
119 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
120 | swizzle_y = I915_BIT_6_SWIZZLE_9; |
121 | } else if (IS_GEN2(dev)) { |
122 | /* As far as we know, the 865 doesn't have these bit 6 |
123 | * swizzling issues. |
124 | */ |
125 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
126 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
127 | } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) { |
128 | uint32_t dcc; |
129 | |
130 | /* On 9xx chipsets, channel interleave by the CPU is |
131 | * determined by DCC. For single-channel, neither the CPU |
132 | * nor the GPU do swizzling. For dual channel interleaved, |
133 | * the GPU's interleave is bit 9 and 10 for X tiled, and bit |
134 | * 9 for Y tiled. The CPU's interleave is independent, and |
135 | * can be based on either bit 11 (haven't seen this yet) or |
136 | * bit 17 (common). |
137 | */ |
138 | dcc = I915_READ(DCC); |
139 | switch (dcc & DCC_ADDRESSING_MODE_MASK) { |
140 | case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: |
141 | case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: |
142 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
143 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
144 | break; |
145 | case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: |
146 | if (dcc & DCC_CHANNEL_XOR_DISABLE) { |
147 | /* This is the base swizzling by the GPU for |
148 | * tiled buffers. |
149 | */ |
150 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
151 | swizzle_y = I915_BIT_6_SWIZZLE_9; |
152 | } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { |
153 | /* Bit 11 swizzling by the CPU in addition. */ |
154 | swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; |
155 | swizzle_y = I915_BIT_6_SWIZZLE_9_11; |
156 | } else { |
157 | /* Bit 17 swizzling by the CPU in addition. */ |
158 | swizzle_x = I915_BIT_6_SWIZZLE_9_10_17; |
159 | swizzle_y = I915_BIT_6_SWIZZLE_9_17; |
160 | } |
161 | break; |
162 | } |
163 | if (dcc == 0xffffffff) { |
164 | DRM_ERROR("Couldn't read from MCHBAR. " |
165 | "Disabling tiling.\n" ); |
166 | swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
167 | swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
168 | } |
169 | } else { |
		/* The 965, G33, and newer have a very flexible memory
		 * configuration.  They will enable dual-channel mode
		 * (interleaving) on as much memory as they can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
175 | * |
176 | * Here's what I found on the G965: |
177 | * slot fill memory size swizzling |
178 | * 0A 0B 1A 1B 1-ch 2-ch |
179 | * 512 0 0 0 512 0 O |
180 | * 512 0 512 0 16 1008 X |
181 | * 512 0 0 512 16 1008 X |
182 | * 0 512 0 512 16 1008 X |
183 | * 1024 1024 1024 0 2048 1024 O |
184 | * |
185 | * We could probably detect this based on either the DRB |
186 | * matching, which was the case for the swizzling required in |
187 | * the table above, or from the 1-ch value being less than |
188 | * the minimum size of a rank. |
189 | */ |
190 | if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { |
191 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
192 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
193 | } else { |
194 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
195 | swizzle_y = I915_BIT_6_SWIZZLE_9; |
196 | } |
197 | } |
198 | |
199 | dev_priv->mm.bit_6_swizzle_x = swizzle_x; |
200 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; |
201 | } |
202 | |
/* Check pitch constraints for all chips & tiling formats */
204 | static bool |
205 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) |
206 | { |
207 | int tile_width; |
208 | |
209 | /* Linear is always fine */ |
210 | if (tiling_mode == I915_TILING_NONE) |
211 | return true; |
212 | |
213 | if (IS_GEN2(dev) || |
214 | (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) |
215 | tile_width = 128; |
216 | else |
217 | tile_width = 512; |
218 | |
219 | /* check maximum stride & object size */ |
220 | /* i965+ stores the end address of the gtt mapping in the fence |
	 * reg, so don't bother to check the size */
222 | if (INTEL_INFO(dev)->gen >= 7) { |
223 | if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL) |
224 | return false; |
225 | } else if (INTEL_INFO(dev)->gen >= 4) { |
226 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) |
227 | return false; |
228 | } else { |
229 | if (stride > 8192) |
230 | return false; |
231 | |
232 | if (IS_GEN3(dev)) { |
233 | if (size > I830_FENCE_MAX_SIZE_VAL << 20) |
234 | return false; |
235 | } else { |
236 | if (size > I830_FENCE_MAX_SIZE_VAL << 19) |
237 | return false; |
238 | } |
239 | } |
240 | |
241 | if (stride < tile_width) |
242 | return false; |
243 | |
244 | /* 965+ just needs multiples of tile width */ |
245 | if (INTEL_INFO(dev)->gen >= 4) { |
246 | if (stride & (tile_width - 1)) |
247 | return false; |
248 | return true; |
249 | } |
250 | |
251 | /* Pre-965 needs power of two tile widths */ |
252 | if (stride & (stride - 1)) |
253 | return false; |
254 | |
255 | return true; |
256 | } |
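
/*
 * Illustrative sketch of the stride rules above (not driver code),
 * assuming "row_bytes" is the tight byte width of the surface:
 *
 *	// gen4+: pad the stride to a multiple of the tile width
 *	stride = roundup(row_bytes, tile_width);
 *	// pre-965: the stride must also be a power of two
 *	stride = max(roundup_pow_of_two(row_bytes), tile_width);
 *
 * e.g. a 1366-byte row becomes a 1536-byte X-tile stride on gen4+ and a
 * 2048-byte stride on gen3 and older.
 */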
257 | |
258 | /* Is the current GTT allocation valid for the change in tiling? */ |
259 | static bool |
260 | i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) |
261 | { |
262 | u32 size; |
263 | |
264 | if (tiling_mode == I915_TILING_NONE) |
265 | return true; |
266 | |
267 | if (INTEL_INFO(obj->base.dev)->gen >= 4) |
268 | return true; |
269 | |
270 | if (INTEL_INFO(obj->base.dev)->gen == 3) { |
271 | if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) |
272 | return false; |
273 | } else { |
274 | if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) |
275 | return false; |
276 | } |
277 | |
278 | size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); |
279 | if (i915_gem_obj_ggtt_size(obj) != size) |
280 | return false; |
281 | |
282 | if (i915_gem_obj_ggtt_offset(obj) & (size - 1)) |
283 | return false; |
284 | |
285 | return true; |
286 | } |
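
/*
 * Illustrative example (not driver code): on gen3 a 900KiB X-tiled object
 * needs a power-of-two fence region of 1MiB, so the checks above only pass
 * if the object occupies a 1MiB GTT slot at a 1MiB-aligned offset; if it
 * was originally bound linearly without that alignment, set_tiling below
 * marks it as not map-and-fenceable so it gets rebound before any fenced
 * access.
 */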
287 | |
288 | /** |
289 | * Sets the tiling mode of an object, returning the required swizzling of |
290 | * bit 6 of addresses in the object. |
291 | */ |
292 | int |
293 | i915_gem_set_tiling(struct drm_device *dev, void *data, |
294 | struct drm_file *file) |
295 | { |
296 | struct drm_i915_gem_set_tiling *args = data; |
297 | struct drm_i915_private *dev_priv = dev->dev_private; |
298 | struct drm_gem_object *gobj; |
299 | struct drm_i915_gem_object *obj; |
300 | int ret = 0; |
301 | |
302 | gobj = drm_gem_object_lookup(dev, file, args->handle); |
303 | if (gobj == NULL) |
304 | return -ENOENT; |
305 | obj = to_intel_bo(gobj); |
306 | |
307 | if (!i915_tiling_ok(dev, |
308 | args->stride, obj->base.size, args->tiling_mode)) { |
309 | drm_gem_object_unreference_unlocked(&obj->base); |
310 | return -EINVAL; |
311 | } |
312 | |
313 | if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) { |
314 | drm_gem_object_unreference_unlocked(&obj->base); |
315 | return -EBUSY; |
316 | } |
317 | |
318 | if (args->tiling_mode == I915_TILING_NONE) { |
319 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; |
320 | args->stride = 0; |
321 | } else { |
322 | if (args->tiling_mode == I915_TILING_X) |
323 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; |
324 | else |
325 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; |
326 | |
327 | /* Hide bit 17 swizzling from the user. This prevents old Mesa |
328 | * from aborting the application on sw fallbacks to bit 17, |
329 | * and we use the pread/pwrite bit17 paths to swizzle for it. |
330 | * If there was a user that was relying on the swizzle |
331 | * information for drm_intel_bo_map()ed reads/writes this would |
332 | * break it, but we don't have any of those. |
333 | */ |
334 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) |
335 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9; |
336 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) |
337 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; |
338 | |
339 | /* If we can't handle the swizzling, make it untiled. */ |
340 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { |
341 | args->tiling_mode = I915_TILING_NONE; |
342 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; |
343 | args->stride = 0; |
344 | } |
345 | } |
346 | |
347 | mutex_lock(&dev->struct_mutex); |
348 | if (args->tiling_mode != obj->tiling_mode || |
349 | args->stride != obj->stride) { |
350 | /* We need to rebind the object if its current allocation |
351 | * no longer meets the alignment restrictions for its new |
352 | * tiling mode. Otherwise we can just leave it alone, but |
353 | * need to ensure that any fence register is updated before |
354 | * the next fenced (either through the GTT or by the BLT unit |
355 | * on older GPUs) access. |
356 | * |
357 | * After updating the tiling parameters, we then flag whether |
358 | * we need to update an associated fence register. Note this |
359 | * has to also include the unfenced register the GPU uses |
360 | * whilst executing a fenced command for an untiled object. |
361 | */ |
362 | |
363 | obj->map_and_fenceable = |
364 | !i915_gem_obj_ggtt_bound(obj) || |
365 | (i915_gem_obj_ggtt_offset(obj) + |
366 | obj->base.size <= dev_priv->gtt.mappable_end && |
367 | i915_gem_object_fence_ok(obj, args->tiling_mode)); |
368 | |
369 | /* Rebind if we need a change of alignment */ |
370 | if (!obj->map_and_fenceable) { |
371 | u32 unfenced_align = |
372 | i915_gem_get_gtt_alignment(dev, obj->base.size, |
373 | args->tiling_mode, |
374 | false); |
375 | if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1)) |
376 | ret = i915_gem_object_ggtt_unbind(obj); |
377 | } |
378 | |
379 | if (ret == 0) { |
380 | obj->fence_dirty = |
381 | obj->fenced_gpu_access || |
382 | obj->fence_reg != I915_FENCE_REG_NONE; |
383 | |
384 | obj->tiling_mode = args->tiling_mode; |
385 | obj->stride = args->stride; |
386 | |
387 | /* Force the fence to be reacquired for GTT access */ |
388 | i915_gem_release_mmap(obj); |
389 | } |
390 | } |
391 | /* we have to maintain this existing ABI... */ |
392 | args->stride = obj->stride; |
393 | args->tiling_mode = obj->tiling_mode; |
394 | |
395 | /* Try to preallocate memory required to save swizzling on put-pages */ |
396 | if (i915_gem_object_needs_bit17_swizzle(obj)) { |
397 | if (obj->bit_17 == NULL) { |
398 | obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT), |
399 | sizeof(long), GFP_KERNEL); |
400 | } |
401 | } else { |
402 | kfree(obj->bit_17); |
403 | obj->bit_17 = NULL; |
404 | } |
405 | |
406 | drm_gem_object_unreference(&obj->base); |
407 | mutex_unlock(&dev->struct_mutex); |
408 | |
409 | return ret; |
410 | } |
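
/*
 * Userspace sketch (illustrative, error handling omitted): tiling is
 * normally requested right after creating a buffer, e.g. with libdrm:
 *
 *	struct drm_i915_gem_set_tiling set = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = stride,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set);
 *
 * On return, set.tiling_mode, set.stride and set.swizzle_mode hold the
 * values the kernel actually applied.
 */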
411 | |
412 | /** |
413 | * Returns the current tiling mode and required bit 6 swizzling for the object. |
414 | */ |
415 | int |
416 | i915_gem_get_tiling(struct drm_device *dev, void *data, |
417 | struct drm_file *file) |
418 | { |
419 | struct drm_i915_gem_get_tiling *args = data; |
420 | struct drm_i915_private *dev_priv = dev->dev_private; |
421 | struct drm_gem_object *gobj; |
422 | struct drm_i915_gem_object *obj; |
423 | |
424 | gobj = drm_gem_object_lookup(dev, file, args->handle); |
425 | if (gobj == NULL) |
426 | return -ENOENT; |
427 | obj = to_intel_bo(gobj); |
428 | |
429 | mutex_lock(&dev->struct_mutex); |
430 | |
431 | args->tiling_mode = obj->tiling_mode; |
432 | switch (obj->tiling_mode) { |
433 | case I915_TILING_X: |
434 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; |
435 | break; |
436 | case I915_TILING_Y: |
437 | args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; |
438 | break; |
439 | case I915_TILING_NONE: |
440 | args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; |
441 | break; |
442 | default: |
443 | DRM_ERROR("unknown tiling mode\n" ); |
444 | } |
445 | |
446 | /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ |
447 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) |
448 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9; |
449 | if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) |
450 | args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; |
451 | |
452 | drm_gem_object_unreference(&obj->base); |
453 | mutex_unlock(&dev->struct_mutex); |
454 | |
455 | return 0; |
456 | } |
457 | |
458 | /** |
459 | * Swap every 64 bytes of this page around, to account for it having a new |
460 | * bit 17 of its physical address and therefore being interpreted differently |
461 | * by the GPU. |
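 *
 * With bit-17 swizzling the GPU XORs bit 17 of the physical address into
 * bit 6, so a flip of bit 17 makes each 64-byte half of a 128-byte block
 * land in its neighbour's slot; swapping the halves in place restores the
 * layout the GPU expects.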
462 | */ |
463 | static void |
464 | i915_gem_swizzle_page(struct page *page) |
465 | { |
466 | char temp[64]; |
467 | char *vaddr; |
468 | int i; |
469 | |
470 | vaddr = kmap(page); |
471 | |
472 | for (i = 0; i < PAGE_SIZE; i += 128) { |
473 | memcpy(temp, &vaddr[i], 64); |
474 | memcpy(&vaddr[i], &vaddr[i + 64], 64); |
475 | memcpy(&vaddr[i + 64], temp, 64); |
476 | } |
477 | |
478 | kunmap(page); |
479 | } |
480 | |
481 | void |
482 | i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) |
483 | { |
484 | #ifdef __NetBSD__ |
485 | struct vm_page *page; |
486 | #else |
487 | struct sg_page_iter sg_iter; |
488 | #endif |
489 | int i; |
490 | |
491 | if (obj->bit_17 == NULL) |
492 | return; |
493 | |
494 | #ifdef __NetBSD__ |
495 | i = 0; |
496 | TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) { |
497 | unsigned char new_bit_17 = VM_PAGE_TO_PHYS(page) >> 17; |
498 | if ((new_bit_17 & 0x1) != |
499 | (test_bit(i, obj->bit_17) != 0)) { |
500 | i915_gem_swizzle_page(container_of(page, struct page, |
501 | p_vmp)); |
502 | page->flags &= ~PG_CLEAN; |
503 | } |
504 | i += 1; |
505 | } |
506 | #else |
507 | i = 0; |
508 | for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { |
509 | struct page *page = sg_page_iter_page(&sg_iter); |
510 | char new_bit_17 = page_to_phys(page) >> 17; |
511 | if ((new_bit_17 & 0x1) != |
512 | (test_bit(i, obj->bit_17) != 0)) { |
513 | i915_gem_swizzle_page(page); |
514 | set_page_dirty(page); |
515 | } |
516 | i++; |
517 | } |
518 | #endif |
519 | } |
520 | |
521 | void |
522 | i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) |
523 | { |
524 | #ifdef __NetBSD__ |
525 | struct vm_page *page; |
526 | #else |
527 | struct sg_page_iter sg_iter; |
528 | #endif |
529 | int page_count = obj->base.size >> PAGE_SHIFT; |
530 | int i; |
531 | |
532 | if (obj->bit_17 == NULL) { |
533 | obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count), |
534 | sizeof(long), GFP_KERNEL); |
535 | if (obj->bit_17 == NULL) { |
536 | DRM_ERROR("Failed to allocate memory for bit 17 " |
537 | "record\n" ); |
538 | return; |
539 | } |
540 | } |
541 | |
542 | i = 0; |
543 | #ifdef __NetBSD__ |
544 | TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) { |
545 | if (ISSET(VM_PAGE_TO_PHYS(page), __BIT(17))) |
546 | __set_bit(i, obj->bit_17); |
547 | else |
548 | __clear_bit(i, obj->bit_17); |
549 | i += 1; |
550 | } |
551 | #else |
552 | for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { |
553 | if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) |
554 | __set_bit(i, obj->bit_17); |
555 | else |
556 | __clear_bit(i, obj->bit_17); |
557 | i++; |
558 | } |
559 | #endif |
560 | } |
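
/*
 * Usage note (informational): the two helpers above are paired around
 * swap-out.  The put-pages path records bit 17 of each page's physical
 * address with i915_gem_object_save_bit_17_swizzle(), and the get-pages
 * path calls i915_gem_object_do_bit_17_swizzle() to re-swizzle any page
 * whose bit 17 changed while its backing storage was out.
 */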
561 | |