/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * of these functions call it.
 */

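/*
 * Remove this BO from every VM address space it is currently mapped
 * into.  Called from the destroy path below.
 */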
static void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address spaces */
		radeon_vm_bo_rmv(bo->rdev, bo_va);
	}
}

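/*
 * Track per-domain memory usage: @sign is +1 when @bo enters @mem_type
 * and -1 when it leaves it.  Only GTT and VRAM are accounted.
 */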
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

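/* TTM destroy callback: tear down driver state and free the BO. */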
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

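/*
 * Translate a RADEON_GEM_DOMAIN_* mask into a TTM placement list,
 * falling back to system memory when no known domain bit is set.
 */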
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality.  512KB was measured to be the
	 * optimal threshold.
	 */
	if (rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}

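/*
 * Allocate a BO of @size bytes (rounded up to whole pages) in @domain
 * and return it in @bo_ptr.  @kernel selects a kernel-internal
 * allocation; @sg, when non-NULL, supplies the backing pages of an
 * imported buffer.
 */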
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

#ifdef __NetBSD__	/* XXX ALIGN means something else. */
	size = round_up(size, PAGE_SIZE);
#else
	size = ALIGN(size, PAGE_SIZE);
#endif

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

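/*
 * Map the whole BO into kernel address space and return the mapping in
 * @ptr.  The mapping is cached in bo->kptr, so repeated calls are cheap;
 * radeon_bo_kunmap() drops it.
 */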
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

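/*
 * Drop a reference to @bo; the caller's pointer is cleared once the
 * underlying TTM object reference has been released.
 */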
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

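/*
 * Pin @bo into @domain, optionally keeping it below @max_offset
 * (0 means no limit), and return its GPU address in @gpu_addr.
 * Pins nest: an already pinned BO only gets its pin_count bumped.
 */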
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

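/* Evict all buffer objects from VRAM, e.g. when suspending the device. */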
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
#ifdef __NetBSD__
	if (rdev->mc.aper_base)
		pmap_pv_track(rdev->mc.aper_base, rdev->mc.aper_size);
#endif
	DRM_INFO("Detected VRAM RAM=%" PRIx64 "M, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
#ifdef __NetBSD__
	if (rdev->mc.aper_base)
		pmap_pv_untrack(rdev->mc.aper_base, rdev->mc.aper_size);
#endif
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB. */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *               __________________
	 *    1/4 of    -|\               |
	 *    VRAM       | \              |
	 *               |  \             |
	 *               |   \            |
	 *               |    \           |
	 *               |     \          |
	 *               |      \         |
	 *               |       \________|1 MB
	 *               |----------------|
	 *            VRAM 0 %         100 %
	 *             used             used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */
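	/*
	 * Illustrative numbers (not taken from any particular board): with
	 * 1024 MiB of VRAM and 256 MiB in use, half_vram is 512 MiB and
	 * half_free_vram is 256 MiB, giving a threshold of 128 MiB.  Once
	 * usage reaches 512 MiB or more, the threshold bottoms out at the
	 * 1 MiB minimum enforced by the max() below.
	 */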

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

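/*
 * Reserve every BO on @head and validate it into its requested domain,
 * falling back to the alternate domain on failure and throttling moves
 * with the threshold computed above.
 */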
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->domain;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((lobj->alt_domain & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
					domain = lobj->alt_domain;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

#ifdef __NetBSD__
/* XXX Fill me in! */
#else
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}
#endif

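/*
 * Find a free hardware surface register (stealing one from an unpinned
 * BO if necessary) and program it with this BO's tiling parameters.
 * The caller must hold the BO's reservation.
 */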
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}


static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

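/*
 * Validate and record the tiling parameters for @bo.  On evergreen and
 * newer parts (CHIP_CEDAR onwards) the bank width/height, macro tile
 * aspect and tile split fields are range-checked first.
 */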
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

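/*
 * Keep the surface register in sync with the BO's tiling state and
 * placement: drop it when forced or when the BO has left VRAM, and
 * (re)acquire it when a surface-tiled BO sits in VRAM.
 */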
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

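/*
 * TTM move notification: refresh tiling state, invalidate VM mappings
 * and update the per-domain usage counters when a BO changes placement.
 */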
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

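/*
 * CPU fault handler hook: if the faulting BO lives in VRAM that the CPU
 * cannot reach, move it into the visible part of VRAM (or, if that fails
 * with -ENOMEM, into GTT) before the fault is serviced.
 */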
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

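/*
 * Wait for all GPU use of @bo to finish (or merely poll when @no_wait is
 * set) and optionally report the BO's current memory type in @mem_type.
 */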
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}