1 | /* |
2 | * Copyright 2009 Jerome Glisse. |
3 | * All Rights Reserved. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the |
7 | * "Software"), to deal in the Software without restriction, including |
8 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * the following conditions: |
12 | * |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
20 | * |
21 | * The above copyright notice and this permission notice (including the |
22 | * next paragraph) shall be included in all copies or substantial portions |
23 | * of the Software. |
24 | * |
25 | */ |
26 | /* |
27 | * Authors: |
28 | * Jerome Glisse <glisse@freedesktop.org> |
29 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> |
30 | * Dave Airlie |
31 | */ |
32 | #include <ttm/ttm_bo_api.h> |
33 | #include <ttm/ttm_bo_driver.h> |
34 | #include <ttm/ttm_placement.h> |
35 | #include <ttm/ttm_module.h> |
36 | #include <ttm/ttm_page_alloc.h> |
37 | #include <drm/drmP.h> |
38 | #include <drm/radeon_drm.h> |
39 | #include <linux/seq_file.h> |
40 | #include <linux/slab.h> |
41 | #include <linux/swiotlb.h> |
42 | #include <linux/debugfs.h> |
43 | #include "radeon_reg.h" |
44 | #include "radeon.h" |
45 | |
46 | #ifdef __NetBSD__ |
47 | #include <uvm/uvm_extern.h> |
48 | #include <uvm/uvm_fault.h> |
49 | #include <uvm/uvm_param.h> |
50 | #include <drm/bus_dma_hacks.h> |
51 | #endif |
52 | |
53 | #ifdef _LP64 |
54 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) |
55 | #else |
56 | #define DRM_FILE_PAGE_OFFSET (0xa0000000UL >> PAGE_SHIFT) |
57 | #endif |
58 | |
59 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev); |
60 | static void radeon_ttm_debugfs_fini(struct radeon_device *rdev); |
61 | |
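/*
 * Recover the radeon_device that embeds a ttm_bo_device: the bdev lives
 * inside rdev->mman, so two container_of() steps get us back to rdev.
 */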
62 | static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) |
63 | { |
64 | struct radeon_mman *mman; |
65 | struct radeon_device *rdev; |
66 | |
67 | mman = container_of(bdev, struct radeon_mman, bdev); |
68 | rdev = container_of(mman, struct radeon_device, mman); |
69 | return rdev; |
70 | } |
71 | |
72 | |
73 | /* |
74 | * Global memory. |
75 | */ |
76 | static int radeon_ttm_mem_global_init(struct drm_global_reference *ref) |
77 | { |
78 | return ttm_mem_global_init(ref->object); |
79 | } |
80 | |
81 | static void radeon_ttm_mem_global_release(struct drm_global_reference *ref) |
82 | { |
83 | ttm_mem_global_release(ref->object); |
84 | } |
85 | |
86 | static int radeon_ttm_global_init(struct radeon_device *rdev) |
87 | { |
88 | struct drm_global_reference *global_ref; |
89 | int r; |
90 | |
91 | rdev->mman.mem_global_referenced = false; |
92 | global_ref = &rdev->mman.mem_global_ref; |
93 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; |
94 | global_ref->size = sizeof(struct ttm_mem_global); |
95 | global_ref->init = &radeon_ttm_mem_global_init; |
96 | global_ref->release = &radeon_ttm_mem_global_release; |
97 | r = drm_global_item_ref(global_ref); |
98 | if (r != 0) { |
99 | DRM_ERROR("Failed setting up TTM memory accounting " |
100 | "subsystem.\n" ); |
101 | return r; |
102 | } |
103 | |
104 | rdev->mman.bo_global_ref.mem_glob = |
105 | rdev->mman.mem_global_ref.object; |
106 | global_ref = &rdev->mman.bo_global_ref.ref; |
107 | global_ref->global_type = DRM_GLOBAL_TTM_BO; |
108 | global_ref->size = sizeof(struct ttm_bo_global); |
109 | global_ref->init = &ttm_bo_global_init; |
110 | global_ref->release = &ttm_bo_global_release; |
111 | r = drm_global_item_ref(global_ref); |
112 | if (r != 0) { |
113 | DRM_ERROR("Failed setting up TTM BO subsystem.\n" ); |
114 | drm_global_item_unref(&rdev->mman.mem_global_ref); |
115 | return r; |
116 | } |
117 | |
118 | rdev->mman.mem_global_referenced = true; |
119 | return 0; |
120 | } |
121 | |
122 | static void radeon_ttm_global_fini(struct radeon_device *rdev) |
123 | { |
124 | if (rdev->mman.mem_global_referenced) { |
125 | drm_global_item_unref(&rdev->mman.bo_global_ref.ref); |
126 | drm_global_item_unref(&rdev->mman.mem_global_ref); |
127 | rdev->mman.mem_global_referenced = false; |
128 | } |
129 | } |
130 | |
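/*
 * TTM cache-invalidation hook.  Radeon has nothing to do here, so simply
 * report success.
 */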
131 | static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
132 | { |
133 | return 0; |
134 | } |
135 | |
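/*
 * Describe each TTM memory pool to the core: plain system pages, the GTT
 * aperture (optionally AGP-backed), and on-board VRAM, along with the GPU
 * offset and caching attributes each of them supports.
 */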
136 | static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
137 | struct ttm_mem_type_manager *man) |
138 | { |
139 | struct radeon_device *rdev; |
140 | |
141 | rdev = radeon_get_rdev(bdev); |
142 | |
143 | switch (type) { |
144 | case TTM_PL_SYSTEM: |
145 | /* System memory */ |
146 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
147 | man->available_caching = TTM_PL_MASK_CACHING; |
148 | man->default_caching = TTM_PL_FLAG_CACHED; |
149 | break; |
150 | case TTM_PL_TT: |
151 | man->func = &ttm_bo_manager_func; |
152 | man->gpu_offset = rdev->mc.gtt_start; |
153 | man->available_caching = TTM_PL_MASK_CACHING; |
154 | man->default_caching = TTM_PL_FLAG_CACHED; |
155 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; |
156 | #if __OS_HAS_AGP |
157 | if (rdev->flags & RADEON_IS_AGP) { |
158 | if (!rdev->ddev->agp) { |
159 | DRM_ERROR("AGP is not enabled for memory type %u\n" , |
160 | (unsigned)type); |
161 | return -EINVAL; |
162 | } |
163 | if (!rdev->ddev->agp->cant_use_aperture) |
164 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
165 | man->available_caching = TTM_PL_FLAG_UNCACHED | |
166 | TTM_PL_FLAG_WC; |
167 | man->default_caching = TTM_PL_FLAG_WC; |
168 | } |
169 | #endif |
170 | break; |
171 | case TTM_PL_VRAM: |
172 | /* "On-card" video ram */ |
173 | man->func = &ttm_bo_manager_func; |
174 | man->gpu_offset = rdev->mc.vram_start; |
175 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
176 | TTM_MEMTYPE_FLAG_MAPPABLE; |
177 | man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; |
178 | man->default_caching = TTM_PL_FLAG_WC; |
179 | break; |
180 | default: |
181 | DRM_ERROR("Unsupported memory type %u\n" , (unsigned)type); |
182 | return -EINVAL; |
183 | } |
184 | return 0; |
185 | } |
186 | |
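/*
 * Pick placements for a buffer that is being evicted.  VRAM buffers are
 * pushed to GTT while the GFX ring is usable (so the blit path can do the
 * move) and to CPU-domain system memory otherwise; everything else falls
 * back to the CPU domain.
 */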
187 | static void radeon_evict_flags(struct ttm_buffer_object *bo, |
188 | struct ttm_placement *placement) |
189 | { |
190 | struct radeon_bo *rbo; |
191 | static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; |
192 | |
193 | if (!radeon_ttm_bo_is_radeon_bo(bo)) { |
194 | placement->fpfn = 0; |
195 | placement->lpfn = 0; |
196 | placement->placement = &placements; |
197 | placement->busy_placement = &placements; |
198 | placement->num_placement = 1; |
199 | placement->num_busy_placement = 1; |
200 | return; |
201 | } |
202 | rbo = container_of(bo, struct radeon_bo, tbo); |
203 | switch (bo->mem.mem_type) { |
204 | case TTM_PL_VRAM: |
205 | if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) |
206 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); |
207 | else |
208 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); |
209 | break; |
210 | case TTM_PL_TT: |
211 | default: |
212 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); |
213 | } |
214 | *placement = rbo->placement; |
215 | } |
216 | |
217 | static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) |
218 | { |
219 | struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); |
220 | |
221 | return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); |
222 | } |
223 | |
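/* A "null" move: no data needs copying, just retarget bo->mem at the new placement. */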
224 | static void radeon_move_null(struct ttm_buffer_object *bo, |
225 | struct ttm_mem_reg *new_mem) |
226 | { |
227 | struct ttm_mem_reg *old_mem = &bo->mem; |
228 | |
229 | BUG_ON(old_mem->mm_node != NULL); |
230 | *old_mem = *new_mem; |
231 | new_mem->mm_node = NULL; |
232 | } |
233 | |
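/*
 * Move a buffer with the GPU copy engine.  The TTM offsets are converted
 * to MC addresses by adding the VRAM or GTT base, the copy is emitted on
 * the preferred copy ring, and the resulting fence is handed to TTM for
 * cleanup of the old placement.
 */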
234 | static int radeon_move_blit(struct ttm_buffer_object *bo, |
235 | bool evict, bool no_wait_gpu, |
236 | struct ttm_mem_reg *new_mem, |
237 | struct ttm_mem_reg *old_mem) |
238 | { |
239 | struct radeon_device *rdev; |
240 | uint64_t old_start, new_start; |
241 | struct radeon_fence *fence; |
242 | int r, ridx; |
243 | |
244 | rdev = radeon_get_rdev(bo->bdev); |
245 | ridx = radeon_copy_ring_index(rdev); |
246 | old_start = old_mem->start << PAGE_SHIFT; |
247 | new_start = new_mem->start << PAGE_SHIFT; |
248 | |
249 | switch (old_mem->mem_type) { |
250 | case TTM_PL_VRAM: |
251 | old_start += rdev->mc.vram_start; |
252 | break; |
253 | case TTM_PL_TT: |
254 | old_start += rdev->mc.gtt_start; |
255 | break; |
256 | default: |
257 | DRM_ERROR("Unknown placement %d\n" , old_mem->mem_type); |
258 | return -EINVAL; |
259 | } |
260 | switch (new_mem->mem_type) { |
261 | case TTM_PL_VRAM: |
262 | new_start += rdev->mc.vram_start; |
263 | break; |
264 | case TTM_PL_TT: |
265 | new_start += rdev->mc.gtt_start; |
266 | break; |
267 | default: |
268 | DRM_ERROR("Unknown placement %d\n" , old_mem->mem_type); |
269 | return -EINVAL; |
270 | } |
271 | if (!rdev->ring[ridx].ready) { |
272 | DRM_ERROR("Trying to move memory with ring turned off.\n" ); |
273 | return -EINVAL; |
274 | } |
275 | |
276 | BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); |
277 | |
278 | /* sync other rings */ |
279 | fence = bo->sync_obj; |
280 | r = radeon_copy(rdev, old_start, new_start, |
281 | new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ |
282 | &fence); |
283 | /* FIXME: handle copy error */ |
284 | r = ttm_bo_move_accel_cleanup(bo, (void *)fence, |
285 | evict, no_wait_gpu, new_mem); |
286 | radeon_fence_unref(&fence); |
287 | return r; |
288 | } |
289 | |
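/*
 * VRAM -> system move.  The GPU cannot blit straight into unbound system
 * pages, so the data is first blitted into a temporary GTT placement and
 * then moved to system memory by TTM.
 */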
290 | static int radeon_move_vram_ram(struct ttm_buffer_object *bo, |
291 | bool evict, bool interruptible, |
292 | bool no_wait_gpu, |
293 | struct ttm_mem_reg *new_mem) |
294 | { |
295 | struct radeon_device *rdev __unused; |
296 | struct ttm_mem_reg *old_mem = &bo->mem; |
297 | struct ttm_mem_reg tmp_mem; |
298 | u32 placements; |
299 | struct ttm_placement placement; |
300 | int r; |
301 | |
302 | rdev = radeon_get_rdev(bo->bdev); |
303 | tmp_mem = *new_mem; |
304 | tmp_mem.mm_node = NULL; |
305 | placement.fpfn = 0; |
306 | placement.lpfn = 0; |
307 | placement.num_placement = 1; |
308 | placement.placement = &placements; |
309 | placement.num_busy_placement = 1; |
310 | placement.busy_placement = &placements; |
311 | placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
312 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, |
313 | interruptible, no_wait_gpu); |
314 | if (unlikely(r)) { |
315 | return r; |
316 | } |
317 | |
318 | r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); |
319 | if (unlikely(r)) { |
320 | goto out_cleanup; |
321 | } |
322 | |
323 | r = ttm_tt_bind(bo->ttm, &tmp_mem); |
324 | if (unlikely(r)) { |
325 | goto out_cleanup; |
326 | } |
327 | r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); |
328 | if (unlikely(r)) { |
329 | goto out_cleanup; |
330 | } |
331 | r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); |
332 | out_cleanup: |
333 | ttm_bo_mem_put(bo, &tmp_mem); |
334 | return r; |
335 | } |
336 | |
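/*
 * System -> VRAM move, the mirror of the above: bind the pages into a
 * temporary GTT placement first, then blit from GTT into VRAM.
 */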
337 | static int radeon_move_ram_vram(struct ttm_buffer_object *bo, |
338 | bool evict, bool interruptible, |
339 | bool no_wait_gpu, |
340 | struct ttm_mem_reg *new_mem) |
341 | { |
342 | struct radeon_device *rdev __unused; |
343 | struct ttm_mem_reg *old_mem = &bo->mem; |
344 | struct ttm_mem_reg tmp_mem; |
345 | struct ttm_placement placement; |
346 | u32 placements; |
347 | int r; |
348 | |
349 | rdev = radeon_get_rdev(bo->bdev); |
350 | tmp_mem = *new_mem; |
351 | tmp_mem.mm_node = NULL; |
352 | placement.fpfn = 0; |
353 | placement.lpfn = 0; |
354 | placement.num_placement = 1; |
355 | placement.placement = &placements; |
356 | placement.num_busy_placement = 1; |
357 | placement.busy_placement = &placements; |
358 | placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
359 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, |
360 | interruptible, no_wait_gpu); |
361 | if (unlikely(r)) { |
362 | return r; |
363 | } |
364 | r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); |
365 | if (unlikely(r)) { |
366 | goto out_cleanup; |
367 | } |
368 | r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); |
369 | if (unlikely(r)) { |
370 | goto out_cleanup; |
371 | } |
372 | out_cleanup: |
373 | ttm_bo_mem_put(bo, &tmp_mem); |
374 | return r; |
375 | } |
376 | |
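/*
 * Top-level TTM move callback.  Trivial system<->GTT transitions are
 * handled by a null move, GPU blits are used when a copy ring is up, and
 * anything that cannot be (or fails to be) blitted falls back to a CPU
 * memcpy.  Successful moves are accounted in num_bytes_moved.
 */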
377 | static int radeon_bo_move(struct ttm_buffer_object *bo, |
378 | bool evict, bool interruptible, |
379 | bool no_wait_gpu, |
380 | struct ttm_mem_reg *new_mem) |
381 | { |
382 | struct radeon_device *rdev; |
383 | struct ttm_mem_reg *old_mem = &bo->mem; |
384 | int r; |
385 | |
386 | rdev = radeon_get_rdev(bo->bdev); |
387 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { |
388 | radeon_move_null(bo, new_mem); |
389 | return 0; |
390 | } |
391 | if ((old_mem->mem_type == TTM_PL_TT && |
392 | new_mem->mem_type == TTM_PL_SYSTEM) || |
393 | (old_mem->mem_type == TTM_PL_SYSTEM && |
394 | new_mem->mem_type == TTM_PL_TT)) { |
395 | /* bind is enough */ |
396 | radeon_move_null(bo, new_mem); |
397 | return 0; |
398 | } |
399 | if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || |
400 | rdev->asic->copy.copy == NULL) { |
401 | /* use memcpy */ |
402 | goto memcpy; |
403 | } |
404 | |
405 | if (old_mem->mem_type == TTM_PL_VRAM && |
406 | new_mem->mem_type == TTM_PL_SYSTEM) { |
407 | r = radeon_move_vram_ram(bo, evict, interruptible, |
408 | no_wait_gpu, new_mem); |
409 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && |
410 | new_mem->mem_type == TTM_PL_VRAM) { |
411 | r = radeon_move_ram_vram(bo, evict, interruptible, |
412 | no_wait_gpu, new_mem); |
413 | } else { |
414 | r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem); |
415 | } |
416 | |
417 | if (r) { |
418 | memcpy: |
419 | r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); |
420 | if (r) { |
421 | return r; |
422 | } |
423 | } |
424 | |
425 | /* update statistics */ |
426 | atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved); |
427 | return 0; |
428 | } |
429 | |
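/*
 * Fill in the bus address information TTM needs to CPU-map a placement:
 * nothing for system memory, the AGP aperture for AGP-backed GTT, and the
 * CPU-visible portion of the VRAM aperture for VRAM.
 */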
430 | static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
431 | { |
432 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
433 | struct radeon_device *rdev = radeon_get_rdev(bdev); |
434 | |
435 | mem->bus.addr = NULL; |
436 | mem->bus.offset = 0; |
437 | mem->bus.size = mem->num_pages << PAGE_SHIFT; |
438 | mem->bus.base = 0; |
439 | mem->bus.is_iomem = false; |
440 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) |
441 | return -EINVAL; |
442 | switch (mem->mem_type) { |
443 | case TTM_PL_SYSTEM: |
444 | /* system memory */ |
445 | return 0; |
446 | case TTM_PL_TT: |
447 | #if __OS_HAS_AGP |
448 | if (rdev->flags & RADEON_IS_AGP) { |
449 | /* RADEON_IS_AGP is set only if AGP is active */ |
450 | mem->bus.offset = mem->start << PAGE_SHIFT; |
451 | mem->bus.base = rdev->mc.agp_base; |
452 | mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; |
453 | KASSERTMSG((mem->bus.base & (PAGE_SIZE - 1)) == 0, |
454 | "agp aperture is not page-aligned: %lx" , |
455 | mem->bus.base); |
456 | KASSERT((mem->bus.offset & (PAGE_SIZE - 1)) == 0); |
457 | } |
458 | #endif |
459 | break; |
460 | case TTM_PL_VRAM: |
461 | mem->bus.offset = mem->start << PAGE_SHIFT; |
462 | /* check if it's visible */ |
463 | if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) |
464 | return -EINVAL; |
465 | mem->bus.base = rdev->mc.aper_base; |
466 | mem->bus.is_iomem = true; |
467 | #ifdef __alpha__ |
468 | /* |
469 | * Alpha: use bus.addr to hold the ioremap() return, |
470 | * so we can modify bus.base below. |
471 | */ |
472 | if (mem->placement & TTM_PL_FLAG_WC) |
473 | mem->bus.addr = |
474 | ioremap_wc(mem->bus.base + mem->bus.offset, |
475 | mem->bus.size); |
476 | else |
477 | mem->bus.addr = |
478 | ioremap_nocache(mem->bus.base + mem->bus.offset, |
479 | mem->bus.size); |
480 | |
481 | /* |
482 | * Alpha: Use just the bus offset plus |
483 | * the hose/domain memory base for bus.base. |
484 | * It then can be used to build PTEs for VRAM |
485 | * access, as done in ttm_bo_vm_fault(). |
486 | */ |
487 | mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + |
488 | rdev->ddev->hose->dense_mem_base; |
489 | #endif |
490 | KASSERTMSG((mem->bus.base & (PAGE_SIZE - 1)) == 0, |
491 | "mc aperture is not page-aligned: %lx" , |
492 | mem->bus.base); |
493 | KASSERT((mem->bus.offset & (PAGE_SIZE - 1)) == 0); |
494 | break; |
495 | default: |
496 | return -EINVAL; |
497 | } |
498 | return 0; |
499 | } |
500 | |
501 | static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
502 | { |
503 | } |
504 | |
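/* TTM sync-object hooks: thin wrappers around radeon fences. */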
505 | static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) |
506 | { |
507 | return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); |
508 | } |
509 | |
510 | static int radeon_sync_obj_flush(void *sync_obj) |
511 | { |
512 | return 0; |
513 | } |
514 | |
515 | static void radeon_sync_obj_unref(void **sync_obj) |
516 | { |
517 | radeon_fence_unref((struct radeon_fence **)sync_obj); |
518 | } |
519 | |
520 | static void *radeon_sync_obj_ref(void *sync_obj) |
521 | { |
522 | return radeon_fence_ref((struct radeon_fence *)sync_obj); |
523 | } |
524 | |
525 | static bool radeon_sync_obj_signaled(void *sync_obj) |
526 | { |
527 | return radeon_fence_signaled((struct radeon_fence *)sync_obj); |
528 | } |
529 | |
530 | /* |
531 | * TTM backend functions. |
532 | */ |
533 | struct radeon_ttm_tt { |
534 | struct ttm_dma_tt ttm; |
535 | struct radeon_device *rdev; |
536 | u64 offset; |
537 | }; |
538 | |
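/* Bind the TT pages into the GART at the offset chosen for this placement. */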
539 | static int radeon_ttm_backend_bind(struct ttm_tt *ttm, |
540 | struct ttm_mem_reg *bo_mem) |
541 | { |
542 | struct radeon_ttm_tt *gtt = (void*)ttm; |
543 | int r; |
544 | |
545 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); |
546 | if (!ttm->num_pages) { |
547 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n" , |
548 | ttm->num_pages, bo_mem, ttm); |
549 | } |
550 | r = radeon_gart_bind(gtt->rdev, gtt->offset, |
551 | ttm->num_pages, ttm->pages, gtt->ttm.dma_address); |
552 | if (r) { |
553 | DRM_ERROR("failed to bind %lu pages at 0x%08X\n" , |
554 | ttm->num_pages, (unsigned)gtt->offset); |
555 | return r; |
556 | } |
557 | return 0; |
558 | } |
559 | |
560 | static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) |
561 | { |
562 | struct radeon_ttm_tt *gtt = (void *)ttm; |
563 | |
564 | radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); |
565 | return 0; |
566 | } |
567 | |
568 | static void radeon_ttm_backend_destroy(struct ttm_tt *ttm) |
569 | { |
570 | struct radeon_ttm_tt *gtt = (void *)ttm; |
571 | |
	ttm_dma_tt_fini(&gtt->ttm);
573 | kfree(gtt); |
574 | } |
575 | |
576 | static struct ttm_backend_func radeon_backend_func = { |
577 | .bind = &radeon_ttm_backend_bind, |
578 | .unbind = &radeon_ttm_backend_unbind, |
579 | .destroy = &radeon_ttm_backend_destroy, |
580 | }; |
581 | |
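/*
 * Create the ttm_tt for a buffer: an AGP-backed tt on AGP systems,
 * otherwise a radeon_ttm_tt whose DMA addresses feed the GART.
 */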
582 | static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, |
583 | unsigned long size, uint32_t page_flags, |
584 | struct page *dummy_read_page) |
585 | { |
586 | struct radeon_device *rdev; |
587 | struct radeon_ttm_tt *gtt; |
588 | |
589 | rdev = radeon_get_rdev(bdev); |
590 | #if __OS_HAS_AGP |
591 | if (rdev->flags & RADEON_IS_AGP) { |
592 | return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge, |
593 | size, page_flags, dummy_read_page); |
594 | } |
595 | #endif |
596 | |
597 | gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL); |
598 | if (gtt == NULL) { |
599 | return NULL; |
600 | } |
601 | gtt->ttm.ttm.func = &radeon_backend_func; |
602 | gtt->rdev = rdev; |
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
604 | kfree(gtt); |
605 | return NULL; |
606 | } |
	return &gtt->ttm.ttm;
608 | } |
609 | |
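/*
 * Allocate and DMA-map backing pages for a ttm_tt.  dma-buf "slave"
 * objects reuse the exporter's pages (not yet supported on NetBSD), AGP
 * and NetBSD bus_dma have their own paths, swiotlb systems use the
 * coherent DMA pool, and everything else takes pages from the TTM pool
 * and maps them with pci_map_page().
 */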
610 | static int radeon_ttm_tt_populate(struct ttm_tt *ttm) |
611 | { |
612 | struct radeon_ttm_tt *gtt = (void *)ttm; |
613 | #ifndef __NetBSD__ |
614 | unsigned i; |
615 | int r; |
616 | #endif |
617 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
618 | |
619 | if (ttm->state != tt_unpopulated) |
620 | return 0; |
621 | |
622 | if (slave && ttm->sg) { |
623 | #ifdef __NetBSD__ /* XXX drm prime */ |
624 | return -EINVAL; |
625 | #else |
626 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
627 | gtt->ttm.dma_address, ttm->num_pages); |
628 | ttm->state = tt_unbound; |
629 | return 0; |
630 | #endif |
631 | } |
632 | |
633 | #if __OS_HAS_AGP |
634 | struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); |
635 | if (rdev->flags & RADEON_IS_AGP) { |
636 | return ttm_agp_tt_populate(ttm); |
637 | } |
638 | #endif |
639 | |
640 | #ifdef __NetBSD__ |
641 | /* XXX errno NetBSD->Linux */ |
	return ttm_bus_dma_populate(&gtt->ttm);
643 | #else |
644 | |
645 | #ifdef CONFIG_SWIOTLB |
646 | #if ! __OS_HAS_AGP |
647 | struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); |
648 | #endif |
649 | if (swiotlb_nr_tbl()) { |
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
651 | } |
652 | #endif |
653 | |
654 | r = ttm_pool_populate(ttm); |
655 | if (r) { |
656 | return r; |
657 | } |
658 | |
659 | for (i = 0; i < ttm->num_pages; i++) { |
660 | gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i], |
661 | 0, PAGE_SIZE, |
662 | PCI_DMA_BIDIRECTIONAL); |
663 | if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { |
			while (i--) {
				/* unwind every mapping made so far, including page 0 */
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
669 | ttm_pool_unpopulate(ttm); |
670 | return -EFAULT; |
671 | } |
672 | } |
673 | return 0; |
674 | #endif |
675 | } |
676 | |
677 | static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) |
678 | { |
679 | struct radeon_ttm_tt *gtt = (void *)ttm; |
680 | #ifndef __NetBSD__ |
681 | unsigned i; |
682 | #endif |
683 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
684 | |
685 | if (slave) |
686 | return; |
687 | |
688 | #if __OS_HAS_AGP |
689 | struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); |
690 | if (rdev->flags & RADEON_IS_AGP) { |
691 | ttm_agp_tt_unpopulate(ttm); |
692 | return; |
693 | } |
694 | #endif |
695 | |
696 | #ifdef __NetBSD__ |
	ttm_bus_dma_unpopulate(&gtt->ttm);
698 | return; |
699 | #else |
700 | |
701 | #ifdef CONFIG_SWIOTLB |
702 | #if ! __OS_HAS_AGP |
703 | struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); |
704 | #endif |
705 | if (swiotlb_nr_tbl()) { |
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
707 | return; |
708 | } |
709 | #endif |
710 | |
711 | for (i = 0; i < ttm->num_pages; i++) { |
712 | if (gtt->ttm.dma_address[i]) { |
713 | pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], |
714 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
715 | } |
716 | } |
717 | |
718 | ttm_pool_unpopulate(ttm); |
719 | #endif |
720 | } |
721 | |
722 | #ifdef __NetBSD__ |
723 | static void radeon_ttm_tt_swapout(struct ttm_tt *ttm) |
724 | { |
725 | struct radeon_ttm_tt *gtt = container_of(ttm, struct radeon_ttm_tt, |
726 | ttm.ttm); |
	struct ttm_dma_tt *ttm_dma = &gtt->ttm;
728 | |
729 | ttm_bus_dma_swapout(ttm_dma); |
730 | } |
731 | |
732 | static int radeon_ttm_fault(struct uvm_faultinfo *, vaddr_t, |
733 | struct vm_page **, int, int, vm_prot_t, int); |
734 | |
735 | static const struct uvm_pagerops radeon_uvm_ops = { |
736 | .pgo_reference = &ttm_bo_uvm_reference, |
737 | .pgo_detach = &ttm_bo_uvm_detach, |
738 | .pgo_fault = &radeon_ttm_fault, |
739 | }; |
740 | #endif |
741 | |
742 | static struct ttm_bo_driver radeon_bo_driver = { |
743 | .ttm_tt_create = &radeon_ttm_tt_create, |
744 | .ttm_tt_populate = &radeon_ttm_tt_populate, |
745 | .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, |
746 | #ifdef __NetBSD__ |
747 | .ttm_tt_swapout = &radeon_ttm_tt_swapout, |
748 | .ttm_uvm_ops = &radeon_uvm_ops, |
749 | #endif |
750 | .invalidate_caches = &radeon_invalidate_caches, |
751 | .init_mem_type = &radeon_init_mem_type, |
752 | .evict_flags = &radeon_evict_flags, |
753 | .move = &radeon_bo_move, |
754 | .verify_access = &radeon_verify_access, |
755 | .sync_obj_signaled = &radeon_sync_obj_signaled, |
756 | .sync_obj_wait = &radeon_sync_obj_wait, |
757 | .sync_obj_flush = &radeon_sync_obj_flush, |
758 | .sync_obj_unref = &radeon_sync_obj_unref, |
759 | .sync_obj_ref = &radeon_sync_obj_ref, |
760 | .move_notify = &radeon_bo_move_notify, |
761 | .fault_reserve_notify = &radeon_bo_fault_reserve_notify, |
762 | .io_mem_reserve = &radeon_ttm_io_mem_reserve, |
763 | .io_mem_free = &radeon_ttm_io_mem_free, |
764 | }; |
765 | |
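/*
 * Bring up the TTM memory manager: take the global TTM references, create
 * the bo_device, size the VRAM and GTT pools, pin a 256 KiB VRAM buffer
 * for the stolen VGA memory, and register the debugfs files.
 */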
766 | int radeon_ttm_init(struct radeon_device *rdev) |
767 | { |
768 | int r; |
769 | |
770 | r = radeon_ttm_global_init(rdev); |
771 | if (r) { |
772 | return r; |
773 | } |
	/* No other users of the address space, so set it to 0 */
775 | r = ttm_bo_device_init(&rdev->mman.bdev, |
776 | rdev->mman.bo_global_ref.ref.object, |
777 | &radeon_bo_driver, |
778 | #ifdef __NetBSD__ |
779 | rdev->ddev->bst, |
780 | rdev->ddev->dmat, |
781 | #else |
782 | rdev->ddev->anon_inode->i_mapping, |
783 | #endif |
784 | DRM_FILE_PAGE_OFFSET, |
785 | rdev->need_dma32); |
786 | if (r) { |
787 | DRM_ERROR("failed initializing buffer object driver(%d).\n" , r); |
788 | return r; |
789 | } |
790 | rdev->mman.initialized = true; |
791 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, |
792 | rdev->mc.real_vram_size >> PAGE_SHIFT); |
793 | if (r) { |
794 | DRM_ERROR("Failed initializing VRAM heap.\n" ); |
795 | return r; |
796 | } |
797 | /* Change the size here instead of the init above so only lpfn is affected */ |
798 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
799 | |
800 | r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, |
801 | RADEON_GEM_DOMAIN_VRAM, |
802 | NULL, &rdev->stollen_vga_memory); |
803 | if (r) { |
804 | return r; |
805 | } |
806 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
807 | if (r) |
808 | return r; |
809 | r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); |
810 | radeon_bo_unreserve(rdev->stollen_vga_memory); |
811 | if (r) { |
812 | radeon_bo_unref(&rdev->stollen_vga_memory); |
813 | return r; |
814 | } |
815 | DRM_INFO("radeon: %uM of VRAM memory ready\n" , |
816 | (unsigned) (rdev->mc.real_vram_size / (1024 * 1024))); |
817 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, |
818 | rdev->mc.gtt_size >> PAGE_SHIFT); |
819 | if (r) { |
820 | DRM_ERROR("Failed initializing GTT heap.\n" ); |
821 | return r; |
822 | } |
823 | DRM_INFO("radeon: %uM of GTT memory ready.\n" , |
824 | (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); |
825 | |
826 | r = radeon_ttm_debugfs_init(rdev); |
827 | if (r) { |
828 | DRM_ERROR("Failed to init debugfs\n" ); |
829 | return r; |
830 | } |
831 | return 0; |
832 | } |
833 | |
834 | void radeon_ttm_fini(struct radeon_device *rdev) |
835 | { |
836 | int r; |
837 | |
838 | if (!rdev->mman.initialized) |
839 | return; |
840 | radeon_ttm_debugfs_fini(rdev); |
841 | if (rdev->stollen_vga_memory) { |
842 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
843 | if (r == 0) { |
844 | radeon_bo_unpin(rdev->stollen_vga_memory); |
845 | radeon_bo_unreserve(rdev->stollen_vga_memory); |
846 | } |
847 | radeon_bo_unref(&rdev->stollen_vga_memory); |
848 | } |
849 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); |
850 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); |
851 | ttm_bo_device_release(&rdev->mman.bdev); |
852 | radeon_gart_fini(rdev); |
853 | radeon_ttm_global_fini(rdev); |
854 | rdev->mman.initialized = false; |
855 | DRM_INFO("radeon: ttm finalized\n" ); |
856 | } |
857 | |
858 | /* this should only be called at bootup or when userspace |
859 | * isn't running */ |
860 | void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) |
861 | { |
862 | struct ttm_mem_type_manager *man; |
863 | |
864 | if (!rdev->mman.initialized) |
865 | return; |
866 | |
867 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; |
	/* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
869 | man->size = size >> PAGE_SHIFT; |
870 | } |
871 | |
872 | #ifdef __NetBSD__ |
873 | |
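/*
 * NetBSD fault handler: take the reclocking lock around the generic
 * ttm_bo_uvm_fault() so memory reclocks cannot race with CPU access.
 */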
874 | static int |
875 | radeon_ttm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, |
876 | struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type, |
877 | int flags) |
878 | { |
879 | struct uvm_object *const uobj = ufi->entry->object.uvm_obj; |
880 | struct ttm_buffer_object *const bo = container_of(uobj, |
881 | struct ttm_buffer_object, uvmobj); |
882 | struct radeon_device *const rdev = radeon_get_rdev(bo->bdev); |
883 | int error; |
884 | |
885 | KASSERT(rdev != NULL); |
886 | down_read(&rdev->pm.mclk_lock); |
887 | error = ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx, |
888 | access_type, flags); |
889 | up_read(&rdev->pm.mclk_lock); |
890 | |
891 | return error; |
892 | } |
893 | |
894 | int |
895 | radeon_mmap_object(struct drm_device *dev, off_t offset, size_t size, |
896 | vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp, |
897 | struct file *file) |
898 | { |
899 | struct radeon_device *rdev = dev->dev_private; |
900 | |
901 | KASSERT(0 == (offset & (PAGE_SIZE - 1))); |
902 | |
903 | if (__predict_false(rdev == NULL)) /* XXX How?? */ |
904 | return -EINVAL; |
905 | |
906 | if (__predict_false((offset >> PAGE_SHIFT) < DRM_FILE_PAGE_OFFSET)) |
907 | return drm_mmap_object(dev, offset, size, prot, uobjp, |
908 | uoffsetp, file); |
909 | else |
910 | return ttm_bo_mmap_object(&rdev->mman.bdev, offset, size, prot, |
911 | uobjp, uoffsetp, file); |
912 | } |
913 | |
914 | #else |
915 | |
916 | static struct vm_operations_struct radeon_ttm_vm_ops; |
917 | static const struct vm_operations_struct *ttm_vm_ops = NULL; |
918 | |
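/*
 * Linux fault handler: wrap TTM's own fault handler so the power
 * management reclocking lock is held while the fault is serviced.
 */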
919 | static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
920 | { |
921 | struct ttm_buffer_object *bo; |
922 | struct radeon_device *rdev; |
923 | int r; |
924 | |
925 | bo = (struct ttm_buffer_object *)vma->vm_private_data; |
926 | if (bo == NULL) { |
927 | return VM_FAULT_NOPAGE; |
928 | } |
929 | rdev = radeon_get_rdev(bo->bdev); |
930 | down_read(&rdev->pm.mclk_lock); |
931 | r = ttm_vm_ops->fault(vma, vmf); |
932 | up_read(&rdev->pm.mclk_lock); |
933 | return r; |
934 | } |
935 | |
936 | int radeon_mmap(struct file *filp, struct vm_area_struct *vma) |
937 | { |
938 | struct drm_file *file_priv; |
939 | struct radeon_device *rdev; |
940 | int r; |
941 | |
942 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { |
943 | return drm_mmap(filp, vma); |
944 | } |
945 | |
946 | file_priv = filp->private_data; |
947 | rdev = file_priv->minor->dev->dev_private; |
948 | if (rdev == NULL) { |
949 | return -EINVAL; |
950 | } |
951 | r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); |
952 | if (unlikely(r != 0)) { |
953 | return r; |
954 | } |
955 | if (unlikely(ttm_vm_ops == NULL)) { |
956 | ttm_vm_ops = vma->vm_ops; |
957 | radeon_ttm_vm_ops = *ttm_vm_ops; |
958 | radeon_ttm_vm_ops.fault = &radeon_ttm_fault; |
959 | } |
960 | vma->vm_ops = &radeon_ttm_vm_ops; |
961 | return 0; |
962 | } |
963 | |
964 | #endif /* __NetBSD__ */ |
965 | |
966 | #if defined(CONFIG_DEBUG_FS) |
967 | |
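/* Dump the drm_mm allocator backing a TTM pool (VRAM or GTT) under the LRU lock. */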
968 | static int radeon_mm_dump_table(struct seq_file *m, void *data) |
969 | { |
970 | struct drm_info_node *node = (struct drm_info_node *)m->private; |
971 | unsigned ttm_pl = *(int *)node->info_ent->data; |
972 | struct drm_device *dev = node->minor->dev; |
973 | struct radeon_device *rdev = dev->dev_private; |
974 | struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv; |
975 | int ret; |
976 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; |
977 | |
978 | spin_lock(&glob->lru_lock); |
979 | ret = drm_mm_dump_table(m, mm); |
980 | spin_unlock(&glob->lru_lock); |
981 | return ret; |
982 | } |
983 | |
984 | static int ttm_pl_vram = TTM_PL_VRAM; |
985 | static int ttm_pl_tt = TTM_PL_TT; |
986 | |
987 | static struct drm_info_list radeon_ttm_debugfs_list[] = { |
988 | {"radeon_vram_mm" , radeon_mm_dump_table, 0, &ttm_pl_vram}, |
989 | {"radeon_gtt_mm" , radeon_mm_dump_table, 0, &ttm_pl_tt}, |
990 | {"ttm_page_pool" , ttm_page_alloc_debugfs, 0, NULL}, |
991 | #ifdef CONFIG_SWIOTLB |
992 | {"ttm_dma_page_pool" , ttm_dma_page_alloc_debugfs, 0, NULL} |
993 | #endif |
994 | }; |
995 | |
996 | static int radeon_ttm_vram_open(struct inode *inode, struct file *filep) |
997 | { |
998 | struct radeon_device *rdev = inode->i_private; |
999 | i_size_write(inode, rdev->mc.mc_vram_size); |
1000 | filep->private_data = inode->i_private; |
1001 | return 0; |
1002 | } |
1003 | |
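/*
 * debugfs read of raw VRAM, one 32-bit word at a time, through the
 * MM_INDEX/MM_DATA indirect register window.
 */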
1004 | static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf, |
1005 | size_t size, loff_t *pos) |
1006 | { |
1007 | struct radeon_device *rdev = f->private_data; |
1008 | ssize_t result = 0; |
1009 | int r; |
1010 | |
1011 | if (size & 0x3 || *pos & 0x3) |
1012 | return -EINVAL; |
1013 | |
1014 | while (size) { |
1015 | unsigned long flags; |
1016 | uint32_t value; |
1017 | |
1018 | if (*pos >= rdev->mc.mc_vram_size) |
1019 | return result; |
1020 | |
1021 | spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
1022 | WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000); |
1023 | if (rdev->family >= CHIP_CEDAR) |
1024 | WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31); |
1025 | value = RREG32(RADEON_MM_DATA); |
1026 | spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
1027 | |
1028 | r = put_user(value, (uint32_t *)buf); |
1029 | if (r) |
1030 | return r; |
1031 | |
1032 | result += 4; |
1033 | buf += 4; |
1034 | *pos += 4; |
1035 | size -= 4; |
1036 | } |
1037 | |
1038 | return result; |
1039 | } |
1040 | |
1041 | static const struct file_operations radeon_ttm_vram_fops = { |
1042 | .owner = THIS_MODULE, |
1043 | .open = radeon_ttm_vram_open, |
1044 | .read = radeon_ttm_vram_read, |
1045 | .llseek = default_llseek |
1046 | }; |
1047 | |
1048 | static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep) |
1049 | { |
1050 | struct radeon_device *rdev = inode->i_private; |
1051 | i_size_write(inode, rdev->mc.gtt_size); |
1052 | filep->private_data = inode->i_private; |
1053 | return 0; |
1054 | } |
1055 | |
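/*
 * debugfs read of the GTT backing pages via kmap(); holes that have no
 * page yet read back as zeroes.
 */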
1056 | static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf, |
1057 | size_t size, loff_t *pos) |
1058 | { |
1059 | struct radeon_device *rdev = f->private_data; |
1060 | ssize_t result = 0; |
1061 | int r; |
1062 | |
1063 | while (size) { |
1064 | loff_t p = *pos / PAGE_SIZE; |
1065 | unsigned off = *pos & ~PAGE_MASK; |
1066 | size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); |
1067 | struct page *page; |
1068 | void *ptr; |
1069 | |
1070 | if (p >= rdev->gart.num_cpu_pages) |
1071 | return result; |
1072 | |
1073 | page = rdev->gart.pages[p]; |
1074 | if (page) { |
1075 | ptr = kmap(page); |
1076 | ptr += off; |
1077 | |
1078 | r = copy_to_user(buf, ptr, cur_size); |
1079 | kunmap(rdev->gart.pages[p]); |
1080 | } else |
1081 | r = clear_user(buf, cur_size); |
1082 | |
1083 | if (r) |
1084 | return -EFAULT; |
1085 | |
1086 | result += cur_size; |
1087 | buf += cur_size; |
1088 | *pos += cur_size; |
1089 | size -= cur_size; |
1090 | } |
1091 | |
1092 | return result; |
1093 | } |
1094 | |
1095 | static const struct file_operations radeon_ttm_gtt_fops = { |
1096 | .owner = THIS_MODULE, |
1097 | .open = radeon_ttm_gtt_open, |
1098 | .read = radeon_ttm_gtt_read, |
1099 | .llseek = default_llseek |
1100 | }; |
1101 | |
1102 | #endif |
1103 | |
1104 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev) |
1105 | { |
1106 | #if defined(CONFIG_DEBUG_FS) |
1107 | unsigned count; |
1108 | |
1109 | struct drm_minor *minor = rdev->ddev->primary; |
1110 | struct dentry *ent, *root = minor->debugfs_root; |
1111 | |
1112 | ent = debugfs_create_file("radeon_vram" , S_IFREG | S_IRUGO, root, |
1113 | rdev, &radeon_ttm_vram_fops); |
1114 | if (IS_ERR(ent)) |
1115 | return PTR_ERR(ent); |
1116 | rdev->mman.vram = ent; |
1117 | |
1118 | ent = debugfs_create_file("radeon_gtt" , S_IFREG | S_IRUGO, root, |
1119 | rdev, &radeon_ttm_gtt_fops); |
1120 | if (IS_ERR(ent)) |
1121 | return PTR_ERR(ent); |
1122 | rdev->mman.gtt = ent; |
1123 | |
1124 | count = ARRAY_SIZE(radeon_ttm_debugfs_list); |
1125 | |
1126 | #ifdef CONFIG_SWIOTLB |
1127 | if (!swiotlb_nr_tbl()) |
1128 | --count; |
1129 | #endif |
1130 | |
1131 | return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count); |
1132 | #else |
1133 | |
1134 | return 0; |
1135 | #endif |
1136 | } |
1137 | |
1138 | static void radeon_ttm_debugfs_fini(struct radeon_device *rdev) |
1139 | { |
1140 | #if defined(CONFIG_DEBUG_FS) |
1141 | |
1142 | debugfs_remove(rdev->mman.vram); |
1143 | rdev->mman.vram = NULL; |
1144 | |
1145 | debugfs_remove(rdev->mman.gtt); |
1146 | rdev->mman.gtt = NULL; |
1147 | #endif |
1148 | } |
1149 | |