1 | /************************************************************************** |
2 | * |
3 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the |
8 | * "Software"), to deal in the Software without restriction, including |
9 | * without limitation the rights to use, copy, modify, merge, publish, |
10 | * distribute, sub license, and/or sell copies of the Software, and to |
11 | * permit persons to whom the Software is furnished to do so, subject to |
12 | * the following conditions: |
13 | * |
14 | * The above copyright notice and this permission notice (including the |
15 | * next paragraph) shall be included in all copies or substantial portions |
16 | * of the Software. |
17 | * |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * |
26 | **************************************************************************/ |
27 | /* |
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ |
30 | |
31 | #define pr_fmt(fmt) "[TTM] " fmt |
32 | |
33 | #ifdef __NetBSD__ |
34 | #include <sys/types.h> |
35 | #include <uvm/uvm_extern.h> |
36 | #include <uvm/uvm_object.h> |
37 | #endif |
38 | |
39 | #include <drm/ttm/ttm_module.h> |
40 | #include <drm/ttm/ttm_bo_driver.h> |
41 | #include <drm/ttm/ttm_placement.h> |
42 | #include <linux/jiffies.h> |
43 | #include <linux/slab.h> |
44 | #include <linux/sched.h> |
45 | #include <linux/mm.h> |
46 | #include <linux/file.h> |
47 | #include <linux/module.h> |
48 | #include <linux/atomic.h> |
49 | #include <linux/printk.h> |
50 | #include <linux/export.h> |
51 | |
52 | #define TTM_ASSERT_LOCKED(param) |
53 | #define TTM_DEBUG(fmt, arg...) do {} while (0) |
54 | #define TTM_BO_HASH_ORDER 13 |
55 | |
56 | static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); |
57 | #ifndef __NetBSD__ |
58 | static void ttm_bo_global_kobj_release(struct kobject *kobj); |
59 | #endif |
60 | |
61 | #ifndef __NetBSD__ /* XXX sysfs */ |
62 | static struct attribute ttm_bo_count = { |
	.name = "bo_count",
64 | .mode = S_IRUGO |
65 | }; |
66 | #endif |
67 | |
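/*
 * Translate a TTM_PL_FLAG_* placement mask into the index of the first
 * memory type it selects (TTM_PL_SYSTEM .. TTM_PL_PRIV5).  Returns
 * -EINVAL if the mask names no memory type at all.
 */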
68 | static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) |
69 | { |
70 | int i; |
71 | |
72 | for (i = 0; i <= TTM_PL_PRIV5; i++) |
73 | if (flags & (1 << i)) { |
74 | *mem_type = i; |
75 | return 0; |
76 | } |
77 | return -EINVAL; |
78 | } |
79 | |
80 | static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) |
81 | { |
82 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
83 | |
	pr_err(" has_type: %d\n", man->has_type);
	pr_err(" use_type: %d\n", man->use_type);
	pr_err(" flags: 0x%08X\n", man->flags);
	pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err(" size: %" PRIu64 "\n", man->size);
	pr_err(" available_caching: 0x%08X\n", man->available_caching);
	pr_err(" default_caching: 0x%08X\n", man->default_caching);
91 | if (mem_type != TTM_PL_SYSTEM) |
92 | (*man->func->debug)(man, TTM_PFX); |
93 | } |
94 | |
95 | static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, |
96 | struct ttm_placement *placement) |
97 | { |
98 | int i, ret, mem_type; |
99 | |
	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
101 | bo, bo->mem.num_pages, bo->mem.size >> 10, |
102 | bo->mem.size >> 20); |
103 | for (i = 0; i < placement->num_placement; i++) { |
104 | ret = ttm_mem_type_from_flags(placement->placement[i], |
105 | &mem_type); |
106 | if (ret) |
107 | return; |
		pr_err(" placement[%d]=0x%08X (%d)\n",
109 | i, placement->placement[i], mem_type); |
110 | ttm_mem_type_debug(bo->bdev, mem_type); |
111 | } |
112 | } |
113 | |
114 | #ifndef __NetBSD__ /* XXX sysfs */ |
115 | static ssize_t ttm_bo_global_show(struct kobject *kobj, |
116 | struct attribute *attr, |
117 | char *buffer) |
118 | { |
119 | struct ttm_bo_global *glob = |
120 | container_of(kobj, struct ttm_bo_global, kobj); |
121 | |
	return snprintf(buffer, PAGE_SIZE, "%lu\n",
123 | (unsigned long) atomic_read(&glob->bo_count)); |
124 | } |
125 | |
126 | static struct attribute *ttm_bo_global_attrs[] = { |
127 | &ttm_bo_count, |
128 | NULL |
129 | }; |
130 | |
131 | static const struct sysfs_ops ttm_bo_global_ops = { |
132 | .show = &ttm_bo_global_show |
133 | }; |
134 | |
135 | static struct kobj_type ttm_bo_glob_kobj_type = { |
136 | .release = &ttm_bo_global_kobj_release, |
137 | .sysfs_ops = &ttm_bo_global_ops, |
138 | .default_attrs = ttm_bo_global_attrs |
139 | }; |
140 | #endif /* __NetBSD__ */ |
141 | |
142 | |
143 | static inline uint32_t ttm_bo_type_flags(unsigned type) |
144 | { |
145 | return 1 << (type); |
146 | } |
147 | |
148 | static void ttm_bo_release_list(struct kref *list_kref) |
149 | { |
150 | struct ttm_buffer_object *bo = |
151 | container_of(list_kref, struct ttm_buffer_object, list_kref); |
152 | struct ttm_bo_device *bdev = bo->bdev; |
153 | size_t acc_size = bo->acc_size; |
154 | |
155 | BUG_ON(kref_referenced_p(&bo->list_kref)); |
156 | BUG_ON(kref_referenced_p(&bo->kref)); |
157 | BUG_ON(atomic_read(&bo->cpu_writers)); |
158 | BUG_ON(bo->sync_obj != NULL); |
159 | BUG_ON(bo->mem.mm_node != NULL); |
160 | BUG_ON(!list_empty(&bo->lru)); |
161 | BUG_ON(!list_empty(&bo->ddestroy)); |
162 | |
163 | if (bo->ttm) |
164 | ttm_tt_destroy(bo->ttm); |
165 | atomic_dec(&bo->glob->bo_count); |
166 | if (bo->resv == &bo->ttm_resv) |
167 | reservation_object_fini(&bo->ttm_resv); |
168 | #ifdef __NetBSD__ |
169 | linux_mutex_destroy(&bo->wu_mutex); |
170 | #else |
171 | mutex_destroy(&bo->wu_mutex); |
172 | #endif |
173 | if (bo->destroy) |
174 | bo->destroy(bo); |
175 | else { |
176 | kfree(bo); |
177 | } |
178 | ttm_mem_global_free(bdev->glob->mem_glob, acc_size); |
179 | } |
180 | |
181 | void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) |
182 | { |
183 | struct ttm_bo_device *bdev = bo->bdev; |
184 | struct ttm_mem_type_manager *man; |
185 | |
186 | lockdep_assert_held(&bo->resv->lock.base); |
187 | |
188 | if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { |
189 | |
190 | BUG_ON(!list_empty(&bo->lru)); |
191 | |
192 | man = &bdev->man[bo->mem.mem_type]; |
193 | list_add_tail(&bo->lru, &man->lru); |
194 | kref_get(&bo->list_kref); |
195 | |
196 | if (bo->ttm != NULL) { |
197 | list_add_tail(&bo->swap, &bo->glob->swap_lru); |
198 | kref_get(&bo->list_kref); |
199 | } |
200 | } |
201 | } |
202 | EXPORT_SYMBOL(ttm_bo_add_to_lru); |
203 | |
204 | int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) |
205 | { |
206 | int put_count = 0; |
207 | |
208 | if (!list_empty(&bo->swap)) { |
209 | list_del_init(&bo->swap); |
210 | ++put_count; |
211 | } |
212 | if (!list_empty(&bo->lru)) { |
213 | list_del_init(&bo->lru); |
214 | ++put_count; |
215 | } |
216 | |
217 | /* |
218 | * TODO: Add a driver hook to delete from |
219 | * driver-specific LRU's here. |
220 | */ |
221 | |
222 | return put_count; |
223 | } |
224 | |
225 | static void ttm_bo_ref_bug(struct kref *list_kref) |
226 | { |
227 | BUG(); |
228 | } |
229 | |
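/*
 * Drop @count list references previously taken for LRU/swap list
 * membership.  With @never_free set, dropping the last reference here
 * is a bug (ttm_bo_ref_bug); otherwise the bo is released through
 * ttm_bo_release_list.
 */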
230 | void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, |
231 | bool never_free) |
232 | { |
233 | kref_sub(&bo->list_kref, count, |
234 | (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list); |
235 | } |
236 | |
237 | void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo) |
238 | { |
239 | int put_count; |
240 | |
241 | spin_lock(&bo->glob->lru_lock); |
242 | put_count = ttm_bo_del_from_lru(bo); |
243 | spin_unlock(&bo->glob->lru_lock); |
244 | ttm_bo_list_ref_sub(bo, put_count, true); |
245 | } |
246 | EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); |
247 | |
248 | /* |
 * Call with bo->mutex held.
250 | */ |
251 | static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) |
252 | { |
253 | struct ttm_bo_device *bdev = bo->bdev; |
254 | struct ttm_bo_global *glob = bo->glob; |
255 | int ret = 0; |
256 | uint32_t page_flags = 0; |
257 | |
258 | TTM_ASSERT_LOCKED(&bo->mutex); |
259 | bo->ttm = NULL; |
260 | |
261 | if (bdev->need_dma32) |
262 | page_flags |= TTM_PAGE_FLAG_DMA32; |
263 | |
264 | switch (bo->type) { |
265 | case ttm_bo_type_device: |
266 | if (zero_alloc) |
267 | page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC; |
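		/* FALLTHROUGH: the ttm_tt is created in the kernel case below */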
268 | case ttm_bo_type_kernel: |
269 | bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, |
270 | page_flags, glob->dummy_read_page); |
271 | if (unlikely(bo->ttm == NULL)) |
272 | ret = -ENOMEM; |
273 | break; |
274 | case ttm_bo_type_sg: |
275 | bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, |
276 | page_flags | TTM_PAGE_FLAG_SG, |
277 | glob->dummy_read_page); |
278 | if (unlikely(bo->ttm == NULL)) { |
279 | ret = -ENOMEM; |
280 | break; |
281 | } |
282 | bo->ttm->sg = bo->sg; |
283 | break; |
284 | default: |
		pr_err("Illegal buffer object type\n");
286 | ret = -EINVAL; |
287 | break; |
288 | } |
289 | |
290 | #ifdef __NetBSD__ |
291 | if (ret) |
292 | return ret; |
293 | |
294 | /* |
295 | * XXX This is gross. We ought to do it the other way around: |
296 | * set the uao to have the main uvm object's lock. However, |
297 | * uvm_obj_setlock is not safe on uvm_aobjs. |
298 | */ |
299 | mutex_obj_hold(bo->ttm->swap_storage->vmobjlock); |
300 | uvm_obj_setlock(&bo->uvmobj, bo->ttm->swap_storage->vmobjlock); |
301 | return 0; |
302 | #else |
303 | return ret; |
304 | #endif |
305 | } |
306 | |
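/*
 * Move the buffer into the region described by @mem.  CPU mappings are
 * torn down when the PCI status or caching attributes change, a ttm_tt
 * is created and bound if the new memory type is not FIXED, and the
 * contents are transferred with ttm_bo_move_ttm(), the driver's move()
 * hook, or the memcpy fallback.  Called with the bo reserved.
 */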
307 | static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, |
308 | struct ttm_mem_reg *mem, |
309 | bool evict, bool interruptible, |
310 | bool no_wait_gpu) |
311 | { |
312 | struct ttm_bo_device *bdev = bo->bdev; |
313 | bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); |
314 | bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem); |
315 | struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; |
316 | struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; |
317 | int ret = 0; |
318 | |
319 | if (old_is_pci || new_is_pci || |
320 | ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { |
321 | ret = ttm_mem_io_lock(old_man, true); |
322 | if (unlikely(ret != 0)) |
323 | goto out_err; |
324 | ttm_bo_unmap_virtual_locked(bo); |
325 | ttm_mem_io_unlock(old_man); |
326 | } |
327 | |
328 | /* |
329 | * Create and bind a ttm if required. |
330 | */ |
331 | |
332 | if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { |
333 | if (bo->ttm == NULL) { |
334 | bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); |
335 | ret = ttm_bo_add_ttm(bo, zero); |
336 | if (ret) |
337 | goto out_err; |
338 | } |
339 | |
340 | ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); |
341 | if (ret) |
342 | goto out_err; |
343 | |
344 | if (mem->mem_type != TTM_PL_SYSTEM) { |
345 | ret = ttm_tt_bind(bo->ttm, mem); |
346 | if (ret) |
347 | goto out_err; |
348 | } |
349 | |
350 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { |
351 | if (bdev->driver->move_notify) |
352 | bdev->driver->move_notify(bo, mem); |
353 | bo->mem = *mem; |
354 | mem->mm_node = NULL; |
355 | goto moved; |
356 | } |
357 | } |
358 | |
359 | if (bdev->driver->move_notify) |
360 | bdev->driver->move_notify(bo, mem); |
361 | |
362 | if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && |
363 | !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) |
364 | ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); |
365 | else if (bdev->driver->move) |
366 | ret = bdev->driver->move(bo, evict, interruptible, |
367 | no_wait_gpu, mem); |
368 | else |
369 | ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); |
370 | |
371 | if (ret) { |
372 | if (bdev->driver->move_notify) { |
373 | struct ttm_mem_reg tmp_mem = *mem; |
374 | *mem = bo->mem; |
375 | bo->mem = tmp_mem; |
376 | bdev->driver->move_notify(bo, mem); |
377 | bo->mem = *mem; |
378 | *mem = tmp_mem; |
379 | } |
380 | |
381 | goto out_err; |
382 | } |
383 | |
384 | moved: |
385 | if (bo->evicted) { |
386 | if (bdev->driver->invalidate_caches) { |
387 | ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); |
388 | if (ret) |
				pr_err("Can not flush read caches\n");
390 | } |
391 | bo->evicted = false; |
392 | } |
393 | |
394 | if (bo->mem.mm_node) { |
395 | bo->offset = (bo->mem.start << PAGE_SHIFT) + |
396 | bdev->man[bo->mem.mem_type].gpu_offset; |
397 | bo->cur_placement = bo->mem.placement; |
398 | } else |
399 | bo->offset = 0; |
400 | |
401 | return 0; |
402 | |
403 | out_err: |
404 | new_man = &bdev->man[bo->mem.mem_type]; |
405 | if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { |
406 | ttm_tt_unbind(bo->ttm); |
407 | ttm_tt_destroy(bo->ttm); |
408 | bo->ttm = NULL; |
409 | } |
410 | |
411 | return ret; |
412 | } |
413 | |
414 | /** |
 * Called with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver-specific hooks to release
 * driver-private resources.
419 | * Will release the bo::reserved lock. |
420 | */ |
421 | |
422 | static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) |
423 | { |
424 | if (bo->bdev->driver->move_notify) |
425 | bo->bdev->driver->move_notify(bo, NULL); |
426 | |
427 | if (bo->ttm) { |
428 | ttm_tt_unbind(bo->ttm); |
429 | ttm_tt_destroy(bo->ttm); |
430 | bo->ttm = NULL; |
431 | } |
432 | ttm_bo_mem_put(bo, &bo->mem); |
433 | |
	ww_mutex_unlock(&bo->resv->lock);
435 | } |
436 | |
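/*
 * Called on the final unref of a bo.  If the bo can be reserved without
 * blocking and is already idle, release its memory type usage right
 * away; otherwise take a fence reference and queue the bo on the
 * device's delayed-destroy list for the workqueue to finish later.
 */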
437 | static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) |
438 | { |
439 | struct ttm_bo_device *bdev = bo->bdev; |
440 | struct ttm_bo_global *glob = bo->glob; |
441 | struct ttm_bo_driver *driver = bdev->driver; |
442 | void *sync_obj = NULL; |
443 | int put_count; |
444 | int ret; |
445 | |
446 | spin_lock(&glob->lru_lock); |
447 | ret = __ttm_bo_reserve(bo, false, true, false, 0); |
448 | |
449 | spin_lock(&bdev->fence_lock); |
450 | (void) ttm_bo_wait(bo, false, false, true); |
451 | if (!ret && !bo->sync_obj) { |
452 | spin_unlock(&bdev->fence_lock); |
453 | put_count = ttm_bo_del_from_lru(bo); |
454 | |
455 | spin_unlock(&glob->lru_lock); |
456 | ttm_bo_cleanup_memtype_use(bo); |
457 | |
458 | ttm_bo_list_ref_sub(bo, put_count, true); |
459 | |
460 | return; |
461 | } |
462 | if (bo->sync_obj) |
463 | sync_obj = driver->sync_obj_ref(bo->sync_obj); |
464 | spin_unlock(&bdev->fence_lock); |
465 | |
466 | if (!ret) { |
467 | |
468 | /* |
469 | * Make NO_EVICT bos immediately available to |
470 | * shrinkers, now that they are queued for |
471 | * destruction. |
472 | */ |
473 | if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { |
474 | bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; |
475 | ttm_bo_add_to_lru(bo); |
476 | } |
477 | |
478 | __ttm_bo_unreserve(bo); |
479 | } |
480 | |
481 | kref_get(&bo->list_kref); |
482 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); |
483 | spin_unlock(&glob->lru_lock); |
484 | |
485 | if (sync_obj) { |
486 | driver->sync_obj_flush(sync_obj); |
487 | driver->sync_obj_unref(&sync_obj); |
488 | } |
489 | schedule_delayed_work(&bdev->wq, |
490 | ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); |
491 | } |
492 | |
493 | /** |
 * ttm_bo_cleanup_refs_and_unlock
 * If the bo is idle, remove it from the delayed-destroy and LRU lists
 * and drop the corresponding list references.  If not idle, do nothing.
 *
 * Must be called with lru_lock and the reservation held; this function
 * will drop both before returning.
 *
 * @interruptible Any sleeps should occur interruptibly.
 * @no_wait_gpu Never wait for the GPU. Return -EBUSY instead.
503 | */ |
504 | |
505 | static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, |
506 | bool interruptible, |
507 | bool no_wait_gpu) |
508 | { |
509 | struct ttm_bo_device *bdev = bo->bdev; |
510 | struct ttm_bo_driver *driver = bdev->driver; |
511 | struct ttm_bo_global *glob = bo->glob; |
512 | int put_count; |
513 | int ret; |
514 | |
515 | spin_lock(&bdev->fence_lock); |
516 | ret = ttm_bo_wait(bo, false, false, true); |
517 | |
518 | if (ret && !no_wait_gpu) { |
519 | void *sync_obj; |
520 | |
521 | /* |
		 * Take a reference to the fence and unreserve; at this
		 * point the buffer should be dead, so no new sync objects
		 * can be attached.
525 | */ |
526 | sync_obj = driver->sync_obj_ref(bo->sync_obj); |
527 | spin_unlock(&bdev->fence_lock); |
528 | |
529 | __ttm_bo_unreserve(bo); |
530 | spin_unlock(&glob->lru_lock); |
531 | |
532 | ret = driver->sync_obj_wait(sync_obj, false, interruptible); |
533 | driver->sync_obj_unref(&sync_obj); |
534 | if (ret) |
535 | return ret; |
536 | |
537 | /* |
		 * Remove the sync_obj with ttm_bo_wait; the wait should have
		 * finished, and no new sync object should have been attached.
540 | */ |
541 | spin_lock(&bdev->fence_lock); |
542 | ret = ttm_bo_wait(bo, false, false, true); |
543 | WARN_ON(ret); |
544 | spin_unlock(&bdev->fence_lock); |
545 | if (ret) |
546 | return ret; |
547 | |
548 | spin_lock(&glob->lru_lock); |
549 | ret = __ttm_bo_reserve(bo, false, true, false, 0); |
550 | |
551 | /* |
		 * We raced and lost: someone else holds the reservation now
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if that is not the case, the wait has finished, so
		 * any delayed destruction would succeed; just return
		 * success here.
558 | */ |
559 | if (ret) { |
560 | spin_unlock(&glob->lru_lock); |
561 | return 0; |
562 | } |
563 | } else |
564 | spin_unlock(&bdev->fence_lock); |
565 | |
566 | if (ret || unlikely(list_empty(&bo->ddestroy))) { |
567 | __ttm_bo_unreserve(bo); |
568 | spin_unlock(&glob->lru_lock); |
569 | return ret; |
570 | } |
571 | |
572 | put_count = ttm_bo_del_from_lru(bo); |
573 | list_del_init(&bo->ddestroy); |
574 | ++put_count; |
575 | |
576 | spin_unlock(&glob->lru_lock); |
577 | ttm_bo_cleanup_memtype_use(bo); |
578 | |
579 | ttm_bo_list_ref_sub(bo, put_count, true); |
580 | |
581 | return 0; |
582 | } |
583 | |
584 | /** |
 * Traverse the delayed-destroy list and call
 * ttm_bo_cleanup_refs_and_unlock() on all encountered buffers.
587 | */ |
588 | |
589 | static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) |
590 | { |
591 | struct ttm_bo_global *glob = bdev->glob; |
592 | struct ttm_buffer_object *entry = NULL; |
593 | int ret = 0; |
594 | |
595 | spin_lock(&glob->lru_lock); |
596 | if (list_empty(&bdev->ddestroy)) |
597 | goto out_unlock; |
598 | |
599 | entry = list_first_entry(&bdev->ddestroy, |
600 | struct ttm_buffer_object, ddestroy); |
601 | kref_get(&entry->list_kref); |
602 | |
603 | for (;;) { |
604 | struct ttm_buffer_object *nentry = NULL; |
605 | |
606 | if (entry->ddestroy.next != &bdev->ddestroy) { |
607 | nentry = list_first_entry(&entry->ddestroy, |
608 | struct ttm_buffer_object, ddestroy); |
609 | kref_get(&nentry->list_kref); |
610 | } |
611 | |
612 | ret = __ttm_bo_reserve(entry, false, true, false, 0); |
613 | if (remove_all && ret) { |
614 | spin_unlock(&glob->lru_lock); |
615 | ret = __ttm_bo_reserve(entry, false, false, |
616 | false, 0); |
617 | spin_lock(&glob->lru_lock); |
618 | } |
619 | |
620 | if (!ret) |
621 | ret = ttm_bo_cleanup_refs_and_unlock(entry, false, |
622 | !remove_all); |
623 | else |
624 | spin_unlock(&glob->lru_lock); |
625 | |
626 | kref_put(&entry->list_kref, ttm_bo_release_list); |
627 | entry = nentry; |
628 | |
629 | if (ret || !entry) |
630 | goto out; |
631 | |
632 | spin_lock(&glob->lru_lock); |
633 | if (list_empty(&entry->ddestroy)) |
634 | break; |
635 | } |
636 | |
637 | out_unlock: |
638 | spin_unlock(&glob->lru_lock); |
639 | out: |
640 | if (entry) |
641 | kref_put(&entry->list_kref, ttm_bo_release_list); |
642 | return ret; |
643 | } |
644 | |
645 | static void ttm_bo_delayed_workqueue(struct work_struct *work) |
646 | { |
647 | struct ttm_bo_device *bdev = |
648 | container_of(work, struct ttm_bo_device, wq.work); |
649 | |
650 | if (ttm_bo_delayed_delete(bdev, false)) { |
651 | schedule_delayed_work(&bdev->wq, |
652 | ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); |
653 | } |
654 | } |
655 | |
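/*
 * Final kref release for bo->kref: remove the mmap offset, free any
 * io-reserved VM mappings, then destroy the bo immediately or queue it
 * for delayed destruction via ttm_bo_cleanup_refs_or_queue().
 */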
656 | static void ttm_bo_release(struct kref *kref) |
657 | { |
658 | struct ttm_buffer_object *bo = |
659 | container_of(kref, struct ttm_buffer_object, kref); |
660 | struct ttm_bo_device *bdev = bo->bdev; |
661 | struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; |
662 | |
663 | #ifdef __NetBSD__ |
664 | uvm_obj_destroy(&bo->uvmobj, true); |
665 | #endif |
666 | drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node); |
667 | #ifdef __NetBSD__ |
668 | drm_vma_node_destroy(&bo->vma_node); |
669 | #endif |
670 | ttm_mem_io_lock(man, false); |
671 | ttm_mem_io_free_vm(bo); |
672 | ttm_mem_io_unlock(man); |
673 | ttm_bo_cleanup_refs_or_queue(bo); |
674 | kref_put(&bo->list_kref, ttm_bo_release_list); |
675 | } |
676 | |
677 | void ttm_bo_unref(struct ttm_buffer_object **p_bo) |
678 | { |
679 | struct ttm_buffer_object *bo = *p_bo; |
680 | |
681 | *p_bo = NULL; |
682 | kref_put(&bo->kref, ttm_bo_release); |
683 | } |
684 | EXPORT_SYMBOL(ttm_bo_unref); |
685 | |
686 | int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) |
687 | { |
688 | return cancel_delayed_work_sync(&bdev->wq); |
689 | } |
690 | EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue); |
691 | |
692 | void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) |
693 | { |
694 | if (resched) |
695 | schedule_delayed_work(&bdev->wq, |
696 | ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); |
697 | } |
698 | EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); |
699 | |
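/*
 * Evict a single buffer: wait for the GPU, ask the driver for an
 * eviction placement via evict_flags(), find space for it with
 * ttm_bo_mem_space() and move the bo there.  Called with the bo
 * reserved.
 */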
700 | static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, |
701 | bool no_wait_gpu) |
702 | { |
703 | struct ttm_bo_device *bdev = bo->bdev; |
704 | struct ttm_mem_reg evict_mem; |
705 | struct ttm_placement placement; |
706 | int ret = 0; |
707 | |
708 | spin_lock(&bdev->fence_lock); |
709 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); |
710 | spin_unlock(&bdev->fence_lock); |
711 | |
712 | if (unlikely(ret != 0)) { |
713 | if (ret != -ERESTARTSYS) { |
			pr_err("Failed to expire sync object before buffer eviction\n");
715 | } |
716 | goto out; |
717 | } |
718 | |
719 | lockdep_assert_held(&bo->resv->lock.base); |
720 | |
721 | evict_mem = bo->mem; |
722 | evict_mem.mm_node = NULL; |
723 | evict_mem.bus.io_reserved_vm = false; |
724 | evict_mem.bus.io_reserved_count = 0; |
725 | |
726 | placement.fpfn = 0; |
727 | placement.lpfn = 0; |
728 | placement.num_placement = 0; |
729 | placement.num_busy_placement = 0; |
730 | bdev->driver->evict_flags(bo, &placement); |
731 | ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, |
732 | no_wait_gpu); |
733 | if (ret) { |
734 | if (ret != -ERESTARTSYS) { |
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
736 | bo); |
737 | ttm_bo_mem_space_debug(bo, &placement); |
738 | } |
739 | goto out; |
740 | } |
741 | |
742 | ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, |
743 | no_wait_gpu); |
744 | if (ret) { |
745 | if (ret != -ERESTARTSYS) |
			pr_err("Buffer eviction failed\n");
747 | ttm_bo_mem_put(bo, &evict_mem); |
748 | goto out; |
749 | } |
750 | bo->evicted = true; |
751 | out: |
752 | return ret; |
753 | } |
754 | |
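/*
 * Evict the first buffer on @mem_type's LRU list that can be reserved
 * without blocking.  Buffers already queued for delayed destruction are
 * cleaned up instead of being evicted.
 */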
755 | static int ttm_mem_evict_first(struct ttm_bo_device *bdev, |
756 | uint32_t mem_type, |
757 | bool interruptible, |
758 | bool no_wait_gpu) |
759 | { |
760 | struct ttm_bo_global *glob = bdev->glob; |
761 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
762 | struct ttm_buffer_object *bo; |
763 | int ret = -EBUSY, put_count; |
764 | |
765 | spin_lock(&glob->lru_lock); |
766 | list_for_each_entry(bo, &man->lru, lru) { |
767 | ret = __ttm_bo_reserve(bo, false, true, false, 0); |
768 | if (!ret) |
769 | break; |
770 | } |
771 | |
772 | if (ret) { |
773 | spin_unlock(&glob->lru_lock); |
774 | return ret; |
775 | } |
776 | |
777 | kref_get(&bo->list_kref); |
778 | |
779 | if (!list_empty(&bo->ddestroy)) { |
780 | ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible, |
781 | no_wait_gpu); |
782 | kref_put(&bo->list_kref, ttm_bo_release_list); |
783 | return ret; |
784 | } |
785 | |
786 | put_count = ttm_bo_del_from_lru(bo); |
787 | spin_unlock(&glob->lru_lock); |
788 | |
789 | BUG_ON(ret != 0); |
790 | |
791 | ttm_bo_list_ref_sub(bo, put_count, true); |
792 | |
793 | ret = ttm_bo_evict(bo, interruptible, no_wait_gpu); |
794 | ttm_bo_unreserve(bo); |
795 | |
796 | kref_put(&bo->list_kref, ttm_bo_release_list); |
797 | return ret; |
798 | } |
799 | |
800 | void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) |
801 | { |
802 | struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; |
803 | |
804 | if (mem->mm_node) |
805 | (*man->func->put_node)(man, mem); |
806 | } |
807 | EXPORT_SYMBOL(ttm_bo_mem_put); |
808 | |
809 | /** |
810 | * Repeatedly evict memory from the LRU for @mem_type until we create enough |
811 | * space, or we've evicted everything and there isn't enough space. |
812 | */ |
813 | static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, |
814 | uint32_t mem_type, |
815 | struct ttm_placement *placement, |
816 | struct ttm_mem_reg *mem, |
817 | bool interruptible, |
818 | bool no_wait_gpu) |
819 | { |
820 | struct ttm_bo_device *bdev = bo->bdev; |
821 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
822 | int ret; |
823 | |
824 | do { |
825 | ret = (*man->func->get_node)(man, bo, placement, mem); |
826 | if (unlikely(ret != 0)) |
827 | return ret; |
828 | if (mem->mm_node) |
829 | break; |
830 | ret = ttm_mem_evict_first(bdev, mem_type, |
831 | interruptible, no_wait_gpu); |
832 | if (unlikely(ret != 0)) |
833 | return ret; |
834 | } while (1); |
835 | if (mem->mm_node == NULL) |
836 | return -ENOMEM; |
837 | mem->mem_type = mem_type; |
838 | return 0; |
839 | } |
840 | |
841 | static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, |
842 | uint32_t cur_placement, |
843 | uint32_t proposed_placement) |
844 | { |
845 | uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; |
846 | uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; |
847 | |
848 | /** |
849 | * Keep current caching if possible. |
850 | */ |
851 | |
852 | if ((cur_placement & caching) != 0) |
853 | result |= (cur_placement & caching); |
854 | else if ((man->default_caching & caching) != 0) |
855 | result |= man->default_caching; |
856 | else if ((TTM_PL_FLAG_CACHED & caching) != 0) |
857 | result |= TTM_PL_FLAG_CACHED; |
858 | else if ((TTM_PL_FLAG_WC & caching) != 0) |
859 | result |= TTM_PL_FLAG_WC; |
860 | else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) |
861 | result |= TTM_PL_FLAG_UNCACHED; |
862 | |
863 | return result; |
864 | } |
865 | |
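/*
 * Check whether memory type @mem_type can satisfy @proposed_placement.
 * On success, *masked_placement holds the type flag plus the proposed
 * caching flags that the manager actually supports.
 */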
866 | static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, |
867 | uint32_t mem_type, |
868 | uint32_t proposed_placement, |
869 | uint32_t *masked_placement) |
870 | { |
871 | uint32_t cur_flags = ttm_bo_type_flags(mem_type); |
872 | |
873 | if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) |
874 | return false; |
875 | |
876 | if ((proposed_placement & man->available_caching) == 0) |
877 | return false; |
878 | |
879 | cur_flags |= (proposed_placement & man->available_caching); |
880 | |
881 | *masked_placement = cur_flags; |
882 | return true; |
883 | } |
884 | |
885 | /** |
886 | * Creates space for memory region @mem according to its type. |
887 | * |
888 | * This function first searches for free space in compatible memory types in |
889 | * the priority order defined by the driver. If free space isn't found, then |
890 | * ttm_bo_mem_force_space is attempted in priority order to evict and find |
891 | * space. |
892 | */ |
893 | int ttm_bo_mem_space(struct ttm_buffer_object *bo, |
894 | struct ttm_placement *placement, |
895 | struct ttm_mem_reg *mem, |
896 | bool interruptible, |
897 | bool no_wait_gpu) |
898 | { |
899 | struct ttm_bo_device *bdev = bo->bdev; |
900 | struct ttm_mem_type_manager *man; |
901 | uint32_t mem_type = TTM_PL_SYSTEM; |
902 | uint32_t cur_flags = 0; |
903 | bool type_found = false; |
904 | bool type_ok = false; |
905 | bool has_erestartsys = false; |
906 | int i, ret; |
907 | |
908 | mem->mm_node = NULL; |
909 | for (i = 0; i < placement->num_placement; ++i) { |
910 | ret = ttm_mem_type_from_flags(placement->placement[i], |
911 | &mem_type); |
912 | if (ret) |
913 | return ret; |
914 | man = &bdev->man[mem_type]; |
915 | |
916 | type_ok = ttm_bo_mt_compatible(man, |
917 | mem_type, |
918 | placement->placement[i], |
919 | &cur_flags); |
920 | |
921 | if (!type_ok) |
922 | continue; |
923 | |
924 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
925 | cur_flags); |
926 | /* |
		 * Merge the access and other non-mapping-related flag bits
		 * from the memory placement flags into the current flags.
929 | */ |
930 | ttm_flag_masked(&cur_flags, placement->placement[i], |
931 | ~TTM_PL_MASK_MEMTYPE); |
932 | |
933 | if (mem_type == TTM_PL_SYSTEM) |
934 | break; |
935 | |
936 | if (man->has_type && man->use_type) { |
937 | type_found = true; |
938 | ret = (*man->func->get_node)(man, bo, placement, mem); |
939 | if (unlikely(ret)) |
940 | return ret; |
941 | } |
942 | if (mem->mm_node) |
943 | break; |
944 | } |
945 | |
946 | if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { |
947 | mem->mem_type = mem_type; |
948 | mem->placement = cur_flags; |
949 | return 0; |
950 | } |
951 | |
952 | if (!type_found) |
953 | return -EINVAL; |
954 | |
955 | for (i = 0; i < placement->num_busy_placement; ++i) { |
956 | ret = ttm_mem_type_from_flags(placement->busy_placement[i], |
957 | &mem_type); |
958 | if (ret) |
959 | return ret; |
960 | man = &bdev->man[mem_type]; |
961 | if (!man->has_type) |
962 | continue; |
963 | if (!ttm_bo_mt_compatible(man, |
964 | mem_type, |
965 | placement->busy_placement[i], |
966 | &cur_flags)) |
967 | continue; |
968 | |
969 | cur_flags = ttm_bo_select_caching(man, bo->mem.placement, |
970 | cur_flags); |
971 | /* |
		 * Merge the access and other non-mapping-related flag bits
		 * from the memory placement flags into the current flags.
974 | */ |
975 | ttm_flag_masked(&cur_flags, placement->busy_placement[i], |
976 | ~TTM_PL_MASK_MEMTYPE); |
977 | |
978 | |
979 | if (mem_type == TTM_PL_SYSTEM) { |
980 | mem->mem_type = mem_type; |
981 | mem->placement = cur_flags; |
982 | mem->mm_node = NULL; |
983 | return 0; |
984 | } |
985 | |
986 | ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, |
987 | interruptible, no_wait_gpu); |
988 | if (ret == 0 && mem->mm_node) { |
989 | mem->placement = cur_flags; |
990 | return 0; |
991 | } |
992 | if (ret == -ERESTARTSYS) |
993 | has_erestartsys = true; |
994 | } |
995 | ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; |
996 | return ret; |
997 | } |
998 | EXPORT_SYMBOL(ttm_bo_mem_space); |
999 | |
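/*
 * Find a new memory region matching @placement and move the buffer into
 * it.  A node obtained from ttm_bo_mem_space() is handed over to
 * ttm_bo_handle_move_mem() and is only released here on failure.
 */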
1000 | static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, |
1001 | struct ttm_placement *placement, |
1002 | bool interruptible, |
1003 | bool no_wait_gpu) |
1004 | { |
1005 | int ret = 0; |
1006 | struct ttm_mem_reg mem; |
1007 | struct ttm_bo_device *bdev = bo->bdev; |
1008 | |
1009 | lockdep_assert_held(&bo->resv->lock.base); |
1010 | |
1011 | /* |
1012 | * FIXME: It's possible to pipeline buffer moves. |
1013 | * Have the driver move function wait for idle when necessary, |
1014 | * instead of doing it here. |
1015 | */ |
1016 | spin_lock(&bdev->fence_lock); |
1017 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); |
1018 | spin_unlock(&bdev->fence_lock); |
1019 | if (ret) |
1020 | return ret; |
1021 | mem.num_pages = bo->num_pages; |
1022 | mem.size = mem.num_pages << PAGE_SHIFT; |
1023 | mem.page_alignment = bo->mem.page_alignment; |
1024 | mem.bus.is_iomem = false; |
1025 | mem.bus.io_reserved_vm = false; |
1026 | mem.bus.io_reserved_count = 0; |
1027 | /* |
1028 | * Determine where to move the buffer. |
1029 | */ |
1030 | ret = ttm_bo_mem_space(bo, placement, &mem, |
1031 | interruptible, no_wait_gpu); |
1032 | if (ret) |
1033 | goto out_unlock; |
1034 | ret = ttm_bo_handle_move_mem(bo, &mem, false, |
1035 | interruptible, no_wait_gpu); |
1036 | out_unlock: |
1037 | if (ret && mem.mm_node) |
1038 | ttm_bo_mem_put(bo, &mem); |
1039 | return ret; |
1040 | } |
1041 | |
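/*
 * Check whether the buffer's current memory region already satisfies
 * one of the requested placements (matching memory type and caching,
 * and inside the fpfn/lpfn range).  On success, *new_flags holds the
 * matching placement flags.
 */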
1042 | static bool ttm_bo_mem_compat(struct ttm_placement *placement, |
1043 | struct ttm_mem_reg *mem, |
1044 | uint32_t *new_flags) |
1045 | { |
1046 | int i; |
1047 | |
1048 | if (mem->mm_node && placement->lpfn != 0 && |
1049 | (mem->start < placement->fpfn || |
1050 | mem->start + mem->num_pages > placement->lpfn)) |
1051 | return false; |
1052 | |
1053 | for (i = 0; i < placement->num_placement; i++) { |
1054 | *new_flags = placement->placement[i]; |
1055 | if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && |
1056 | (*new_flags & mem->placement & TTM_PL_MASK_MEM)) |
1057 | return true; |
1058 | } |
1059 | |
1060 | for (i = 0; i < placement->num_busy_placement; i++) { |
1061 | *new_flags = placement->busy_placement[i]; |
1062 | if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && |
1063 | (*new_flags & mem->placement & TTM_PL_MASK_MEM)) |
1064 | return true; |
1065 | } |
1066 | |
1067 | return false; |
1068 | } |
1069 | |
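/*
 * Make the buffer conform to @placement: move it if the current region
 * is incompatible, otherwise just merge in the access flags, and make
 * sure a system-memory buffer has a ttm_tt attached.  Called with the
 * bo reserved.
 */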
1070 | int ttm_bo_validate(struct ttm_buffer_object *bo, |
1071 | struct ttm_placement *placement, |
1072 | bool interruptible, |
1073 | bool no_wait_gpu) |
1074 | { |
1075 | int ret; |
1076 | uint32_t new_flags; |
1077 | |
1078 | lockdep_assert_held(&bo->resv->lock.base); |
1079 | /* Check that range is valid */ |
1080 | if (placement->lpfn || placement->fpfn) |
1081 | if (placement->fpfn > placement->lpfn || |
1082 | (placement->lpfn - placement->fpfn) < bo->num_pages) |
1083 | return -EINVAL; |
1084 | /* |
1085 | * Check whether we need to move buffer. |
1086 | */ |
1087 | if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) { |
1088 | ret = ttm_bo_move_buffer(bo, placement, interruptible, |
1089 | no_wait_gpu); |
1090 | if (ret) |
1091 | return ret; |
1092 | } else { |
1093 | /* |
		 * Merge the access and other non-mapping-related flag bits
		 * from the compatible memory placement flags into the active
		 * flags.
1096 | */ |
1097 | ttm_flag_masked(&bo->mem.placement, new_flags, |
1098 | ~TTM_PL_MASK_MEMTYPE); |
1099 | } |
1100 | /* |
1101 | * We might need to add a TTM. |
1102 | */ |
1103 | if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { |
1104 | ret = ttm_bo_add_ttm(bo, true); |
1105 | if (ret) |
1106 | return ret; |
1107 | } |
1108 | return 0; |
1109 | } |
1110 | EXPORT_SYMBOL(ttm_bo_validate); |
1111 | |
1112 | int ttm_bo_check_placement(struct ttm_buffer_object *bo, |
1113 | struct ttm_placement *placement) |
1114 | { |
1115 | BUG_ON((placement->fpfn || placement->lpfn) && |
1116 | (bo->mem.num_pages > (placement->lpfn - placement->fpfn))); |
1117 | |
1118 | return 0; |
1119 | } |
1120 | |
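/*
 * Initialize a driver-allocated buffer object and validate it into one
 * of the requested placements.  On error the bo is unreferenced (and
 * therefore destroyed), so the caller must not touch it afterwards.
 *
 * Illustrative call sequence; mydrv_bo and mydrv_bo_destroy are
 * hypothetical driver names:
 *
 *	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct mydrv_bo));
 *	ret = ttm_bo_init(bdev, &mbo->base, size, ttm_bo_type_device,
 *	    &placement, 0, true, NULL, acc_size, NULL, mydrv_bo_destroy);
 */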
1121 | int ttm_bo_init(struct ttm_bo_device *bdev, |
1122 | struct ttm_buffer_object *bo, |
1123 | unsigned long size, |
1124 | enum ttm_bo_type type, |
1125 | struct ttm_placement *placement, |
1126 | uint32_t page_alignment, |
1127 | bool interruptible, |
1128 | struct file *persistent_swap_storage, |
1129 | size_t acc_size, |
1130 | struct sg_table *sg, |
1131 | void (*destroy) (struct ttm_buffer_object *)) |
1132 | { |
1133 | int ret = 0; |
1134 | unsigned long num_pages; |
1135 | struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; |
1136 | bool locked; |
1137 | |
1138 | ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); |
1139 | if (ret) { |
		pr_err("Out of kernel memory\n");
1141 | if (destroy) |
1142 | (*destroy)(bo); |
1143 | else |
1144 | kfree(bo); |
1145 | return -ENOMEM; |
1146 | } |
1147 | |
1148 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1149 | if (num_pages == 0) { |
		pr_err("Illegal buffer object size\n");
1151 | if (destroy) |
1152 | (*destroy)(bo); |
1153 | else |
1154 | kfree(bo); |
1155 | ttm_mem_global_free(mem_glob, acc_size); |
1156 | return -EINVAL; |
1157 | } |
1158 | bo->destroy = destroy; |
1159 | |
1160 | kref_init(&bo->kref); |
1161 | kref_init(&bo->list_kref); |
1162 | atomic_set(&bo->cpu_writers, 0); |
1163 | INIT_LIST_HEAD(&bo->lru); |
1164 | INIT_LIST_HEAD(&bo->ddestroy); |
1165 | INIT_LIST_HEAD(&bo->swap); |
1166 | INIT_LIST_HEAD(&bo->io_reserve_lru); |
1167 | #ifdef __NetBSD__ |
1168 | linux_mutex_init(&bo->wu_mutex); |
1169 | #else |
1170 | mutex_init(&bo->wu_mutex); |
1171 | #endif |
1172 | bo->bdev = bdev; |
1173 | bo->glob = bdev->glob; |
1174 | bo->type = type; |
1175 | bo->num_pages = num_pages; |
1176 | bo->mem.size = num_pages << PAGE_SHIFT; |
1177 | bo->mem.mem_type = TTM_PL_SYSTEM; |
1178 | bo->mem.num_pages = bo->num_pages; |
1179 | bo->mem.mm_node = NULL; |
1180 | bo->mem.page_alignment = page_alignment; |
1181 | bo->mem.bus.io_reserved_vm = false; |
1182 | bo->mem.bus.io_reserved_count = 0; |
1183 | bo->priv_flags = 0; |
1184 | bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); |
1185 | bo->persistent_swap_storage = persistent_swap_storage; |
1186 | bo->acc_size = acc_size; |
1187 | bo->sg = sg; |
1188 | bo->resv = &bo->ttm_resv; |
1189 | reservation_object_init(bo->resv); |
1190 | atomic_inc(&bo->glob->bo_count); |
1191 | #ifdef __NetBSD__ |
1192 | drm_vma_node_init(&bo->vma_node); |
1193 | uvm_obj_init(&bo->uvmobj, bdev->driver->ttm_uvm_ops, true, 1); |
1194 | #else |
1195 | drm_vma_node_reset(&bo->vma_node); |
1196 | #endif |
1197 | |
1198 | ret = ttm_bo_check_placement(bo, placement); |
1199 | |
1200 | /* |
1201 | * For ttm_bo_type_device buffers, allocate |
1202 | * address space from the device. |
1203 | */ |
1204 | if (likely(!ret) && |
1205 | (bo->type == ttm_bo_type_device || |
1206 | bo->type == ttm_bo_type_sg)) |
1207 | ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, |
1208 | bo->mem.num_pages); |
1209 | |
1210 | locked = ww_mutex_trylock(&bo->resv->lock); |
1211 | WARN_ON(!locked); |
1212 | |
1213 | if (likely(!ret)) |
1214 | ret = ttm_bo_validate(bo, placement, interruptible, false); |
1215 | |
1216 | ttm_bo_unreserve(bo); |
1217 | |
1218 | if (unlikely(ret)) |
1219 | ttm_bo_unref(&bo); |
1220 | |
1221 | return ret; |
1222 | } |
1223 | EXPORT_SYMBOL(ttm_bo_init); |
1224 | |
1225 | size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, |
1226 | unsigned long bo_size, |
1227 | unsigned struct_size) |
1228 | { |
1229 | unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; |
1230 | size_t size = 0; |
1231 | |
1232 | size += ttm_round_pot(struct_size); |
1233 | size += PAGE_ALIGN(npages * sizeof(void *)); |
1234 | size += ttm_round_pot(sizeof(struct ttm_tt)); |
1235 | return size; |
1236 | } |
1237 | EXPORT_SYMBOL(ttm_bo_acc_size); |
1238 | |
1239 | size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, |
1240 | unsigned long bo_size, |
1241 | unsigned struct_size) |
1242 | { |
1243 | unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; |
1244 | size_t size = 0; |
1245 | |
1246 | size += ttm_round_pot(struct_size); |
1247 | size += PAGE_ALIGN(npages * sizeof(void *)); |
1248 | size += PAGE_ALIGN(npages * sizeof(dma_addr_t)); |
1249 | size += ttm_round_pot(sizeof(struct ttm_dma_tt)); |
1250 | return size; |
1251 | } |
1252 | EXPORT_SYMBOL(ttm_bo_dma_acc_size); |
1253 | |
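/*
 * Convenience wrapper around ttm_bo_init() that also allocates the
 * ttm_buffer_object itself and computes acc_size; the bo is freed with
 * kfree() when its last reference goes away.
 */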
1254 | int ttm_bo_create(struct ttm_bo_device *bdev, |
1255 | unsigned long size, |
1256 | enum ttm_bo_type type, |
1257 | struct ttm_placement *placement, |
1258 | uint32_t page_alignment, |
1259 | bool interruptible, |
1260 | struct file *persistent_swap_storage, |
1261 | struct ttm_buffer_object **p_bo) |
1262 | { |
1263 | struct ttm_buffer_object *bo; |
1264 | size_t acc_size; |
1265 | int ret; |
1266 | |
1267 | bo = kzalloc(sizeof(*bo), GFP_KERNEL); |
1268 | if (unlikely(bo == NULL)) |
1269 | return -ENOMEM; |
1270 | |
1271 | acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); |
1272 | ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, |
1273 | interruptible, persistent_swap_storage, acc_size, |
1274 | NULL, NULL); |
1275 | if (likely(ret == 0)) |
1276 | *p_bo = bo; |
1277 | |
1278 | return ret; |
1279 | } |
1280 | EXPORT_SYMBOL(ttm_bo_create); |
1281 | |
1282 | static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, |
1283 | unsigned mem_type, bool allow_errors) |
1284 | { |
1285 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
1286 | struct ttm_bo_global *glob = bdev->glob; |
1287 | int ret; |
1288 | |
1289 | /* |
1290 | * Can't use standard list traversal since we're unlocking. |
1291 | */ |
1292 | |
1293 | spin_lock(&glob->lru_lock); |
1294 | while (!list_empty(&man->lru)) { |
1295 | spin_unlock(&glob->lru_lock); |
1296 | ret = ttm_mem_evict_first(bdev, mem_type, false, false); |
1297 | if (ret) { |
1298 | if (allow_errors) { |
1299 | return ret; |
1300 | } else { |
				pr_err("Cleanup eviction failed\n");
1302 | } |
1303 | } |
1304 | spin_lock(&glob->lru_lock); |
1305 | } |
1306 | spin_unlock(&glob->lru_lock); |
1307 | return 0; |
1308 | } |
1309 | |
1310 | int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) |
1311 | { |
1312 | struct ttm_mem_type_manager *man; |
1313 | int ret = -EINVAL; |
1314 | |
1315 | if (mem_type >= TTM_NUM_MEM_TYPES) { |
		pr_err("Illegal memory type %d\n", mem_type);
1317 | return ret; |
1318 | } |
1319 | man = &bdev->man[mem_type]; |
1320 | |
1321 | if (!man->has_type) { |
		pr_err("Trying to take down uninitialized memory manager type %u\n",
1323 | mem_type); |
1324 | return ret; |
1325 | } |
1326 | |
1327 | man->use_type = false; |
1328 | man->has_type = false; |
1329 | |
1330 | ret = 0; |
1331 | if (mem_type > 0) { |
1332 | ttm_bo_force_list_clean(bdev, mem_type, false); |
1333 | |
1334 | ret = (*man->func->takedown)(man); |
1335 | } |
1336 | |
1337 | #ifdef __NetBSD__ |
1338 | linux_mutex_destroy(&man->io_reserve_mutex); |
1339 | #else |
1340 | mutex_destroy(&man->io_reserve_mutex); |
1341 | #endif |
1342 | |
1343 | return ret; |
1344 | } |
1345 | EXPORT_SYMBOL(ttm_bo_clean_mm); |
1346 | |
1347 | int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) |
1348 | { |
1349 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
1350 | |
1351 | if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { |
		pr_err("Illegal memory manager memory type %u\n", mem_type);
1353 | return -EINVAL; |
1354 | } |
1355 | |
1356 | if (!man->has_type) { |
		pr_err("Memory type %u has not been initialized\n", mem_type);
1358 | return 0; |
1359 | } |
1360 | |
1361 | return ttm_bo_force_list_clean(bdev, mem_type, true); |
1362 | } |
1363 | EXPORT_SYMBOL(ttm_bo_evict_mm); |
1364 | |
1365 | int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, |
1366 | unsigned long p_size) |
1367 | { |
1368 | int ret = -EINVAL; |
1369 | struct ttm_mem_type_manager *man; |
1370 | |
1371 | BUG_ON(type >= TTM_NUM_MEM_TYPES); |
1372 | man = &bdev->man[type]; |
1373 | BUG_ON(man->has_type); |
1374 | man->io_reserve_fastpath = true; |
1375 | man->use_io_reserve_lru = false; |
1376 | #ifdef __NetBSD__ |
1377 | linux_mutex_init(&man->io_reserve_mutex); |
1378 | #else |
1379 | mutex_init(&man->io_reserve_mutex); |
1380 | #endif |
1381 | INIT_LIST_HEAD(&man->io_reserve_lru); |
1382 | |
1383 | ret = bdev->driver->init_mem_type(bdev, type, man); |
1384 | if (ret) |
1385 | return ret; |
1386 | man->bdev = bdev; |
1387 | |
1388 | ret = 0; |
1389 | if (type != TTM_PL_SYSTEM) { |
1390 | ret = (*man->func->init)(man, p_size); |
1391 | if (ret) |
1392 | return ret; |
1393 | } |
1394 | man->has_type = true; |
1395 | man->use_type = true; |
1396 | man->size = p_size; |
1397 | |
1398 | INIT_LIST_HEAD(&man->lru); |
1399 | |
1400 | return 0; |
1401 | } |
1402 | EXPORT_SYMBOL(ttm_bo_init_mm); |
1403 | |
1404 | #ifndef __NetBSD__ |
1405 | static void ttm_bo_global_kobj_release(struct kobject *kobj) |
1406 | { |
1407 | struct ttm_bo_global *glob = |
1408 | container_of(kobj, struct ttm_bo_global, kobj); |
1409 | |
1410 | ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink); |
1411 | __free_page(glob->dummy_read_page); |
1412 | mutex_destroy(&glob->device_list_mutex); |
1413 | kfree(glob); |
1414 | } |
1415 | #endif |
1416 | |
1417 | void ttm_bo_global_release(struct drm_global_reference *ref) |
1418 | { |
1419 | struct ttm_bo_global *glob = ref->object; |
1420 | |
1421 | #ifdef __NetBSD__ |
1422 | ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink); |
1423 | BUG_ON(glob->dummy_read_page != NULL); |
1424 | spin_lock_destroy(&glob->lru_lock); |
1425 | linux_mutex_destroy(&glob->device_list_mutex); |
1426 | kfree(glob); |
1427 | #else |
1428 | kobject_del(&glob->kobj); |
1429 | kobject_put(&glob->kobj); |
1430 | #endif |
1431 | } |
1432 | EXPORT_SYMBOL(ttm_bo_global_release); |
1433 | |
1434 | int ttm_bo_global_init(struct drm_global_reference *ref) |
1435 | { |
1436 | struct ttm_bo_global_ref *bo_ref = |
1437 | container_of(ref, struct ttm_bo_global_ref, ref); |
1438 | struct ttm_bo_global *glob = ref->object; |
1439 | int ret; |
1440 | |
1441 | #ifdef __NetBSD__ |
1442 | linux_mutex_init(&glob->device_list_mutex); |
1443 | #else |
1444 | mutex_init(&glob->device_list_mutex); |
1445 | #endif |
1446 | spin_lock_init(&glob->lru_lock); |
1447 | glob->mem_glob = bo_ref->mem_glob; |
1448 | #ifdef __NetBSD__ |
1449 | /* Only used by agp back end, will fix there. */ |
1450 | /* XXX Fix agp back end to DTRT. */ |
1451 | glob->dummy_read_page = NULL; |
1452 | #else |
1453 | glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); |
1454 | |
1455 | if (unlikely(glob->dummy_read_page == NULL)) { |
1456 | ret = -ENOMEM; |
1457 | goto out_no_drp; |
1458 | } |
1459 | #endif |
1460 | |
1461 | INIT_LIST_HEAD(&glob->swap_lru); |
1462 | INIT_LIST_HEAD(&glob->device_list); |
1463 | |
1464 | ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); |
1465 | ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink); |
1466 | if (unlikely(ret != 0)) { |
		pr_err("Could not register buffer object swapout\n");
1468 | goto out_no_shrink; |
1469 | } |
1470 | |
1471 | atomic_set(&glob->bo_count, 0); |
1472 | |
1473 | #ifdef __NetBSD__ |
1474 | ret = 0; |
1475 | #else |
1476 | ret = kobject_init_and_add( |
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1478 | if (unlikely(ret != 0)) |
1479 | kobject_put(&glob->kobj); |
1480 | #endif |
1481 | return ret; |
1482 | out_no_shrink: |
1483 | #ifndef __NetBSD__ |
1484 | __free_page(glob->dummy_read_page); |
1485 | out_no_drp: |
1486 | #endif |
1487 | kfree(glob); |
1488 | return ret; |
1489 | } |
1490 | EXPORT_SYMBOL(ttm_bo_global_init); |
1491 | |
1492 | |
1493 | int ttm_bo_device_release(struct ttm_bo_device *bdev) |
1494 | { |
1495 | int ret = 0; |
1496 | unsigned i = TTM_NUM_MEM_TYPES; |
1497 | struct ttm_mem_type_manager *man; |
1498 | struct ttm_bo_global *glob = bdev->glob; |
1499 | |
1500 | while (i--) { |
1501 | man = &bdev->man[i]; |
1502 | if (man->has_type) { |
1503 | man->use_type = false; |
1504 | if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { |
1505 | ret = -EBUSY; |
				pr_err("DRM memory manager type %d is not clean\n",
1507 | i); |
1508 | } |
1509 | man->has_type = false; |
1510 | } |
1511 | } |
1512 | |
1513 | mutex_lock(&glob->device_list_mutex); |
1514 | list_del(&bdev->device_list); |
1515 | mutex_unlock(&glob->device_list_mutex); |
1516 | |
1517 | cancel_delayed_work_sync(&bdev->wq); |
1518 | |
1519 | while (ttm_bo_delayed_delete(bdev, true)) |
1520 | ; |
1521 | |
1522 | spin_lock(&glob->lru_lock); |
1523 | if (list_empty(&bdev->ddestroy)) |
		TTM_DEBUG("Delayed destroy list was clean\n");
1525 | |
1526 | if (list_empty(&bdev->man[0].lru)) |
		TTM_DEBUG("Swap list was clean\n");
1528 | spin_unlock(&glob->lru_lock); |
1529 | |
1530 | drm_vma_offset_manager_destroy(&bdev->vma_manager); |
1531 | |
1532 | return ret; |
1533 | } |
1534 | EXPORT_SYMBOL(ttm_bo_device_release); |
1535 | |
1536 | int ttm_bo_device_init(struct ttm_bo_device *bdev, |
1537 | struct ttm_bo_global *glob, |
1538 | struct ttm_bo_driver *driver, |
1539 | #ifdef __NetBSD__ |
1540 | bus_space_tag_t memt, |
1541 | bus_dma_tag_t dmat, |
1542 | #else |
1543 | struct address_space *mapping, |
1544 | #endif |
1545 | uint64_t file_page_offset, |
1546 | bool need_dma32) |
1547 | { |
1548 | int ret = -EINVAL; |
1549 | |
1550 | bdev->driver = driver; |
1551 | |
1552 | memset(bdev->man, 0, sizeof(bdev->man)); |
1553 | |
1554 | /* |
1555 | * Initialize the system memory buffer type. |
1556 | * Other types need to be driver / IOCTL initialized. |
1557 | */ |
1558 | ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); |
1559 | if (unlikely(ret != 0)) |
1560 | goto out_no_sys; |
1561 | |
1562 | drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset, |
1563 | 0x10000000); |
1564 | INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); |
1565 | INIT_LIST_HEAD(&bdev->ddestroy); |
1566 | #ifdef __NetBSD__ |
1567 | bdev->memt = memt; |
1568 | bdev->dmat = dmat; |
1569 | #else |
1570 | bdev->dev_mapping = mapping; |
1571 | #endif |
1572 | bdev->glob = glob; |
1573 | bdev->need_dma32 = need_dma32; |
1574 | bdev->val_seq = 0; |
1575 | spin_lock_init(&bdev->fence_lock); |
1576 | mutex_lock(&glob->device_list_mutex); |
1577 | list_add_tail(&bdev->device_list, &glob->device_list); |
1578 | mutex_unlock(&glob->device_list_mutex); |
1579 | |
1580 | return 0; |
1581 | out_no_sys: |
1582 | return ret; |
1583 | } |
1584 | EXPORT_SYMBOL(ttm_bo_device_init); |
1585 | |
1586 | /* |
1587 | * buffer object vm functions. |
1588 | */ |
1589 | |
1590 | bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
1591 | { |
1592 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
1593 | |
1594 | if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) { |
1595 | if (mem->mem_type == TTM_PL_SYSTEM) |
1596 | return false; |
1597 | |
1598 | if (man->flags & TTM_MEMTYPE_FLAG_CMA) |
1599 | return false; |
1600 | |
1601 | if (mem->placement & TTM_PL_FLAG_CACHED) |
1602 | return false; |
1603 | } |
1604 | return true; |
1605 | } |
1606 | |
1607 | void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) |
1608 | { |
1609 | #ifndef __NetBSD__ |
1610 | struct ttm_bo_device *bdev = bo->bdev; |
1611 | #endif |
1612 | |
1613 | #ifdef __NetBSD__ |
1614 | if (bo->mem.bus.is_iomem) { |
1615 | paddr_t start, end, pa; |
1616 | |
1617 | KASSERTMSG((bo->mem.bus.base & (PAGE_SIZE - 1)) == 0, |
1618 | "bo bus base addr not page-aligned: %lx" , |
1619 | bo->mem.bus.base); |
1620 | KASSERTMSG((bo->mem.bus.offset & (PAGE_SIZE - 1)) == 0, |
1621 | "bo bus offset not page-aligned: %lx" , |
1622 | bo->mem.bus.offset); |
1623 | start = bo->mem.bus.base + bo->mem.bus.offset; |
1624 | KASSERT((bo->mem.bus.size & (PAGE_SIZE - 1)) == 0); |
1625 | end = start + bo->mem.bus.size; |
1626 | |
1627 | for (pa = start; pa < end; pa += PAGE_SIZE) |
1628 | pmap_pv_protect(pa, VM_PROT_NONE); |
1629 | } else if (bo->ttm != NULL) { |
1630 | unsigned i; |
1631 | |
1632 | mutex_enter(bo->uvmobj.vmobjlock); |
1633 | for (i = 0; i < bo->ttm->num_pages; i++) |
1634 | pmap_page_protect(&bo->ttm->pages[i]->p_vmp, |
1635 | VM_PROT_NONE); |
1636 | mutex_exit(bo->uvmobj.vmobjlock); |
1637 | } |
1638 | #else |
1639 | drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping); |
1640 | #endif |
1641 | ttm_mem_io_free_vm(bo); |
1642 | } |
1643 | |
1644 | void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) |
1645 | { |
1646 | struct ttm_bo_device *bdev = bo->bdev; |
1647 | struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; |
1648 | |
1649 | ttm_mem_io_lock(man, false); |
1650 | ttm_bo_unmap_virtual_locked(bo); |
1651 | ttm_mem_io_unlock(man); |
1652 | } |
1653 | |
1654 | |
1655 | EXPORT_SYMBOL(ttm_bo_unmap_virtual); |
1656 | |
1657 | |
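/*
 * Wait for all sync objects attached to the buffer to signal, or return
 * -EBUSY immediately when @no_wait is set.  Called with, and returns
 * holding, bdev->fence_lock; the lock is dropped around the driver's
 * sync_obj_wait() call.
 */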
1658 | int ttm_bo_wait(struct ttm_buffer_object *bo, |
1659 | bool lazy, bool interruptible, bool no_wait) |
1660 | { |
1661 | struct ttm_bo_driver *driver = bo->bdev->driver; |
1662 | struct ttm_bo_device *bdev = bo->bdev; |
1663 | void *sync_obj; |
1664 | int ret = 0; |
1665 | |
1666 | if (likely(bo->sync_obj == NULL)) |
1667 | return 0; |
1668 | |
1669 | while (bo->sync_obj) { |
1670 | |
1671 | if (driver->sync_obj_signaled(bo->sync_obj)) { |
1672 | void *tmp_obj = bo->sync_obj; |
1673 | bo->sync_obj = NULL; |
1674 | clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); |
1675 | spin_unlock(&bdev->fence_lock); |
1676 | driver->sync_obj_unref(&tmp_obj); |
1677 | spin_lock(&bdev->fence_lock); |
1678 | continue; |
1679 | } |
1680 | |
1681 | if (no_wait) |
1682 | return -EBUSY; |
1683 | |
1684 | sync_obj = driver->sync_obj_ref(bo->sync_obj); |
1685 | spin_unlock(&bdev->fence_lock); |
1686 | ret = driver->sync_obj_wait(sync_obj, |
1687 | lazy, interruptible); |
1688 | if (unlikely(ret != 0)) { |
1689 | driver->sync_obj_unref(&sync_obj); |
1690 | spin_lock(&bdev->fence_lock); |
1691 | return ret; |
1692 | } |
1693 | spin_lock(&bdev->fence_lock); |
1694 | if (likely(bo->sync_obj == sync_obj)) { |
1695 | void *tmp_obj = bo->sync_obj; |
1696 | bo->sync_obj = NULL; |
1697 | clear_bit(TTM_BO_PRIV_FLAG_MOVING, |
1698 | &bo->priv_flags); |
1699 | spin_unlock(&bdev->fence_lock); |
1700 | driver->sync_obj_unref(&sync_obj); |
1701 | driver->sync_obj_unref(&tmp_obj); |
1702 | spin_lock(&bdev->fence_lock); |
1703 | } else { |
1704 | spin_unlock(&bdev->fence_lock); |
1705 | driver->sync_obj_unref(&sync_obj); |
1706 | spin_lock(&bdev->fence_lock); |
1707 | } |
1708 | } |
1709 | return 0; |
1710 | } |
1711 | EXPORT_SYMBOL(ttm_bo_wait); |
1712 | |
1713 | int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) |
1714 | { |
1715 | struct ttm_bo_device *bdev = bo->bdev; |
1716 | int ret = 0; |
1717 | |
1718 | /* |
1719 | * Using ttm_bo_reserve makes sure the lru lists are updated. |
1720 | */ |
1721 | |
1722 | ret = ttm_bo_reserve(bo, true, no_wait, false, 0); |
1723 | if (unlikely(ret != 0)) |
1724 | return ret; |
1725 | spin_lock(&bdev->fence_lock); |
1726 | ret = ttm_bo_wait(bo, false, true, no_wait); |
1727 | spin_unlock(&bdev->fence_lock); |
1728 | if (likely(ret == 0)) |
1729 | atomic_inc(&bo->cpu_writers); |
1730 | ttm_bo_unreserve(bo); |
1731 | return ret; |
1732 | } |
1733 | EXPORT_SYMBOL(ttm_bo_synccpu_write_grab); |
1734 | |
1735 | void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) |
1736 | { |
1737 | atomic_dec(&bo->cpu_writers); |
1738 | } |
1739 | EXPORT_SYMBOL(ttm_bo_synccpu_write_release); |
1740 | |
1741 | /** |
1742 | * A buffer object shrink method that tries to swap out the first |
1743 | * buffer object on the bo_global::swap_lru list. |
1744 | */ |
1745 | |
1746 | static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) |
1747 | { |
1748 | struct ttm_bo_global *glob = |
1749 | container_of(shrink, struct ttm_bo_global, shrink); |
1750 | struct ttm_buffer_object *bo; |
1751 | int ret = -EBUSY; |
1752 | int put_count; |
1753 | uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); |
1754 | |
1755 | spin_lock(&glob->lru_lock); |
1756 | list_for_each_entry(bo, &glob->swap_lru, swap) { |
1757 | ret = __ttm_bo_reserve(bo, false, true, false, 0); |
1758 | if (!ret) |
1759 | break; |
1760 | } |
1761 | |
1762 | if (ret) { |
1763 | spin_unlock(&glob->lru_lock); |
1764 | return ret; |
1765 | } |
1766 | |
1767 | kref_get(&bo->list_kref); |
1768 | |
1769 | if (!list_empty(&bo->ddestroy)) { |
1770 | ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false); |
1771 | kref_put(&bo->list_kref, ttm_bo_release_list); |
1772 | return ret; |
1773 | } |
1774 | |
1775 | put_count = ttm_bo_del_from_lru(bo); |
1776 | spin_unlock(&glob->lru_lock); |
1777 | |
1778 | ttm_bo_list_ref_sub(bo, put_count, true); |
1779 | |
1780 | /** |
1781 | * Wait for GPU, then move to system cached. |
1782 | */ |
1783 | |
1784 | spin_lock(&bo->bdev->fence_lock); |
1785 | ret = ttm_bo_wait(bo, false, false, false); |
1786 | spin_unlock(&bo->bdev->fence_lock); |
1787 | |
1788 | if (unlikely(ret != 0)) |
1789 | goto out; |
1790 | |
1791 | if ((bo->mem.placement & swap_placement) != swap_placement) { |
1792 | struct ttm_mem_reg evict_mem; |
1793 | |
1794 | evict_mem = bo->mem; |
1795 | evict_mem.mm_node = NULL; |
1796 | evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; |
1797 | evict_mem.mem_type = TTM_PL_SYSTEM; |
1798 | |
1799 | ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, |
1800 | false, false); |
1801 | if (unlikely(ret != 0)) |
1802 | goto out; |
1803 | } |
1804 | |
1805 | ttm_bo_unmap_virtual(bo); |
1806 | |
1807 | /** |
1808 | * Swap out. Buffer will be swapped in again as soon as |
1809 | * anyone tries to access a ttm page. |
1810 | */ |
1811 | |
1812 | if (bo->bdev->driver->swap_notify) |
1813 | bo->bdev->driver->swap_notify(bo); |
1814 | |
1815 | ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); |
1816 | out: |
1817 | |
1818 | /** |
1819 | * |
1820 | * Unreserve without putting on LRU to avoid swapping out an |
1821 | * already swapped buffer. |
1822 | */ |
1823 | |
1824 | __ttm_bo_unreserve(bo); |
1825 | kref_put(&bo->list_kref, ttm_bo_release_list); |
1826 | return ret; |
1827 | } |
1828 | |
1829 | void ttm_bo_swapout_all(struct ttm_bo_device *bdev) |
1830 | { |
1831 | while (ttm_bo_swapout(&bdev->glob->shrink) == 0) |
1832 | ; |
1833 | } |
1834 | EXPORT_SYMBOL(ttm_bo_swapout_all); |
1835 | |
1836 | /** |
1837 | * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become |
1838 | * unreserved |
1839 | * |
1840 | * @bo: Pointer to buffer |
1841 | */ |
1842 | int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo) |
1843 | { |
1844 | int ret; |
1845 | |
1846 | /* |
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
1850 | * bo::wu_mutex can go away if we change locking order to |
1851 | * mmap_sem -> bo::reserve. |
1852 | */ |
1853 | ret = mutex_lock_interruptible(&bo->wu_mutex); |
1854 | if (unlikely(ret != 0)) |
1855 | return -ERESTARTSYS; |
1856 | if (!ww_mutex_is_locked(&bo->resv->lock)) |
1857 | goto out_unlock; |
1858 | ret = __ttm_bo_reserve(bo, true, false, false, NULL); |
1859 | if (unlikely(ret != 0)) |
1860 | goto out_unlock; |
1861 | __ttm_bo_unreserve(bo); |
1862 | |
1863 | out_unlock: |
1864 | mutex_unlock(&bo->wu_mutex); |
1865 | return ret; |
1866 | } |
1867 | |