/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/export.h>

#ifdef __NetBSD__ /* PMAP_* caching flags for ttm_io_prot */
#include <uvm/uvm_pmap.h>
#endif
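
/*
 * ttm_bo_free_old_node
 *
 * Release the memory manager node backing bo->mem, i.e. the space of
 * the buffer object's current placement.
 */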
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}
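
/*
 * ttm_bo_move_ttm
 *
 * Move a ttm-backed buffer object between system memory and a
 * TTM-bindable memory type: unbind from the old placement if it is not
 * system memory, switch the ttm's caching to match new_mem, and bind
 * to new_mem unless it is system memory.  On success the buffer object
 * takes over new_mem, including its mm_node.
 */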
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
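
/*
 * ttm_mem_io_lock, ttm_mem_io_unlock
 *
 * Serialize io space reservation and release for a memory type
 * manager.  Managers with io_reserve_fastpath set need no such
 * serialization and skip the mutex entirely.
 */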
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
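
/*
 * ttm_mem_io_evict
 *
 * Unmap the least recently used buffer object on the manager's io
 * reserve LRU so that its io space can be released and reused.
 * Returns -EAGAIN if there is nothing left to evict.
 */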
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
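
/*
 * ttm_mem_io_reserve
 *
 * Ask the driver to reserve io space for mem, evicting another buffer
 * object's reservation and retrying if the driver returns -EAGAIN.
 * On the fast path the driver is called directly, without reference
 * counting.
 */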
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);
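
/*
 * ttm_mem_io_free
 *
 * Drop a reference on mem's io reservation and let the driver free it
 * when the last reference goes away.  A no-op on the fast path.
 */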
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);
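
/*
 * ttm_mem_io_reserve_vm, ttm_mem_io_free_vm
 *
 * Reserve or release io space for CPU (vm) access to a buffer object.
 * If the manager uses an io reserve LRU, the reservation is made
 * evictable by putting the buffer object on that list.
 */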
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}
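
/*
 * ttm_mem_reg_ioremap, ttm_mem_reg_iounmap
 *
 * Map or unmap the io space behind mem in kernel virtual address
 * space, preferring a driver-provided mapping (mem->bus.addr) when
 * there is one.  For placements that are not iomem, *virtual is left
 * NULL.
 */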
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
#ifdef __NetBSD__
		const bus_addr_t bus_addr = (mem->bus.base + mem->bus.offset);
		int flags = BUS_SPACE_MAP_LINEAR;

		if (ISSET(mem->placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(bdev->memt, bus_addr, mem->bus.size,
		    flags, &mem->bus.memh);
		if (ret) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return ret;
		}
		addr = bus_space_vaddr(bdev->memt, mem->bus.memh);
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
#endif
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
#ifdef __NetBSD__
		bus_space_unmap(bdev->memt, mem->bus.memh, mem->bus.size);
#else
		iounmap(virtual);
#endif
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}
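
/*
 * Local substitutes for ioread32/iowrite32, used by ttm_copy_io_page()
 * below: plain 32-bit loads and stores with a compiler barrier.  The
 * defines are undone again right after ttm_copy_io_page().
 */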
#ifdef __NetBSD__
# define ioread32 fake_ioread32
# define iowrite32 fake_iowrite32

static inline uint32_t
fake_ioread32(const volatile uint32_t *p)
{
	uint32_t v;

	v = *p;
	__insn_barrier(); /* XXX */

	return v;
}

static inline void
fake_iowrite32(uint32_t v, volatile uint32_t *p)
{

	__insn_barrier(); /* XXX */
	*p = v;
}
#endif
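
/*
 * ttm_copy_io_page
 *
 * Copy page number `page' from one iomapped region to another, 32 bits
 * at a time.
 */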
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef __NetBSD__
# undef ioread32
# undef iowrite32
#endif
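
/*
 * ttm_copy_io_ttm_page
 *
 * Copy one page from an iomapped source into the ttm's page array,
 * mapping the destination page with the requested protection.
 * ttm_copy_ttm_io_page() below is its mirror image for the opposite
 * direction.
 */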
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
		vunmap(dst, 1);
#else
		vunmap(dst);
#endif
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
		vunmap(src, 1);
#else
		vunmap(src);
#endif
	else
		kunmap(s);
#endif

	return 0;
}
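
/*
 * ttm_bo_move_memcpy
 *
 * Fallback move path: copy the buffer object's contents page by page
 * with the CPU when no accelerated copy is available.  Overlapping
 * moves within the same memory type are copied backwards, and when the
 * destination is a fixed (non-TTM) memory type the ttm is destroyed
 * after the copy.
 */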
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move: both placements are backed by the same ttm
	 * pages, so there is nothing to copy.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * 0 on success, a negative error code on failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
#ifdef __NetBSD__
	linux_mutex_init(&fbo->wu_mutex);
	drm_vma_node_init(&fbo->vma_node);
	uvm_obj_init(&fbo->uvmobj, bdev->driver->ttm_uvm_ops, true, 1);
	mutex_obj_hold(bo->uvmobj.vmobjlock);
	uvm_obj_setlock(&fbo->uvmobj, bo->uvmobj.vmobjlock);
#else
	mutex_init(&fbo->wu_mutex);
	drm_vma_node_reset(&fbo->vma_node);
#endif
	atomic_set(&fbo->cpu_writers, 0);

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	spin_unlock(&bdev->fence_lock);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}
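
/*
 * ttm_io_prot
 *
 * Translate TTM caching flags into a page protection value: PMAP_*
 * cacheability flags on NetBSD, architecture-specific pgprot modifiers
 * on Linux.
 */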
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#ifdef __NetBSD__
	switch (caching_flags & TTM_PL_MASK_CACHING) {
	case TTM_PL_FLAG_CACHED:
		return (tmp | PMAP_WRITE_BACK);
	case TTM_PL_FLAG_WC:
		return (tmp | PMAP_WRITE_COMBINE);
	case TTM_PL_FLAG_UNCACHED:
		return (tmp | PMAP_NOCACHE);
	default:
		panic("invalid caching flags: %" PRIx32 "\n",
		    (caching_flags & TTM_PL_MASK_CACHING));
	}
#else
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
#ifdef __NetBSD__
		{
			bus_addr_t addr;
			int flags = BUS_SPACE_MAP_LINEAR;
			int ret;

			addr = (bo->mem.bus.base + bo->mem.bus.offset + offset);
			if (ISSET(mem->placement, TTM_PL_FLAG_WC))
				flags |= BUS_SPACE_MAP_PREFETCHABLE;
			/* XXX errno NetBSD->Linux */
			ret = -bus_space_map(bo->bdev->memt, addr, size, flags,
			    &map->u.io.memh);
			if (ret)
				return ret;
			map->u.io.size = size;
			map->virtual = bus_space_vaddr(bo->bdev->memt,
			    map->u.io.memh);
		}
#else
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
#endif
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
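
/*
 * ttm_bo_kmap_ttm
 *
 * Map num_pages of the buffer object's ttm pages, starting at
 * start_page, into kernel virtual address space with the caching
 * implied by the placement, populating the ttm first if necessary.
 */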
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
#ifdef __NetBSD__
	unsigned i;
	vaddr_t vaddr;
#endif
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

#ifdef __NetBSD__
	/*
	 * Can't use uvm_map here because it provides no way to pass
	 * along the cacheability flags.  So we'll uvm_km_alloc
	 * ourselves some KVA and then pmap_kenter_pa directly.
	 */

	KASSERT(num_pages <= ttm->num_pages);
	KASSERT(start_page <= (ttm->num_pages - num_pages));
	prot = ttm_io_prot(mem->placement, (VM_PROT_READ | VM_PROT_WRITE));
	vaddr = uvm_km_alloc(kernel_map, (num_pages << PAGE_SHIFT), PAGE_SIZE,
	    UVM_KMF_VAONLY | UVM_KMF_CANFAIL | UVM_KMF_WAITVA);
	if (vaddr == 0)
		return -ENOMEM;
	for (i = 0; i < num_pages; i++)
		pmap_kenter_pa(vaddr + i*PAGE_SIZE,
		    page_to_phys(ttm->pages[start_page + i]),
		    (VM_PROT_READ | VM_PROT_WRITE), prot);
	pmap_update(pmap_kernel());
	map->bo_kmap_type = ttm_bo_map_vmap;
	map->u.uvm.vsize = (num_pages << PAGE_SHIFT);
	map->virtual = (void *)vaddr;
	return 0;
#else
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
#endif
}
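
/*
 * ttm_bo_kmap
 *
 * Map part of a buffer object into kernel virtual address space: iomem
 * placements are ioremapped, others are mapped through the ttm's
 * pages.  Undo the mapping with ttm_bo_kunmap().
 *
 * A minimal usage sketch (error handling elided):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *vaddr;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		vaddr = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access the buffer through vaddr ...
 *		ttm_bo_kunmap(&map);
 *	}
 */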
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
#ifdef __NetBSD__
		bus_space_unmap(bo->bdev->memt, map->u.io.memh,
		    map->u.io.size);
#else
		iounmap(map->virtual);
#endif
		break;
	case ttm_bo_map_vmap:
#ifdef __NetBSD__
		pmap_kremove((vaddr_t)map->virtual, map->u.uvm.vsize);
		pmap_update(pmap_kernel());
		uvm_km_free(kernel_map, (vaddr_t)map->virtual,
		    map->u.uvm.vsize, UVM_KMF_VAONLY);
#else
		vunmap(map->virtual);
#endif
		break;
	case ttm_bo_map_kmap:
#ifdef __NetBSD__
		panic("ttm_bo_map_kmap does not exist in NetBSD");
#else
		kunmap(map->page);
#endif
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
#ifndef __NetBSD__
	map->page = NULL;
#endif
}
EXPORT_SYMBOL(ttm_bo_kunmap);
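
/*
 * ttm_bo_move_accel_cleanup
 *
 * Finish an accelerated (GPU) move: make sync_obj the buffer object's
 * new fence.  On eviction, wait for idle and tear down the old
 * placement; otherwise hand the old placement (and, for fixed memory
 * types, the ttm) to a ghost buffer object that is destroyed once the
 * GPU copy has completed.
 */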
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive.  Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);