/*	$NetBSD: ttm_bo_vm.c,v 1.10 2015/07/28 01:25:00 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.10 2015/07/28 01:25:00 riastradh Exp $");

#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>

#include <linux/bitops.h>

#include <drm/drm_vma_manager.h>

#include <ttm/ttm_bo_driver.h>

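/*
 * UVM pager interface to TTM buffer objects, so that TTM buffers can
 * be mmapped: reference/detach hooks for object lifetime, a fault
 * handler that enters mappings directly with pmap_enter, and a lookup
 * routine keyed on the drm vma offset manager.
 *
 * Sketch (for illustration only, not part of this file): a caller
 * would wire these entry points up as uvm pager operations, along the
 * lines of
 *
 *	static const struct uvm_pagerops example_ttm_uvm_ops = {
 *		.pgo_reference	= ttm_bo_uvm_reference,
 *		.pgo_detach	= ttm_bo_uvm_detach,
 *		.pgo_fault	= ttm_bo_uvm_fault,
 *	};
 *
 * where example_ttm_uvm_ops is a hypothetical name.
 */
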
static int ttm_bo_uvm_fault_idle(struct ttm_buffer_object *,
    struct uvm_faultinfo *);
static int ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
    unsigned long, struct ttm_buffer_object **);

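/*
 * ttm_bo_uvm_reference(uobj)
 *
 *	uvm pager reference entry point: acquire an additional
 *	reference to the TTM buffer object containing uobj.
 */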
void
ttm_bo_uvm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	(void)ttm_bo_reference(bo);
}

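/*
 * ttm_bo_uvm_detach(uobj)
 *
 *	uvm pager detach entry point: release one reference to the TTM
 *	buffer object containing uobj, freeing it if this was the last
 *	one.
 */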
void
ttm_bo_uvm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	ttm_bo_unref(&bo);
	KASSERT(bo == NULL);
}

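/*
 * ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx, access_type, flags)
 *
 *	uvm pager fault entry point: handle a page fault at vaddr on a
 *	mapping of a TTM buffer object.  Reserve the buffer, wait for
 *	any pending move, and enter mappings for npages pages with
 *	pmap_enter, either from the bus space aperture (iomem) or from
 *	the ttm page array.  Return 0 on success, ERESTART if the
 *	fault must be retried, or another errno on failure.
 */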
int
ttm_bo_uvm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct ttm_bo_device *const bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
	union {
		bus_addr_t base;
		struct ttm_tt *ttm;
	} u;
	size_t size __diagused;
	voff_t uoffset;		/* offset in bytes into bo */
	unsigned startpage;	/* offset in pages into bo */
	unsigned i;
	vm_prot_t vm_prot;	/* VM_PROT_* */
	pgprot_t pgprot;	/* VM_PROT_* | PMAP_* cacheability flags */
	unsigned mmapflags;
	int ret;

	/* Thanks, uvm, but we don't need this lock.  */
	mutex_exit(uobj->vmobjlock);

	/* Copy-on-write mappings make no sense for the graphics aperture.  */
	if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
		ret = -EIO;
		goto out0;
	}

	/* Try to lock the buffer.  */
	ret = ttm_bo_reserve(bo, true, true, false, NULL);
	if (ret) {
		if (ret != -EBUSY)
			goto out0;
		/*
		 * It's currently locked.  Unlock the fault, wait for
		 * it, and start over.
		 */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
		(void)ttm_bo_wait_unreserved(bo);
		return ERESTART;
	}

	/* drm prime buffers are not mappable.  XXX Catch this earlier?  */
	if (bo->ttm && ISSET(bo->ttm->page_flags, TTM_PAGE_FLAG_SG)) {
		ret = -EINVAL;
		goto out1;
	}

	/* Notify the driver of a fault if it wants.  */
	if (bdev->driver->fault_reserve_notify) {
		ret = (*bdev->driver->fault_reserve_notify)(bo);
		if (ret) {
			if (ret == -ERESTART)
				ret = -EIO;
			goto out1;
		}
	}

	ret = ttm_bo_uvm_fault_idle(bo, ufi);
	if (ret) {
		KASSERT(ret == -ERESTART);
		/* ttm_bo_uvm_fault_idle calls uvmfault_unlockall for us.  */
		ttm_bo_unreserve(bo);
		/* XXX errno Linux->NetBSD */
		return -ret;
	}

	ret = ttm_mem_io_lock(man, true);
	if (ret) {
		ret = -EIO;
		goto out1;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (ret) {
		ret = -EIO;
		goto out2;
	}

	vm_prot = ufi->entry->protection;
	if (bo->mem.bus.is_iomem) {
		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
		size = bo->mem.bus.size;
		pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
	} else {
		u.ttm = bo->ttm;
		size = (bo->ttm->num_pages << PAGE_SHIFT);
		if (ISSET(bo->mem.placement, TTM_PL_FLAG_CACHED))
			pgprot = vm_prot;
		else
			pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
		if ((*u.ttm->bdev->driver->ttm_tt_populate)(u.ttm)) {
			ret = -ENOMEM;
			goto out2;
		}
	}

	KASSERT(ufi->entry->start <= vaddr);
	KASSERT((ufi->entry->offset & (PAGE_SIZE - 1)) == 0);
	KASSERT(ufi->entry->offset <= size);
	KASSERT((vaddr - ufi->entry->start) <= (size - ufi->entry->offset));
	KASSERT(((size_t)npages << PAGE_SHIFT) <=
	    ((size - ufi->entry->offset) - (vaddr - ufi->entry->start)));
	uoffset = (ufi->entry->offset + (vaddr - ufi->entry->start));
	startpage = (uoffset >> PAGE_SHIFT);
	for (i = 0; i < npages; i++) {
		paddr_t paddr;

		/* XXX PGO_ALLPAGES?  */
		if (pps[i] == PGO_DONTCARE)
			continue;
		if (bo->mem.bus.is_iomem) {
			const paddr_t cookie = bus_space_mmap(bdev->memt,
			    u.base, ((startpage + i) << PAGE_SHIFT), vm_prot,
			    0);

			paddr = pmap_phys_address(cookie);
			mmapflags = pmap_mmap_flags(cookie);
		} else {
			paddr = page_to_phys(u.ttm->pages[startpage + i]);
			mmapflags = 0;
		}
		ret = -pmap_enter(ufi->orig_map->pmap, vaddr + i*PAGE_SIZE,
		    paddr, vm_prot, (PMAP_CANFAIL | pgprot | mmapflags));
		if (ret)
			goto out3;
	}

out3:	pmap_update(ufi->orig_map->pmap);
out2:	ttm_mem_io_unlock(man);
out1:	ttm_bo_unreserve(bo);
out0:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
	/* XXX errno Linux->NetBSD */
	return -ret;
}

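/*
 * ttm_bo_uvm_fault_idle(bo, ufi)
 *
 *	Wait for any pending move of bo to complete.  Return 0 if bo
 *	was idle or became idle without blocking; otherwise unlock the
 *	fault, wait for the move, and return -ERESTART so the caller
 *	restarts the fault from scratch.
 */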
static int
ttm_bo_uvm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *ufi)
{
	struct ttm_bo_device *const bdev = bo->bdev;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	if (__predict_true(!test_bit(TTM_BO_PRIV_FLAG_MOVING,
		    &bo->priv_flags)))
		goto out;
	if (ttm_bo_wait(bo, false, false, true) == 0)
		goto out;

	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
	(void)ttm_bo_wait(bo, false, true, false);
	ret = -ERESTART;

out:	spin_unlock(&bdev->fence_lock);
	return ret;
}

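/*
 * ttm_bo_mmap_object(bdev, offset, size, prot, uobjp, uoffsetp, file)
 *
 *	Look up the TTM buffer object in bdev that is mapped at the
 *	page-aligned byte range [offset, offset + size), and verify
 *	that file is allowed to access it.  On success, store in
 *	*uobjp a new reference to the object's uvm object and in
 *	*uoffsetp the offset in bytes into it at which the mapping
 *	begins.
 */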
int
ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	const unsigned long startpage = (offset >> PAGE_SHIFT);
	const unsigned long npages = (size >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(0 == (size & (PAGE_SIZE - 1)));

	ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo);
	if (ret)
		goto fail0;
	KASSERT(drm_vma_node_start(&bo->vma_node) <= startpage);
	/* XXX Just assert this?  */
	if (__predict_false(bdev->driver->verify_access == NULL)) {
		ret = -EPERM;
		goto fail1;
	}
	ret = (*bdev->driver->verify_access)(bo, file);
	if (ret)
		goto fail1;

	/* Success!  */
	*uobjp = &bo->uvmobj;
	*uoffsetp = (offset -
	    (drm_vma_node_start(&bo->vma_node) << PAGE_SHIFT));
	return 0;

fail1:	ttm_bo_unref(&bo);
fail0:	KASSERT(ret);
	return ret;
}

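/*
 * ttm_bo_uvm_lookup(bdev, startpage, npages, bop)
 *
 *	Find the TTM buffer object in bdev's vma offset manager that
 *	covers the npages pages starting at startpage, and store in
 *	*bop a new reference to it.  Return -ENOENT if there is none
 *	or if it is already being destroyed.
 */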
static int
ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage,
    unsigned long npages, struct ttm_buffer_object **bop)
{
	struct ttm_buffer_object *bo = NULL;
	struct drm_vma_offset_node *node;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, startpage,
	    npages);
	if (node != NULL) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}
	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (bo == NULL)
		return -ENOENT;

	*bop = bo;
	return 0;
}