/*	$NetBSD: nouveau_ttm.c,v 1.4 2015/04/03 01:09:42 riastradh Exp $	*/

/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_ttm.c,v 1.4 2015/04/03 01:09:42 riastradh Exp $");

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/instmem.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

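/*
 * TTM memory-type manager for VRAM.  Allocation and freeing are
 * delegated to the fb subdevice's ram backend; the manager itself just
 * caches the nouveau_fb pointer in man->priv for the debug hook.
 */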
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	man->priv = pfb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

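/*
 * Unmap and release any GPU virtual address space still attached to a
 * nouveau_mem node before the node itself is freed.
 */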
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	nouveau_mem_node_cleanup(mem->mm_node);
	pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
}

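/*
 * Allocate VRAM for a buffer object.  Objects tagged
 * NOUVEAU_GEM_TILE_NONCONTIG may be satisfied with noncontiguous
 * chunks no smaller than one of the object's pages.
 */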
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
			    mem->page_alignment << PAGE_SHIFT, size_nc,
			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		/* To TTM, -ENOSPC is not fatal: returning 0 with a NULL
		 * mm_node asks it to evict something and retry. */
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start = node->offset >> PAGE_SHIFT;
	return 0;
}

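/*
 * Dump the VRAM mm's node list under the subdev mutex.  Offsets and
 * lengths are kept in 4KiB units, hence the << 12 conversions.
 */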
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_fb *pfb = man->priv;
	struct nouveau_mm *mm = &pfb->vram;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&nv_subdev(pfb)->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010"PRIx64" 0x%010"PRIx64"\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&nv_subdev(pfb)->mutex);

	printk(KERN_DEBUG "%s total: 0x%010"PRIx64" free: 0x%010"PRIx64"\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_vram_manager_init,
	.takedown = nouveau_vram_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_vram_manager_del,
	.debug = nouveau_vram_manager_debug
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

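/*
 * "Allocate" GART space for a buffer object.  Only the page shift and
 * the chipset-specific memory type are recorded here; a GPU virtual
 * address is assigned later, when the buffer is actually mapped, so
 * mem->start stays 0.
 */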
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (nv_device(drm->device)->card_type) {
	case NV_50:
		/* no memtype on the original NV50 (chipset 0x50) */
		if (nv_device(drm->device)->chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_C0:
	case NV_D0:
	case NV_E0:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		break;
	}

	mem->mm_node = node;
	mem->start = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_gart_manager_init,
	.takedown = nouveau_gart_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_gart_manager_del,
	.debug = nouveau_gart_manager_debug
};

#include <core/subdev/vm/nv04.h>
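/*
 * The NV04 GART manager differs from the generic one above: it takes a
 * reference on the nv04 vmmgr's VM and carves a VMA out of it for each
 * allocation, so buffers get their GPU address up front.
 */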
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
	struct nv04_vmmgr_priv *priv = (void *)vmm;
	struct nouveau_vm *vm = NULL;
	nouveau_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nouveau_vm *vm = man->priv;
	nouveau_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nouveau_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      struct ttm_placement *placement,
		      struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			     NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nv04_gart_manager_init,
	.takedown = nv04_gart_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nv04_gart_manager_del,
	.debug = nv04_gart_manager_debug
};

#ifdef __NetBSD__

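/*
 * NetBSD mmap entry point: offsets below DRM_FILE_PAGE_OFFSET belong
 * to legacy DRM maps, everything above it to TTM buffer objects.
 */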
int
nouveau_ttm_mmap_object(struct drm_device *dev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	struct nouveau_drm *const drm = nouveau_drm(dev);

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));

	if (__predict_false((offset >> PAGE_SHIFT) < DRM_FILE_PAGE_OFFSET))
		return drm_mmap_object(dev, offset, size, prot, uobjp,
		    uoffsetp, file);
	else
		return ttm_bo_mmap_object(&drm->ttm.bdev, offset, size, prot,
		    uobjp, uoffsetp, file);
}

#else

int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

#endif

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

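/*
 * Take references on the global TTM memory-accounting and BO state,
 * creating them on first use.  mem_global_ref.release doubles as the
 * "initialized" flag tested by nouveau_ttm_global_release().
 */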
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

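/*
 * Set up TTM for the device: pick a DMA mask, initialize the BO
 * driver, and create the VRAM and GART memory-type managers.
 */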
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	struct nouveau_device *device = nv_device(drm->device);
	u32 bits;
	int ret;

	bits = nouveau_vmmgr(drm->device)->dma_bits;
	if (nv_device_is_pci(device)) {
		/* With AGP enabled, or if the device cannot DMA to the
		 * vmmgr's full range, limit DMA addressing to 32 bits. */
		if (drm->agp.stat == ENABLED ||
		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
			bits = 32;

#ifdef __NetBSD__
		ret = drm_limit_dma_space(dev, 0, DMA_BIT_MASK(bits));
		if (ret)
			return ret;
#else
		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
		if (ret)
			return ret;

		/* A failed wide coherent mask is non-fatal; fall back
		 * to 32 bits. */
		ret = pci_set_consistent_dma_mask(dev->pdev,
						  DMA_BIT_MASK(bits));
		if (ret)
			pci_set_consistent_dma_mask(dev->pdev,
						    DMA_BIT_MASK(32));
#endif
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
#ifdef __NetBSD__
				 dev->bst,
				 dev->dmat,
#else
				 dev->anon_inode->i_mapping,
#endif
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = nouveau_fb(drm->device)->ram->size;
	drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	/* Make mappings of the VRAM aperture (resource 1)
	 * write-combined where the architecture supports it. */
	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(device, 1),
					 nv_device_resource_len(device, 1));

#ifdef __NetBSD__
	pmap_pv_track(nv_device_resource_start(device, 1),
	    nv_device_resource_len(device, 1));
#endif

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

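/*
 * Tear down TTM state in roughly the reverse order of
 * nouveau_ttm_init().
 */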
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;

#ifdef __NetBSD__
	pmap_pv_untrack(nv_device_resource_start(nv_device(drm->device), 1),
	    nv_device_resource_len(nv_device(drm->device), 1));
#endif
}