/* $NetBSD: nouveau_sgdma.c,v 1.1.1.2 2014/08/06 12:36:23 riastradh Exp $ */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_sgdma.c,v 1.1.1.2 2014/08/06 12:36:23 riastradh Exp $");

#include <linux/pagemap.h>
#include <linux/slab.h>

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"

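/*
 * TTM backend object: wraps the DMA-aware ttm_dma_tt and remembers the
 * drm_device and the memory node the buffer is currently bound to.
 */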
struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c work properly, otherwise we would have to move
	 * them here
	 */
	struct ttm_dma_tt ttm;
	struct drm_device *dev;
	struct nouveau_mem *node;
};

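/*
 * Destroy hook shared by both backends: release the DMA-aware TTM state
 * and free the backend object itself.
 */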
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

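/*
 * Bind hook for chipsets prior to NV50: record the backing storage
 * (scatterlist or DMA address array) in the memory node and map it
 * immediately through nouveau_vm_map().
 */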
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	if (ttm->sg) {
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
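	/* store the size in 4KiB units: pages -> bytes -> bytes >> 12 */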
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;

	nouveau_vm_map(&node->vma[0], node);
	nvbe->node = node;
	return 0;
}

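/* Unbind hook for pre-NV50 chipsets: tear down the mapping made at bind time. */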
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	nouveau_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}

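/* TTM backend functions used for chipsets prior to NV50. */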
static struct ttm_backend_func nv04_sgdma_backend = {
	.bind = nv04_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

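/*
 * Bind hook for NV50 and newer chipsets: only record the backing storage
 * here; the VM mapping itself is performed from move_notify().
 */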
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg) {
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

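/* TTM backend functions used for NV50 and newer chipsets. */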
static struct ttm_backend_func nv50_sgdma_backend = {
	.bind = nv50_sgdma_bind,
	.unbind = nv50_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};

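/*
 * Allocate a DMA-aware TTM for a buffer object, selecting the pre-NV50
 * or NV50+ backend functions based on the card type.
 */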
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = drm->dev;
	if (nv_device(drm->device)->card_type < NV_50)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

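	/*
	 * No explicit kfree(nvbe) on failure: ttm_dma_tt_init() is expected
	 * to call back into our destroy hook when it fails, which already
	 * frees nvbe.
	 */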
	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
		return NULL;
	return &nvbe->ttm.ttm;
}