1 | /* $NetBSD: nouveau_subdev_bar_nv50.c,v 1.1.1.1 2014/08/06 12:36:28 riastradh Exp $ */ |
2 | |
3 | /* |
4 | * Copyright 2012 Red Hat Inc. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Ben Skeggs |
25 | */ |
26 | |
27 | #include <sys/cdefs.h> |
28 | __KERNEL_RCSID(0, "$NetBSD: nouveau_subdev_bar_nv50.c,v 1.1.1.1 2014/08/06 12:36:28 riastradh Exp $" ); |
29 | |
30 | #include <core/gpuobj.h> |
31 | |
32 | #include <subdev/timer.h> |
33 | #include <subdev/fb.h> |
34 | #include <subdev/vm.h> |
35 | |
36 | #include "priv.h" |
37 | |
/*
 * Private state for the nv50-family BAR subdev.  Embeds the generic
 * nouveau_bar base object (must stay first so the (void *) downcasts
 * in this file remain valid) plus the instmem objects and VMs that
 * back the two GPU apertures: BAR1 (user-visible framebuffer window)
 * and BAR3 (kernel "ramin" window).
 */
struct nv50_bar_priv {
	struct nouveau_bar base;	/* generic BAR subdev; must be first member */
	spinlock_t lock;		/* serializes the flush register sequences */
	struct nouveau_gpuobj *mem;	/* 128KiB heap all other objects live in */
	struct nouveau_gpuobj *pad;	/* chipset-dependent padding allocation */
	struct nouveau_gpuobj *pgd;	/* page directory shared by both BAR VMs */
	struct nouveau_vm *bar1_vm;	/* address space behind BAR1 */
	struct nouveau_gpuobj *bar1;	/* DMA/ctxdma object describing BAR1 */
	struct nouveau_vm *bar3_vm;	/* address space behind BAR3 */
	struct nouveau_gpuobj *bar3;	/* DMA/ctxdma object describing BAR3 */
};
49 | |
50 | static int |
51 | nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem, |
52 | u32 flags, struct nouveau_vma *vma) |
53 | { |
54 | struct nv50_bar_priv *priv = (void *)bar; |
55 | int ret; |
56 | |
57 | ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma); |
58 | if (ret) |
59 | return ret; |
60 | |
61 | nouveau_vm_map(vma, mem); |
62 | return 0; |
63 | } |
64 | |
65 | static int |
66 | nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem, |
67 | u32 flags, struct nouveau_vma *vma) |
68 | { |
69 | struct nv50_bar_priv *priv = (void *)bar; |
70 | int ret; |
71 | |
72 | ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma); |
73 | if (ret) |
74 | return ret; |
75 | |
76 | nouveau_vm_map(vma, mem); |
77 | return 0; |
78 | } |
79 | |
/*
 * Tear down a BAR mapping created by nv50_bar_kmap()/nv50_bar_umap():
 * first unmap the pages from the VM, then return the virtual range.
 * The order matters — the range must not be reusable while still mapped.
 */
static void
nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
}
86 | |
/*
 * Flush BAR writes on the original nv50 chipset.
 *
 * Kicks the flush by writing bit 0 of register 0x00330c, then polls
 * until bit 1 of the same register reads back as 0 (nv_wait returning
 * false means the poll timed out).  The spinlock serializes concurrent
 * flushes against each other; IRQs are disabled since this may be
 * called from interrupt context — TODO confirm against callers.
 */
static void
nv50_bar_flush(struct nouveau_bar *bar)
{
	struct nv50_bar_priv *priv = (void *)bar;
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	nv_wr32(priv, 0x00330c, 0x00000001);
	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
		nv_warn(priv, "flush timeout\n" );
	spin_unlock_irqrestore(&priv->lock, flags);
}
98 | |
99 | void |
100 | nv84_bar_flush(struct nouveau_bar *bar) |
101 | { |
102 | struct nv50_bar_priv *priv = (void *)bar; |
103 | unsigned long flags; |
104 | spin_lock_irqsave(&priv->lock, flags); |
105 | nv_wr32(bar, 0x070000, 0x00000001); |
106 | if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000)) |
107 | nv_warn(priv, "flush timeout\n" ); |
108 | spin_unlock_irqrestore(&priv->lock, flags); |
109 | } |
110 | |
/*
 * Constructor for the nv50 BAR subdev.
 *
 * Builds, in order: a 128KiB instmem heap, a chipset-dependent pad
 * object, a shared page directory, then the BAR3 VM + DMA object and
 * the BAR1 VM + DMA object, and finally wires up the method pointers.
 *
 * Error handling relies on the nouveau object model: *pobject is set
 * before the first error check so the core can run nv50_bar_dtor() on
 * a partially-constructed object — that is why early returns here do
 * not free anything themselves.
 */
static int
nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_object *heap;
	struct nouveau_vm *vm;
	struct nv50_bar_priv *priv;
	u64 start, limit;
	int ret;

	ret = nouveau_bar_create(parent, engine, oclass, &priv);
	/* publish the object before checking ret so dtor can clean up */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* 128KiB heap that all subsequent gpuobjs are suballocated from */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_HEAP, &priv->mem);
	heap = nv_object(priv->mem);
	if (ret)
		return ret;

	/* padding before the page directory; larger on original nv50 */
	ret = nouveau_gpuobj_new(nv_object(priv), heap,
				 (device->chipset == 0x50) ? 0x1400 : 0x0200,
				 0, 0, &priv->pad);
	if (ret)
		return ret;

	/* page directory shared by the BAR1 and BAR3 address spaces */
	ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
				 0, &priv->pgd);
	if (ret)
		return ret;

	/* BAR3 */
	/* BAR3 VM lives at GPU virtual 0x100000000, sized to the PCI BAR */
	start = 0x0100000000ULL;
	limit = start + nv_device_resource_len(device, 3);

	ret = nouveau_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	/* pre-allocate the whole page table: one 8-byte PTE per 4KiB page.
	 * NOTE the post-decrement: limit becomes inclusive (end - 1) here
	 * and is written into the DMA object below in that form. */
	ret = nouveau_gpuobj_new(nv_object(priv), heap,
				 ((limit-- - start) >> 12) * 8, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	/* transfer our vm reference into priv->bar3_vm, binding the pgd */
	ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	/* 24-byte DMA object describing the BAR3 aperture */
	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
	if (ret)
		return ret;

	/* DMA object layout: class/flags word, then the inclusive
	 * limit/start split across low 32 bits and packed high bits */
	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar3, 0x10, 0x00000000);
	nv_wo32(priv->bar3, 0x14, 0x00000000);

	/* BAR1 */
	/* BAR1 VM starts at GPU virtual 0, sized to the PCI BAR; the
	 * post-decrement again converts limit to inclusive form */
	start = 0x0000000000ULL;
	limit = start + nv_device_resource_len(device, 1);

	ret = nouveau_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
	nouveau_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	/* 24-byte DMA object describing the BAR1 aperture */
	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
	if (ret)
		return ret;

	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar1, 0x10, 0x00000000);
	nv_wo32(priv->bar1, 0x14, 0x00000000);

	/* hook up the subdev methods; nv84+ uses the relocated flush reg */
	priv->base.alloc = nouveau_bar_alloc;
	priv->base.kmap = nv50_bar_kmap;
	priv->base.umap = nv50_bar_umap;
	priv->base.unmap = nv50_bar_unmap;
	if (device->chipset == 0x50)
		priv->base.flush = nv50_bar_flush;
	else
		priv->base.flush = nv84_bar_flush;
	spin_lock_init(&priv->lock);
	return 0;
}
217 | |
/*
 * Destructor: releases everything the ctor built, in reverse order.
 * Each nouveau_gpuobj_ref(NULL, ...)/nouveau_vm_ref(NULL, ...) drops a
 * reference and NULLs the pointer, and tolerates pointers left NULL by
 * a partially-failed ctor — except the bar3 page table, which is only
 * reachable through bar3_vm and so needs the explicit NULL check.
 */
static void
nv50_bar_dtor(struct nouveau_object *object)
{
	struct nv50_bar_priv *priv = (void *)object;
	nouveau_gpuobj_ref(NULL, &priv->bar1);
	nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar3);
	if (priv->bar3_vm) {
		/* drop the pre-allocated BAR3 page table before the VM */
		nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
		nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
	}
	nouveau_gpuobj_ref(NULL, &priv->pgd);
	nouveau_gpuobj_ref(NULL, &priv->pad);
	nouveau_gpuobj_ref(NULL, &priv->mem);
	nouveau_bar_destroy(&priv->base);
}
234 | |
/*
 * Hardware init: resets the BAR engine, flushes the VM TLBs, then
 * points the hardware at the instmem heap and the two BAR DMA objects
 * built by the ctor.  Returns -EBUSY if the TLB flush never completes.
 */
static int
nv50_bar_init(struct nouveau_object *object)
{
	struct nv50_bar_priv *priv = (void *)object;
	int ret, i;

	ret = nouveau_bar_init(&priv->base);
	if (ret)
		return ret;

	/* pulse the engine-enable bit low then high to reset the unit */
	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
	/* kick a VM flush and wait for the busy bit to clear */
	nv_wr32(priv, 0x100c80, 0x00060001);
	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
		nv_error(priv, "vm flush timeout\n" );
		return -EBUSY;
	}

	/* base of the instmem heap (written twice: once without and once
	 * with bit 30 set — presumably selects/commits the target; the
	 * addresses are in units of 4KiB, offsets in units of 16 bytes) */
	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
	/* clear the eight 0x1900-range slots */
	for (i = 0; i < 8; i++)
		nv_wr32(priv, 0x001900 + (i * 4), 0x00000000);
	return 0;
}
261 | |
262 | static int |
263 | nv50_bar_fini(struct nouveau_object *object, bool suspend) |
264 | { |
265 | struct nv50_bar_priv *priv = (void *)object; |
266 | return nouveau_bar_fini(&priv->base, suspend); |
267 | } |
268 | |
/*
 * Object class descriptor registering this implementation as the BAR
 * subdev for the nv50 chipset family (handle tag 0x50), with the
 * lifecycle hooks defined above.
 */
struct nouveau_oclass
nv50_bar_oclass = {
	.handle = NV_SUBDEV(BAR, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_bar_ctor,
		.dtor = nv50_bar_dtor,
		.init = nv50_bar_init,
		.fini = nv50_bar_fini,
	},
};
279 | |