1 | /* $NetBSD: nouveau_engine_fifo_nve0.c,v 1.6 2015/10/26 07:12:08 mrg Exp $ */ |
2 | |
3 | /* |
4 | * Copyright 2012 Red Hat Inc. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Ben Skeggs |
25 | */ |
26 | |
27 | #include <sys/cdefs.h> |
28 | __KERNEL_RCSID(0, "$NetBSD: nouveau_engine_fifo_nve0.c,v 1.6 2015/10/26 07:12:08 mrg Exp $" ); |
29 | |
30 | #include <core/client.h> |
31 | #include <core/handle.h> |
32 | #include <core/namedb.h> |
33 | #include <core/gpuobj.h> |
34 | #include <core/engctx.h> |
35 | #include <core/event.h> |
36 | #include <core/class.h> |
37 | #include <core/enum.h> |
38 | |
39 | #include <subdev/timer.h> |
40 | #include <subdev/bar.h> |
41 | #include <subdev/fb.h> |
42 | #include <subdev/vm.h> |
43 | |
44 | #include <engine/dmaobj.h> |
45 | |
46 | #include <drm/drmP.h> /* XXX */ |
47 | #include <linux/workqueue.h> /* XXX */ |
48 | |
49 | #include "nve0.h" |
50 | |
/*
 * Table of engines serviced by this FIFO, in runlist order.
 * subdev is the engine's NVDEV_ENGINE_* index; mask is the set of
 * engines (bitmask of NVDEV_ENGINE_* bits) a channel on that runlist
 * may target -- the engine itself plus any companion engines.
 */
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;
	u64 mask;
} fifo_engine[] = {
	_(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW) |
	 (1ULL << NVDEV_ENGINE_COPY2)),
	_(NVDEV_ENGINE_VP , 0),
	_(NVDEV_ENGINE_PPP , 0),
	_(NVDEV_ENGINE_BSP , 0),
	_(NVDEV_ENGINE_COPY0 , 0),
	_(NVDEV_ENGINE_COPY1 , 0),
	_(NVDEV_ENGINE_VENC , 0),
};
#undef _
/* Number of runlists/engines managed by this FIFO. */
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
67 | |
/*
 * Per-runlist state: double-buffered runlist memory plus a waitqueue
 * used to wait for the hardware to consume a runlist update.
 */
struct nve0_fifo_engn {
	struct nouveau_gpuobj *runlist[2];	/* double-buffered runlist */
	int cur_runlist;			/* index of buffer in use */
#ifdef __NetBSD__
	spinlock_t lock;		/* serializes the wait channel below */
	drm_waitqueue_t wait;		/* signalled on runlist-update intr */
#else
	wait_queue_head_t wait;
#endif
};
78 | |
/* PFIFO engine private state. */
struct nve0_fifo_priv {
	struct nouveau_fifo base;

	/* Deferred engine-recovery work; mask is the pending set of
	 * faulting engines (by NVDEV_ENGINE_* index), guarded by
	 * base.lock (see nve0_fifo_recover/_recover_work). */
	struct work_struct fault;
	u64 mask;

	struct nve0_fifo_engn engine[FIFO_ENGINE_NR];	/* per-runlist state */
	struct {
		struct nouveau_gpuobj *mem;	/* per-channel user area backing
						 * (0x200 bytes/channel) */
		struct nouveau_vma bar;		/* BAR mapping of the above */
	} user;
	int spoon_nr;	/* NOTE(review): presumably the PBDMA unit count --
			 * set elsewhere; confirm against nve0.h users */
};
92 | |
/* Per-channel context: instance block plus the channel's VM. */
struct nve0_fifo_base {
	struct nouveau_fifo_base base;
	struct nouveau_gpuobj *pgd;	/* page directory */
	struct nouveau_vm *vm;		/* VM referencing pgd */
};
98 | |
/* FIFO channel object. */
struct nve0_fifo_chan {
	struct nouveau_fifo_chan base;
	u32 engine;		/* index into fifo_engine[]/runlist number */
	enum {
		STOPPED,	/* not on any runlist */
		RUNNING,	/* scheduled on its engine's runlist */
		KILLED		/* removed after a fault; never rescheduled */
	} state;
};
108 | |
109 | /******************************************************************************* |
110 | * FIFO channel objects |
111 | ******************************************************************************/ |
112 | |
113 | static void |
114 | nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine) |
115 | { |
116 | struct nouveau_bar *bar = nouveau_bar(priv); |
117 | struct nve0_fifo_engn *engn = &priv->engine[engine]; |
118 | struct nouveau_gpuobj *cur; |
119 | int i, p; |
120 | |
121 | mutex_lock(&nv_subdev(priv)->mutex); |
122 | cur = engn->runlist[engn->cur_runlist]; |
123 | engn->cur_runlist = !engn->cur_runlist; |
124 | |
125 | for (i = 0, p = 0; i < priv->base.max; i++) { |
126 | struct nve0_fifo_chan *chan = (void *)priv->base.channel[i]; |
127 | if (chan && chan->state == RUNNING && chan->engine == engine) { |
128 | nv_wo32(cur, p + 0, i); |
129 | nv_wo32(cur, p + 4, 0x00000000); |
130 | p += 8; |
131 | } |
132 | } |
133 | bar->flush(bar); |
134 | |
135 | nv_wr32(priv, 0x002270, cur->addr >> 12); |
136 | nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); |
137 | |
138 | #ifdef __NetBSD__ |
139 | if (cold) { |
140 | uint count = 2000; |
141 | while (count-- > 0) { |
142 | if (!(nv_rd32(priv, 0x002284 + |
143 | (engine * 0x08)) & 0x00100000)) |
144 | break; |
145 | delay(1000); |
146 | } |
147 | if (count == 0) |
148 | nv_error(priv, "runlist %d update timeout\n" , engine); |
149 | } else { |
150 | int ret; |
151 | |
152 | spin_lock(&engn->lock); |
153 | DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &engn->wait, &engn->lock, |
154 | msecs_to_jiffies(2000), |
155 | !(nv_rd32(priv, 0x002284 + |
156 | (engine * 0x08)) & 0x00100000)); |
157 | if (ret == 0) |
158 | nv_error(priv, "runlist %d update timeout\n" , engine); |
159 | spin_unlock(&engn->lock); |
160 | } |
161 | #else |
162 | if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 + |
163 | (engine * 0x08)) & 0x00100000), |
164 | msecs_to_jiffies(2000)) == 0) |
165 | nv_error(priv, "runlist %d update timeout\n" , engine); |
166 | #endif |
167 | mutex_unlock(&nv_subdev(priv)->mutex); |
168 | } |
169 | |
/*
 * Attach an engine context to a channel: map the engine's context
 * object into the channel's VM (on first attach) and write its
 * address into the engine's slot in the channel's instance block.
 */
static int
nve0_fifo_context_attach(struct nouveau_object *parent,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nve0_fifo_base *base = (void *)parent->parent;
	struct nouveau_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW :
		return 0;
	case NVDEV_ENGINE_COPY0:
	case NVDEV_ENGINE_COPY1:
	case NVDEV_ENGINE_COPY2:
		/* Copy engines have no context slot in the instance
		 * block; they just use the instance address directly. */
		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
		return 0;
	/* Per-engine context slot offsets in the instance block. */
	case NVDEV_ENGINE_GR : addr = 0x0210; break;
	case NVDEV_ENGINE_BSP : addr = 0x0270; break;
	case NVDEV_ENGINE_VP : addr = 0x0250; break;
	case NVDEV_ENGINE_PPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		/* First attach: map the context into the channel's VM. */
		ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					    NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	/* Low word is ORed with 4 -- presumably a valid/target flag;
	 * high word carries the upper address bits. */
	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	return 0;
}
210 | |
/*
 * Detach an engine context from a channel: kick the channel off the
 * hardware, then clear the engine's slot in the instance block.
 * Returns -EBUSY if the kick times out during suspend.
 */
static int
nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nve0_fifo_priv *priv = (void *)parent->engine;
	struct nve0_fifo_base *base = (void *)parent->parent;
	struct nve0_fifo_chan *chan = (void *)parent;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW : return 0;
	case NVDEV_ENGINE_COPY0:
	case NVDEV_ENGINE_COPY1:
	case NVDEV_ENGINE_COPY2: addr = 0x0000; break;
	case NVDEV_ENGINE_GR : addr = 0x0210; break;
	case NVDEV_ENGINE_BSP : addr = 0x0270; break;
	case NVDEV_ENGINE_VP : addr = 0x0250; break;
	case NVDEV_ENGINE_PPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* Kick the channel: write its id to 0x002634 and wait for the
	 * register to read back the id (preemption complete). */
	nv_wr32(priv, 0x002634, chan->base.chid);
	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
		nv_error(priv, "channel %d [%s] kick timeout\n" ,
			 chan->base.chid, nouveau_client_name(chan));
		/* A stuck channel is fatal for suspend only. */
		if (suspend)
			return -EBUSY;
	}

	/* Copy engines (addr == 0) have no slot to clear. */
	if (addr) {
		nv_wo32(base, addr + 0x00, 0x00000000);
		nv_wo32(base, addr + 0x04, 0x00000000);
		bar->flush(bar);
	}

	return 0;
}
250 | |
/*
 * Channel constructor: select the target engine from the requested
 * mask, create the FIFO channel, and initialize its user-area slice
 * and instance block.
 */
static int
nve0_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nve0_fifo_priv *priv = (void *)engine;
	struct nve0_fifo_base *base = (void *)parent;
	struct nve0_fifo_chan *chan;
	struct nve0_channel_ind_class *args = data;
	u64 usermem, ioffset, ilength;
	int ret, i;

	if (size < sizeof(*args))
		return -EINVAL;

	/* Pick the first requested engine that actually exists on this
	 * device; narrow args->engine to that single bit. */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		if (args->engine & (1 << i)) {
			if (nouveau_engine(parent, fifo_engine[i].subdev)) {
				args->engine = (1 << i);
				break;
			}
		}
	}

	if (i == FIFO_ENGINE_NR) {
		nv_error(priv, "unsupported engines 0x%08x\n" , args->engine);
		return -ENODEV;
	}

	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
					  priv->user.bar.offset, 0x200,
					  args->pushbuf,
					  fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = nve0_fifo_context_attach;
	nv_parent(chan)->context_detach = nve0_fifo_context_detach;
	chan->engine = i;

	/* Each channel owns a 0x200-byte slice of the user area;
	 * ilength is log2 of the GP FIFO entry count. */
	usermem = chan->base.chid * 0x200;
	ioffset = args->ioffset;
	ilength = order_base_2(args->ilength / 8);

	/* Zero the channel's user-area slice. */
	for (i = 0; i < 0x200; i += 4)
		nv_wo32(priv->user.mem, usermem + i, 0x00000000);

	/* Instance block setup: user-area address, GP FIFO base/size,
	 * and fixed control words (magic values from the hardware;
	 * semantics not documented here). */
	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x10, 0x0000face);
	nv_wo32(base, 0x30, 0xfffff902);
	nv_wo32(base, 0x48, lower_32_bits(ioffset));
	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base, 0x84, 0x20400000);
	nv_wo32(base, 0x94, 0x30000001);
	nv_wo32(base, 0x9c, 0x00000100);
	nv_wo32(base, 0xac, 0x0000001f);
	nv_wo32(base, 0xe8, chan->base.chid);
	nv_wo32(base, 0xb8, 0xf8000000);
	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	return 0;
}
318 | |
/*
 * Channel init: bind the channel's instance block and target engine
 * in the channel-control registers, then (if newly started) enable
 * it and add it to its engine's runlist.
 */
static int
nve0_fifo_chan_init(struct nouveau_object *object)
{
	struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
	struct nve0_fifo_priv *priv = (void *)object->engine;
	struct nve0_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;
	int ret;

	ret = nouveau_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	/* Select the channel's runlist/engine and instance block. */
	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

	/* The assignment inside the condition flips state to RUNNING.
	 * The 0x400 bit is written both before and after the runlist
	 * update, matching the upstream driver -- presumably an
	 * enable/kick pair; do not "deduplicate". */
	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
		nve0_fifo_runlist_update(priv, chan->engine);
		nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	}

	return 0;
}
343 | |
344 | static int |
345 | nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend) |
346 | { |
347 | struct nve0_fifo_priv *priv = (void *)object->engine; |
348 | struct nve0_fifo_chan *chan = (void *)object; |
349 | u32 chid = chan->base.chid; |
350 | |
351 | if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) { |
352 | nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800); |
353 | nve0_fifo_runlist_update(priv, chan->engine); |
354 | } |
355 | |
356 | nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); |
357 | return nouveau_fifo_channel_fini(&chan->base, suspend); |
358 | } |
359 | |
/* Channel object method table. */
static struct nouveau_ofuncs
nve0_fifo_ofuncs = {
	.ctor = nve0_fifo_chan_ctor,
	.dtor = _nouveau_fifo_channel_dtor,
	.init = nve0_fifo_chan_init,
	.fini = nve0_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};
369 | |
/* Channel classes exposed to clients. */
static struct nouveau_oclass
nve0_fifo_sclass[] = {
	{ NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs },
	{}
};
375 | |
376 | /******************************************************************************* |
377 | * FIFO context - instmem heap and vm setup |
378 | ******************************************************************************/ |
379 | |
/*
 * Channel context constructor: allocate the instance block and page
 * directory, then reference the client's VM through the PGD.
 */
static int
nve0_fifo_context_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nve0_fifo_base *base;
	int ret;

	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
					  0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	/* 64KiB page directory, 4KiB aligned. */
	ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
				&base->pgd);
	if (ret)
		return ret;

	/* Point the instance block at the PGD (0x200/0x204) and set the
	 * address limit words (0x208/0x20c) -- presumably the VM extent;
	 * confirm against hardware documentation. */
	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0208, 0xffffffff);
	nv_wo32(base, 0x020c, 0x000000ff);

	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}
411 | |
/* Channel context destructor. */
static void
nve0_fifo_context_dtor(struct nouveau_object *object)
{
	struct nve0_fifo_base *base = (void *)object;
	/* Order matters: drop the VM's reference via the PGD before
	 * releasing the PGD itself, then destroy the base context. */
	nouveau_vm_ref(NULL, &base->vm, base->pgd);
	nouveau_gpuobj_ref(NULL, &base->pgd);
	nouveau_fifo_context_destroy(&base->base);
}
420 | |
/* Channel context class. */
static struct nouveau_oclass
nve0_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_fifo_context_ctor,
		.dtor = nve0_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
433 | |
434 | /******************************************************************************* |
435 | * PFIFO engine |
436 | ******************************************************************************/ |
437 | |
438 | static inline int |
439 | nve0_fifo_engidx(struct nve0_fifo_priv *priv, u32 engn) |
440 | { |
441 | switch (engn) { |
442 | case NVDEV_ENGINE_GR : |
443 | case NVDEV_ENGINE_COPY2: engn = 0; break; |
444 | case NVDEV_ENGINE_BSP : engn = 1; break; |
445 | case NVDEV_ENGINE_PPP : engn = 2; break; |
446 | case NVDEV_ENGINE_VP : engn = 3; break; |
447 | case NVDEV_ENGINE_COPY0: engn = 4; break; |
448 | case NVDEV_ENGINE_COPY1: engn = 5; break; |
449 | case NVDEV_ENGINE_VENC : engn = 6; break; |
450 | default: |
451 | return -1; |
452 | } |
453 | |
454 | return engn; |
455 | } |
456 | |
457 | static inline struct nouveau_engine * |
458 | nve0_fifo_engine(struct nve0_fifo_priv *priv, u32 engn) |
459 | { |
460 | if (engn >= ARRAY_SIZE(fifo_engine)) |
461 | return NULL; |
462 | return nouveau_engine(priv, fifo_engine[engn].subdev); |
463 | } |
464 | |
465 | static void |
466 | nve0_fifo_recover_work(struct work_struct *work) |
467 | { |
468 | struct nve0_fifo_priv *priv = container_of(work, typeof(*priv), fault); |
469 | struct nouveau_object *engine; |
470 | unsigned long flags; |
471 | u32 engn, engm = 0; |
472 | u64 mask, todo; |
473 | |
474 | spin_lock_irqsave(&priv->base.lock, flags); |
475 | mask = priv->mask; |
476 | priv->mask = 0ULL; |
477 | spin_unlock_irqrestore(&priv->base.lock, flags); |
478 | |
479 | for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) |
480 | engm |= 1 << nve0_fifo_engidx(priv, engn); |
481 | nv_mask(priv, 0x002630, engm, engm); |
482 | |
483 | for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { |
484 | if ((engine = (void *)nouveau_engine(priv, engn))) { |
485 | nv_ofuncs(engine)->fini(engine, false); |
486 | WARN_ON(nv_ofuncs(engine)->init(engine)); |
487 | } |
488 | nve0_fifo_runlist_update(priv, nve0_fifo_engidx(priv, engn)); |
489 | } |
490 | |
491 | nv_wr32(priv, 0x00262c, engm); |
492 | nv_mask(priv, 0x002630, engm, 0x00000000); |
493 | } |
494 | |
/*
 * Begin recovery for a channel that faulted on an engine: disable the
 * channel, mark it killed, record the engine, and schedule the heavy
 * lifting in nve0_fifo_recover_work().  Called from interrupt paths.
 */
static void
nve0_fifo_recover(struct nve0_fifo_priv *priv, struct nouveau_engine *engine,
		  struct nve0_fifo_chan *chan)
{
	struct nouveau_object *engobj = nv_object(engine);
	u32 chid = chan->base.chid;
	unsigned long flags;

	nv_error(priv, "%s engine fault on channel %d, recovering...\n" ,
		 nv_subdev(engine)->name, chid);

	/* Disable the channel so it can't be rescheduled. */
	nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	chan->state = KILLED;

	/* Record the faulting engine for the work handler. */
	spin_lock_irqsave(&priv->base.lock, flags);
	priv->mask |= 1ULL << nv_engidx(engobj);
	spin_unlock_irqrestore(&priv->base.lock, flags);
	schedule_work(&priv->fault);
}
514 | |
/*
 * Dispatch a software method: look up the channel's software object
 * (class 0x906e) and forward the method call to it.  Returns 0 on
 * success, -EINVAL if there is no such channel/object or the call
 * fails.
 */
static int
nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
	struct nve0_fifo_chan *chan = NULL;
	struct nouveau_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&priv->base.lock, flags);
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		/* mthd == 0 is treated as a probe: succeed without
		 * actually calling into the object. */
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nouveau_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return ret;
}
540 | |
/* Decode table for BIND_ERROR reason codes (see nve0_fifo_intr_bind). */
static const struct nouveau_enum
nve0_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};
551 | |
552 | static void |
553 | nve0_fifo_intr_bind(struct nve0_fifo_priv *priv) |
554 | { |
555 | u32 intr = nv_rd32(priv, 0x00252c); |
556 | u32 code = intr & 0x000000ff; |
557 | const struct nouveau_enum *en; |
558 | char enunk[6] = "" ; |
559 | |
560 | en = nouveau_enum_find(nve0_fifo_bind_reason, code); |
561 | if (!en) |
562 | snprintf(enunk, sizeof(enunk), "UNK%02x" , code); |
563 | |
564 | nv_error(priv, "BIND_ERROR [ %s ]\n" , en ? en->name : enunk); |
565 | } |
566 | |
/* Decode table for SCHED_ERROR codes (see nve0_fifo_intr_sched). */
static const struct nouveau_enum
nve0_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};
572 | |
/*
 * CTXSW_TIMEOUT handler: scan each engine's status register and
 * recover any channel stuck mid context-switch.
 */
static void
nve0_fifo_intr_sched_ctxsw(struct nve0_fifo_priv *priv)
{
	struct nouveau_engine *engine;
	struct nve0_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
		/* Decode the engine status word (0x002640 + engn*4). */
		u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;	/* channel being switched */
		(void)save;

		/* Busy with a channel-switch pending => stuck; recover
		 * the channel involved in the switch. */
		if (busy && chsw) {
			if (!(chan = (void *)priv->base.channel[chid]))
				continue;
			if (!(engine = nve0_fifo_engine(priv, engn)))
				continue;
			nve0_fifo_recover(priv, engine, chan);
		}
	}
}
600 | |
601 | static void |
602 | nve0_fifo_intr_sched(struct nve0_fifo_priv *priv) |
603 | { |
604 | u32 intr = nv_rd32(priv, 0x00254c); |
605 | u32 code = intr & 0x000000ff; |
606 | const struct nouveau_enum *en; |
607 | char enunk[6] = "" ; |
608 | |
609 | en = nouveau_enum_find(nve0_fifo_sched_reason, code); |
610 | if (!en) |
611 | snprintf(enunk, sizeof(enunk), "UNK%02x" , code); |
612 | |
613 | nv_error(priv, "SCHED_ERROR [ %s ]\n" , en ? en->name : enunk); |
614 | |
615 | switch (code) { |
616 | case 0x0a: |
617 | nve0_fifo_intr_sched_ctxsw(priv); |
618 | break; |
619 | default: |
620 | break; |
621 | } |
622 | } |
623 | |
624 | static void |
625 | nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv) |
626 | { |
627 | u32 stat = nv_rd32(priv, 0x00256c); |
628 | nv_error(priv, "CHSW_ERROR 0x%08x\n" , stat); |
629 | nv_wr32(priv, 0x00256c, stat); |
630 | } |
631 | |
632 | static void |
633 | nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv) |
634 | { |
635 | u32 stat = nv_rd32(priv, 0x00259c); |
636 | nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n" , stat); |
637 | } |
638 | |
/* MMU fault: decode table for the faulting engine/unit id; data2 is
 * the corresponding NVDEV_* index used to find the owning channel. */
static const struct nouveau_enum
nve0_fifo_fault_engine[] = {
	{ 0x00, "GR" , NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB" , NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1" , NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3" , NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0" , NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1" , NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2" , NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD" , NULL, NVDEV_ENGINE_BSP },
	{ 0x11, "MSPPP" , NULL, NVDEV_ENGINE_PPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC" , NULL, NVDEV_ENGINE_VP },
	{ 0x15, "CE0" , NULL, NVDEV_ENGINE_COPY0 },
	{ 0x16, "CE1" , NULL, NVDEV_ENGINE_COPY1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC" , NULL, NVDEV_ENGINE_VENC },
	{ 0x1b, "CE2" , NULL, NVDEV_ENGINE_COPY2 },
	{}
};
659 | |
/* MMU fault: decode table for the fault reason field. */
static const struct nouveau_enum
nve0_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};
680 | |
/* MMU fault: decode table for HUB client ids. */
static const struct nouveau_enum
nve0_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_COPY" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};
717 | |
/* MMU fault: decode table for GPC client ids. */
static const struct nouveau_enum
nve0_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};
743 | |
/*
 * MMU fault interrupt handler: decode the fault (reason, faulting
 * unit, client), log it, and recover the owning channel if one can
 * be identified from the instance address.
 */
static void
nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
{
	/* Per-unit fault info registers: instance, VA low/high, status. */
	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
	u32 gpc = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write = (stat & 0x00000080);
	u32 hub = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nouveau_object *engctx = NULL, *object;
	struct nouveau_engine *engine = NULL;
	const struct nouveau_enum *er, *eu, *ec;
	char erunk[6] = "" ;
	char euunk[6] = "" ;
	char ecunk[6] = "" ;
	char gpcid[3] = "" ;

	er = nouveau_enum_find(nve0_fifo_fault_reason, reason);
	if (!er)
		snprintf(erunk, sizeof(erunk), "UNK%02X" , reason);

	eu = nouveau_enum_find(nve0_fifo_fault_engine, unit);
	if (eu) {
		switch (eu->data2) {
		/* BAR/IFB faults: poke the unit's flush register --
		 * NOTE(review): a zero-mask nv_mask is effectively a
		 * read-modify-write of the same value; presumably the
		 * access itself unsticks the unit.  Confirm. */
		case NVDEV_SUBDEV_BAR:
			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			/* Engine fault: find the context that was bound
			 * at the faulting instance address. */
			engine = nouveau_engine(priv, eu->data2);
			if (engine)
				engctx = nouveau_engctx_get(engine, inst);
			break;
		}
	} else {
		snprintf(euunk, sizeof(euunk), "UNK%02x" , unit);
	}

	/* Client id namespace differs between HUB and GPC faults. */
	if (hub) {
		ec = nouveau_enum_find(nve0_fifo_fault_hubclient, client);
	} else {
		ec = nouveau_enum_find(nve0_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "%d" , gpc);
	}

	if (!ec)
		snprintf(ecunk, sizeof(ecunk), "UNK%02x" , client);

	nv_error(priv, "%s fault at 0x%010" PRIx64" [%s] from %s/%s%s%s%s on "
		 "channel 0x%010" PRIx64" [%s]\n" , write ? "write" : "read" ,
		 (u64)vahi << 32 | valo, er ? er->name : erunk,
		 eu ? eu->name : euunk, hub ? "" : "GPC" , gpcid, hub ? "" : "/" ,
		 ec ? ec->name : ecunk, (u64)inst << 12,
		 nouveau_client_name(engctx));

	/* Walk up the object tree looking for the FIFO channel that
	 * owns the faulting context, and recover it. */
	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case NVE0_CHANNEL_IND_CLASS:
			nve0_fifo_recover(priv, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nouveau_engctx_put(engctx);
}
819 | |
/* Decode table for PBDMA interrupt status bits (0x040108). */
static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
853 | |
/*
 * PBDMA interrupt handler: try to service DEVICE (0x00800000)
 * interrupts as software methods; log whatever remains, then
 * acknowledge the method and the interrupt status.
 */
static void
nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
{
	/* Per-unit registers at stride 0x2000. */
	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;

	if (stat & 0x00800000) {
		/* Possibly a software method; drop the bit if handled. */
		if (!nve0_fifo_swmthd(priv, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nv_error(priv, "PBDMA%d:" , unit);
		nouveau_bitfield_print(nve0_fifo_pbdma_intr, show);
		pr_cont("\n" );
		nv_error(priv,
			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n" ,
			 unit, chid,
			 nouveau_client_name_for_fifo_chid(&priv->base, chid),
			 subc, mthd, data);
	}

	/* Acknowledge: clear the method slot, then the status bits. */
	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}
884 | |
/*
 * Runlist-update interrupt: for each completed runlist, wake the
 * waiter in nve0_fifo_runlist_update() and acknowledge the bit.
 */
static void
nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
{
	u32 mask = nv_rd32(priv, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
#ifdef __NetBSD__
		spin_lock(&priv->engine[engn].lock);
		DRM_SPIN_WAKEUP_ONE(&priv->engine[engn].wait,
		    &priv->engine[engn].lock);
		spin_unlock(&priv->engine[engn].lock);
#else
		wake_up(&priv->engine[engn].wait);
#endif
		/* Write-one-to-clear acknowledge. */
		nv_wr32(priv, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}
903 | |
/* Engine (non-stall) interrupt: notify uevent listeners. */
static void
nve0_fifo_intr_engine(struct nve0_fifo_priv *priv)
{
	nouveau_event_trigger(priv->base.uevent, 0);
}
909 | |
/*
 * Top-level PFIFO interrupt dispatcher.  Each recognized status bit
 * is handled, acknowledged in 0x002100, then cleared from the local
 * copy; anything left over is reported and masked off so it can't
 * storm.
 */
static void
nve0_fifo_intr(struct nouveau_subdev *subdev)
{
	struct nve0_fifo_priv *priv = (void *)subdev;
	u32 mask = nv_rd32(priv, 0x002140);
	u32 stat = nv_rd32(priv, 0x002100) & mask;

	if (stat & 0x00000001) {
		/* BIND_ERROR */
		nve0_fifo_intr_bind(priv);
		nv_wr32(priv, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nv_error(priv, "PIO_ERROR\n" );
		nv_wr32(priv, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		/* SCHED_ERROR (may trigger channel recovery) */
		nve0_fifo_intr_sched(priv);
		nv_wr32(priv, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		/* CHSW_ERROR */
		nve0_fifo_intr_chsw(priv);
		nv_wr32(priv, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nv_error(priv, "FB_FLUSH_TIMEOUT\n" );
		nv_wr32(priv, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nv_error(priv, "LB_ERROR\n" );
		nv_wr32(priv, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		/* DROPPED_MMU_FAULT */
		nve0_fifo_intr_dropped_fault(priv);
		nv_wr32(priv, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		/* MMU fault: one bit per faulting unit in 0x00259c. */
		u32 mask = nv_rd32(priv, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			nve0_fifo_intr_fault(priv, unit);
			nv_wr32(priv, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		/* PBDMA: one bit per unit in 0x0025a0. */
		u32 mask = nv_rd32(priv, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			nve0_fifo_intr_pbdma(priv, unit);
			nv_wr32(priv, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		/* Runlist update complete (acked per-runlist inside). */
		nve0_fifo_intr_runlist(priv);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		/* Engine (non-stall) interrupt -> uevent. */
		nve0_fifo_intr_engine(priv);
		nv_wr32(priv, 0x002100, 0x80000000);
		stat &= ~0x80000000;
	}

	if (stat) {
		/* Unknown bits: log once, mask them off, acknowledge. */
		nv_error(priv, "INTR 0x%08x\n" , stat);
		nv_mask(priv, 0x002140, stat, 0x00000000);
		nv_wr32(priv, 0x002100, stat);
	}
}
998 | |
999 | static void |
1000 | nve0_fifo_uevent_enable(struct nouveau_event *event, int index) |
1001 | { |
1002 | struct nve0_fifo_priv *priv = event->priv; |
1003 | nv_mask(priv, 0x002140, 0x80000000, 0x80000000); |
1004 | } |
1005 | |
1006 | static void |
1007 | nve0_fifo_uevent_disable(struct nouveau_event *event, int index) |
1008 | { |
1009 | struct nve0_fifo_priv *priv = event->priv; |
1010 | nv_mask(priv, 0x002140, 0x80000000, 0x00000000); |
1011 | } |
1012 | |
1013 | int |
1014 | nve0_fifo_fini(struct nouveau_object *object, bool suspend) |
1015 | { |
1016 | struct nve0_fifo_priv *priv = (void *)object; |
1017 | int ret; |
1018 | |
1019 | ret = nouveau_fifo_fini(&priv->base, suspend); |
1020 | if (ret) |
1021 | return ret; |
1022 | |
1023 | /* allow mmu fault interrupts, even when we're not using fifo */ |
1024 | nv_mask(priv, 0x002140, 0x10000000, 0x10000000); |
1025 | return 0; |
1026 | } |
1027 | |
1028 | int |
1029 | nve0_fifo_init(struct nouveau_object *object) |
1030 | { |
1031 | struct nve0_fifo_priv *priv = (void *)object; |
1032 | int ret, i; |
1033 | |
1034 | ret = nouveau_fifo_init(&priv->base); |
1035 | if (ret) |
1036 | return ret; |
1037 | |
1038 | /* enable all available PBDMA units */ |
1039 | nv_wr32(priv, 0x000204, 0xffffffff); |
1040 | priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204)); |
1041 | nv_debug(priv, "%d PBDMA unit(s)\n" , priv->spoon_nr); |
1042 | |
1043 | /* PBDMA[n] */ |
1044 | for (i = 0; i < priv->spoon_nr; i++) { |
1045 | nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); |
1046 | nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ |
1047 | nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ |
1048 | } |
1049 | |
1050 | nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12); |
1051 | |
1052 | nv_wr32(priv, 0x002100, 0xffffffff); |
1053 | nv_wr32(priv, 0x002140, 0x7fffffff); |
1054 | return 0; |
1055 | } |
1056 | |
1057 | void |
1058 | nve0_fifo_dtor(struct nouveau_object *object) |
1059 | { |
1060 | struct nve0_fifo_priv *priv = (void *)object; |
1061 | int i; |
1062 | |
1063 | nouveau_gpuobj_unmap(&priv->user.bar); |
1064 | nouveau_gpuobj_ref(NULL, &priv->user.mem); |
1065 | |
1066 | for (i = 0; i < FIFO_ENGINE_NR; i++) { |
1067 | nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[1]); |
1068 | nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[0]); |
1069 | #ifdef __NetBSD__ |
1070 | DRM_DESTROY_WAITQUEUE(&priv->engine[i].wait); |
1071 | spin_lock_destroy(&priv->engine[i].lock); |
1072 | #endif |
1073 | } |
1074 | |
1075 | nouveau_fifo_destroy(&priv->base); |
1076 | } |
1077 | |
/*
 * Constructor for the nve0 (Kepler) fifo engine.  Allocates the
 * common fifo state, a pair of runlist buffers per engine, and the
 * USERD area (one 0x200-byte slot per channel, mapped through the
 * BAR), then wires up the uevent and subdev callbacks.
 *
 * Returns 0 on success or a negative error code; *pobject is set
 * before any failure return so the framework can tear down the
 * partially-constructed object via the dtor.
 *
 * NOTE(review): an early error return leaves later engines'
 * NetBSD spin locks / wait queues uninitialized, yet the dtor
 * destroys all FIFO_ENGINE_NR of them — confirm the dtor is safe on
 * partially-constructed state.
 */
int
nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nve0_fifo_impl *impl = (void *)oclass;
	struct nve0_fifo_priv *priv;
	int ret, i;

	/* channel ids run 0 .. impl->channels - 1 */
	ret = nouveau_fifo_create(parent, engine, oclass, 0,
				  impl->channels - 1, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	INIT_WORK(&priv->fault, nve0_fifo_recover_work);

	/* two runlist buffers per engine (double-buffered updates) */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
					 0, &priv->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
					 0, &priv->engine[i].runlist[1]);
		if (ret)
			return ret;

#ifdef __NetBSD__
		spin_lock_init(&priv->engine[i].lock);
		DRM_INIT_WAITQUEUE(&priv->engine[i].wait, "nve0fifo" );
#else
		init_waitqueue_head(&priv->engine[i].wait);
#endif
	}

	/* USERD: 0x200 bytes of zeroed control area per channel */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
				0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
	if (ret)
		return ret;

	/* map USERD read/write through the BAR for userspace access */
	ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
				&priv->user.bar);
	if (ret)
		return ret;

	priv->base.uevent->enable = nve0_fifo_uevent_enable;
	priv->base.uevent->disable = nve0_fifo_uevent_disable;
	priv->base.uevent->priv = priv;

	nv_subdev(priv)->unit = 0x00000100;	/* PMC enable bit for PFIFO */
	nv_subdev(priv)->intr = nve0_fifo_intr;
	nv_engine(priv)->cclass = &nve0_fifo_cclass;
	nv_engine(priv)->sclass = nve0_fifo_sclass;
	return 0;
}
1134 | |
/* Class descriptor for the nve0 (Kepler, 0xe0) fifo engine. */
struct nouveau_oclass *
nve0_fifo_oclass = &(struct nve0_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_fifo_ctor,
		.dtor = nve0_fifo_dtor,
		.init = nve0_fifo_init,
		.fini = nve0_fifo_fini,
	},
	.channels = 4096,
}.base;
1146 | |