1 | /* $NetBSD: nouveau_engine_fifo_nvc0.c,v 1.5 2015/10/25 21:44:16 mrg Exp $ */ |
2 | |
3 | /* |
4 | * Copyright 2012 Red Hat Inc. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Ben Skeggs |
25 | */ |
26 | |
27 | #include <sys/cdefs.h> |
28 | __KERNEL_RCSID(0, "$NetBSD: nouveau_engine_fifo_nvc0.c,v 1.5 2015/10/25 21:44:16 mrg Exp $" ); |
29 | |
30 | #include <core/client.h> |
31 | #include <core/handle.h> |
32 | #include <core/namedb.h> |
33 | #include <core/gpuobj.h> |
34 | #include <core/engctx.h> |
35 | #include <core/event.h> |
36 | #include <core/class.h> |
37 | #include <core/enum.h> |
38 | |
39 | #include <subdev/timer.h> |
40 | #include <subdev/bar.h> |
41 | #include <subdev/fb.h> |
42 | #include <subdev/vm.h> |
43 | |
44 | #include <engine/dmaobj.h> |
45 | #include <engine/fifo.h> |
46 | |
47 | #include <drm/drmP.h> /* XXX */ |
48 | #include <linux/workqueue.h> /* XXX */ |
49 | |
/* Per-device PFIFO engine state for NVC0 (Fermi). */
struct nvc0_fifo_priv {
	struct nouveau_fifo base;

	/* Deferred engine-recovery work; 'mask' holds the set of engine
	 * indices (1ULL << NVDEV_ENGINE_*) still awaiting reset. */
	struct work_struct fault;
	u64 mask;

	/* Double-buffered channel runlist; 'active' selects the buffer
	 * most recently handed to the hardware. */
	struct {
		struct nouveau_gpuobj *mem[2];
		int active;
#ifdef __NetBSD__
		spinlock_t lock;	/* protects 'wait' */
		drm_waitqueue_t wait;	/* runlist-update completion */
#else
		wait_queue_head_t wait;
#endif
	} runlist;

	/* Shared user-control area (one 4KiB page per channel, 128
	 * channels), mapped through BAR at user.bar. */
	struct {
		struct nouveau_gpuobj *mem;
		struct nouveau_vma bar;
	} user;
	int spoon_nr;	/* number of PBDMA units present */
};
73 | |
/* Per-channel context container: instance block plus page directory/VM. */
struct nvc0_fifo_base {
	struct nouveau_fifo_base base;
	struct nouveau_gpuobj *pgd;	/* page directory backing 'vm' */
	struct nouveau_vm *vm;
};
79 | |
/* Per-channel state; 'state' tracks runlist membership. */
struct nvc0_fifo_chan {
	struct nouveau_fifo_chan base;
	enum {
		STOPPED,
		RUNNING,
		KILLED	/* removed after a fault; never rescheduled */
	} state;
};
88 | |
89 | /******************************************************************************* |
90 | * FIFO channel objects |
91 | ******************************************************************************/ |
92 | |
93 | static void |
94 | nvc0_fifo_runlist_update(struct nvc0_fifo_priv *priv) |
95 | { |
96 | struct nouveau_bar *bar = nouveau_bar(priv); |
97 | struct nouveau_gpuobj *cur; |
98 | int i, p; |
99 | |
100 | mutex_lock(&nv_subdev(priv)->mutex); |
101 | cur = priv->runlist.mem[priv->runlist.active]; |
102 | priv->runlist.active = !priv->runlist.active; |
103 | |
104 | for (i = 0, p = 0; i < 128; i++) { |
105 | struct nvc0_fifo_chan *chan = (void *)priv->base.channel[i]; |
106 | if (chan && chan->state == RUNNING) { |
107 | nv_wo32(cur, p + 0, i); |
108 | nv_wo32(cur, p + 4, 0x00000004); |
109 | p += 8; |
110 | } |
111 | } |
112 | bar->flush(bar); |
113 | |
114 | nv_wr32(priv, 0x002270, cur->addr >> 12); |
115 | nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3)); |
116 | |
117 | #ifdef __NetBSD__ |
118 | if (cold) { |
119 | uint count = 2000; |
120 | while (count-- > 0) { |
121 | if (!(nv_rd32(priv, 0x00227c) & 0x00100000)) |
122 | break; |
123 | delay(1000); |
124 | } |
125 | if (count == 0) |
126 | nv_error(priv, "runlist update timeout\n" ); |
127 | } else { |
128 | int ret; |
129 | |
130 | spin_lock(&priv->runlist.lock); |
131 | DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &priv->runlist.wait, |
132 | &priv->runlist.lock, msecs_to_jiffies(2000), |
133 | !(nv_rd32(priv, 0x00227c) & 0x00100000)); |
134 | if (ret == 0) |
135 | nv_error(priv, "runlist update timeout\n" ); |
136 | spin_unlock(&priv->runlist.lock); |
137 | } |
138 | #else |
139 | if (wait_event_timeout(priv->runlist.wait, |
140 | !(nv_rd32(priv, 0x00227c) & 0x00100000), |
141 | msecs_to_jiffies(2000)) == 0) |
142 | nv_error(priv, "runlist update timeout\n" ); |
143 | #endif |
144 | mutex_unlock(&nv_subdev(priv)->mutex); |
145 | } |
146 | |
/*
 * Attach an engine context to a channel: map the context into the
 * channel's VM on first use, then write its VM address into the
 * engine's context-pointer slot in the channel's instance block.
 */
static int
nvc0_fifo_context_attach(struct nouveau_object *parent,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nvc0_fifo_base *base = (void *)parent->parent;
	struct nouveau_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	/* Per-engine context-pointer slot offset in the instance block. */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;
	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
	case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
	case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* First attach for this context: map it into the channel VM. */
	if (!ectx->vma.node) {
		ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					    NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	/* Low word carries a valid/flag bit (| 4) alongside the address. */
	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	return 0;
}
183 | |
/*
 * Detach an engine context: kick the channel off the engine (waiting
 * for the context save to complete) and clear the engine's slot in the
 * instance block.  Returns -EBUSY if the kick times out during suspend;
 * otherwise the timeout is logged and teardown continues.
 */
static int
nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nvc0_fifo_priv *priv = (void *)parent->engine;
	struct nvc0_fifo_base *base = (void *)parent->parent;
	struct nvc0_fifo_chan *chan = (void *)parent;
	u32 addr;

	/* Per-engine context-pointer slot offset in the instance block. */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;
	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
	case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
	case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* Kick the channel; 0x002634 reads back the chid when done. */
	nv_wr32(priv, 0x002634, chan->base.chid);
	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
		nv_error(priv, "channel %d [%s] kick timeout\n",
			 chan->base.chid, nouveau_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	nv_wo32(base, addr + 0x00, 0x00000000);
	nv_wo32(base, addr + 0x04, 0x00000000);
	bar->flush(bar);
	return 0;
}
219 | |
/*
 * Create a fifo channel: allocate the generic channel object (exposing
 * a 4KiB window of the shared user area), zero the channel's page of
 * user memory, and fill in the channel's instance block (user area
 * address, indirect-buffer base/size, and fixed init values).
 */
static int
nvc0_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nvc0_fifo_priv *priv = (void *)engine;
	struct nvc0_fifo_base *base = (void *)parent;
	struct nvc0_fifo_chan *chan;
	struct nv50_channel_ind_class *args = data;
	u64 usermem, ioffset, ilength;
	int ret, i;

	if (size < sizeof(*args))
		return -EINVAL;

	/* Engine mask: all engines this channel may be attached to. */
	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
					  priv->user.bar.offset, 0x1000,
					  args->pushbuf,
					  (1ULL << NVDEV_ENGINE_SW) |
					  (1ULL << NVDEV_ENGINE_GR) |
					  (1ULL << NVDEV_ENGINE_COPY0) |
					  (1ULL << NVDEV_ENGINE_COPY1) |
					  (1ULL << NVDEV_ENGINE_BSP) |
					  (1ULL << NVDEV_ENGINE_VP) |
					  (1ULL << NVDEV_ENGINE_PPP), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
	nv_parent(chan)->context_detach = nvc0_fifo_context_detach;

	/* One 4KiB page of the shared user buffer per channel. */
	usermem = chan->base.chid * 0x1000;
	ioffset = args->ioffset;
	ilength = order_base_2(args->ilength / 8);

	/* Clear the channel's user page. */
	for (i = 0; i < 0x1000; i += 4)
		nv_wo32(priv->user.mem, usermem + i, 0x00000000);

	/* Instance block: user area address (0x08/0x0c), ib base and
	 * log2 size (0x48/0x4c), plus fixed values whose meanings are
	 * largely undocumented. */
	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x10, 0x0000face);
	nv_wo32(base, 0x30, 0xfffff902);
	nv_wo32(base, 0x48, lower_32_bits(ioffset));
	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base, 0x54, 0x00000002);
	nv_wo32(base, 0x84, 0x20400000);
	nv_wo32(base, 0x94, 0x30000001);
	nv_wo32(base, 0x9c, 0x00000100);
	nv_wo32(base, 0xa4, 0x1f1f1f1f);
	nv_wo32(base, 0xa8, 0x1f1f1f1f);
	nv_wo32(base, 0xac, 0x0000001f);
	nv_wo32(base, 0xb8, 0xf8000000);
	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	return 0;
}
280 | |
/*
 * Bring a channel online: bind its instance block to the chid and, if
 * it was STOPPED, mark it RUNNING and commit a new runlist.
 */
static int
nvc0_fifo_chan_init(struct nouveau_object *object)
{
	struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
	struct nvc0_fifo_priv *priv = (void *)object->engine;
	struct nvc0_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;
	int ret;

	ret = nouveau_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);

	/* Compare-and-set idiom: STOPPED -> RUNNING in one condition.
	 * KILLED channels deliberately stay off the runlist. */
	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
		nvc0_fifo_runlist_update(priv);
	}

	return 0;
}
303 | |
304 | static void nvc0_fifo_intr_engine(struct nvc0_fifo_priv *priv); |
305 | |
/*
 * Take a channel offline: if RUNNING, mark it STOPPED and drop it from
 * the runlist, drain pending engine interrupts, then unbind its
 * instance block.
 */
static int
nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
	struct nvc0_fifo_priv *priv = (void *)object->engine;
	struct nvc0_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;

	/* Compare-and-set idiom: RUNNING -> STOPPED in one condition;
	 * KILLED stays KILLED. */
	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
		nvc0_fifo_runlist_update(priv);
	}

	/* Flush any pending engine (non-stall) interrupts before the
	 * channel goes away. */
	nvc0_fifo_intr_engine(priv);

	nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
	return nouveau_fifo_channel_fini(&chan->base, suspend);
}
323 | |
/* Channel object functions; dtor/rd32/wr32 use the generic helpers. */
static struct nouveau_ofuncs
nvc0_fifo_ofuncs = {
	.ctor = nvc0_fifo_chan_ctor,
	.dtor = _nouveau_fifo_channel_dtor,
	.init = nvc0_fifo_chan_init,
	.fini = nvc0_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};
333 | |
/* Channel classes exposed by this fifo engine. */
static struct nouveau_oclass
nvc0_fifo_sclass[] = {
	{ NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs },
	{}
};
339 | |
340 | /******************************************************************************* |
341 | * FIFO context - instmem heap and vm setup |
342 | ******************************************************************************/ |
343 | |
/*
 * Create the per-channel context container: a 4KiB zeroed instance
 * block with a heap, a 64KiB page directory, and a VM reference shared
 * with the owning client.
 */
static int
nvc0_fifo_context_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nvc0_fifo_base *base;
	int ret;

	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
					  0x1000, NVOBJ_FLAG_ZERO_ALLOC |
					  NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
				 &base->pgd);
	if (ret)
		return ret;

	/* Point the instance block at the page directory; 0x0208/0x020c
	 * are fixed values written alongside it (presumably the VM
	 * address limit -- unverified here). */
	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0208, 0xffffffff);
	nv_wo32(base, 0x020c, 0x000000ff);

	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}
376 | |
/*
 * Destroy the context container; teardown mirrors the ctor in reverse
 * (drop the VM reference before the page directory, then the base).
 */
static void
nvc0_fifo_context_dtor(struct nouveau_object *object)
{
	struct nvc0_fifo_base *base = (void *)object;
	nouveau_vm_ref(NULL, &base->vm, base->pgd);
	nouveau_gpuobj_ref(NULL, &base->pgd);
	nouveau_fifo_context_destroy(&base->base);
}
385 | |
/* Engine-context class; init/fini/rd32/wr32 use the generic helpers. */
static struct nouveau_oclass
nvc0_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_fifo_context_ctor,
		.dtor = nvc0_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
398 | |
399 | /******************************************************************************* |
400 | * PFIFO engine |
401 | ******************************************************************************/ |
402 | |
403 | static inline int |
404 | nvc0_fifo_engidx(struct nvc0_fifo_priv *priv, u32 engn) |
405 | { |
406 | switch (engn) { |
407 | case NVDEV_ENGINE_GR : engn = 0; break; |
408 | case NVDEV_ENGINE_BSP : engn = 1; break; |
409 | case NVDEV_ENGINE_PPP : engn = 2; break; |
410 | case NVDEV_ENGINE_VP : engn = 3; break; |
411 | case NVDEV_ENGINE_COPY0: engn = 4; break; |
412 | case NVDEV_ENGINE_COPY1: engn = 5; break; |
413 | default: |
414 | return -1; |
415 | } |
416 | |
417 | return engn; |
418 | } |
419 | |
420 | static inline struct nouveau_engine * |
421 | nvc0_fifo_engine(struct nvc0_fifo_priv *priv, u32 engn) |
422 | { |
423 | switch (engn) { |
424 | case 0: engn = NVDEV_ENGINE_GR; break; |
425 | case 1: engn = NVDEV_ENGINE_BSP; break; |
426 | case 2: engn = NVDEV_ENGINE_PPP; break; |
427 | case 3: engn = NVDEV_ENGINE_VP; break; |
428 | case 4: engn = NVDEV_ENGINE_COPY0; break; |
429 | case 5: engn = NVDEV_ENGINE_COPY1; break; |
430 | default: |
431 | return NULL; |
432 | } |
433 | |
434 | return nouveau_engine(priv, engn); |
435 | } |
436 | |
437 | static void |
438 | nvc0_fifo_recover_work(struct work_struct *work) |
439 | { |
440 | struct nvc0_fifo_priv *priv = container_of(work, typeof(*priv), fault); |
441 | struct nouveau_object *engine; |
442 | unsigned long flags; |
443 | u32 engn, engm = 0; |
444 | u64 mask, todo; |
445 | |
446 | spin_lock_irqsave(&priv->base.lock, flags); |
447 | mask = priv->mask; |
448 | priv->mask = 0ULL; |
449 | spin_unlock_irqrestore(&priv->base.lock, flags); |
450 | |
451 | for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) |
452 | engm |= 1 << nvc0_fifo_engidx(priv, engn); |
453 | nv_mask(priv, 0x002630, engm, engm); |
454 | |
455 | for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { |
456 | if ((engine = (void *)nouveau_engine(priv, engn))) { |
457 | nv_ofuncs(engine)->fini(engine, false); |
458 | WARN_ON(nv_ofuncs(engine)->init(engine)); |
459 | } |
460 | } |
461 | |
462 | nvc0_fifo_runlist_update(priv); |
463 | nv_wr32(priv, 0x00262c, engm); |
464 | nv_mask(priv, 0x002630, engm, 0x00000000); |
465 | } |
466 | |
/*
 * Schedule recovery for a faulting channel: stop its scheduling, mark
 * it KILLED (so the next runlist rebuild drops it), flag the engine in
 * priv->mask, and queue the reset work.  Safe to call from interrupt
 * context (uses spin_lock_irqsave + schedule_work).
 */
static void
nvc0_fifo_recover(struct nvc0_fifo_priv *priv, struct nouveau_engine *engine,
		  struct nvc0_fifo_chan *chan)
{
	struct nouveau_object *engobj = nv_object(engine);
	u32 chid = chan->base.chid;
	unsigned long flags;

	nv_error(priv, "%s engine fault on channel %d, recovering...\n",
		 nv_subdev(engine)->name, chid);

	nv_mask(priv, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	chan->state = KILLED;

	spin_lock_irqsave(&priv->base.lock, flags);
	priv->mask |= 1ULL << nv_engidx(engobj);
	spin_unlock_irqrestore(&priv->base.lock, flags);
	schedule_work(&priv->fault);
}
486 | |
/*
 * Try to dispatch a software method for channel 'chid' to its bound
 * software object (class 0x906e).  Returns 0 if an object consumed the
 * method, -EINVAL otherwise (unknown chid, no binding, or the call
 * failed).
 */
static int
nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
	struct nvc0_fifo_chan *chan = NULL;
	struct nouveau_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&priv->base.lock, flags);
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		/* mthd == 0 is treated as "handled" without a call. */
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nouveau_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return ret;
}
512 | |
/* SCHED_ERROR reason codes (register 0x00254c, low byte). */
static const struct nouveau_enum
nvc0_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};
518 | |
/*
 * Handle SCHED_ERROR code 0x0a (ctxsw timeout): scan each engine's
 * status register and recover any channel stuck mid context switch.
 */
static void
nvc0_fifo_intr_sched_ctxsw(struct nvc0_fifo_priv *priv)
{
	struct nouveau_engine *engine;
	struct nvc0_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < 6; engn++) {
		u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		/* Engine busy with both unknown flags set is taken to
		 * mean a wedged context switch. */
		if (busy && unk0 && unk1) {
			if (!(chan = (void *)priv->base.channel[chid]))
				continue;
			if (!(engine = nvc0_fifo_engine(priv, engn)))
				continue;
			nvc0_fifo_recover(priv, engine, chan);
		}
	}
}
544 | |
/*
 * Handle a SCHED_ERROR interrupt: log the reason code and dispatch
 * ctxsw timeouts to the recovery path.
 */
static void
nvc0_fifo_intr_sched(struct nvc0_fifo_priv *priv)
{
	u32 intr = nv_rd32(priv, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nouveau_enum *en;
	char enunk[6] = "";

	en = nouveau_enum_find(nvc0_fifo_sched_reason, code);
	if (!en)
		snprintf(enunk, sizeof(enunk), "UNK%02x", code);

	nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);

	switch (code) {
	case 0x0a:
		nvc0_fifo_intr_sched_ctxsw(priv);
		break;
	default:
		break;
	}
}
567 | |
/* Fault unit IDs -> name and associated engine/subdev (data2). */
static const struct nouveau_enum
nvc0_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP },
	{ 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PVP", NULL, NVDEV_ENGINE_VP },
	{ 0x15, "PCOPY0", NULL, NVDEV_ENGINE_COPY0 },
	{ 0x16, "PCOPY1", NULL, NVDEV_ENGINE_COPY1 },
	{ 0x17, "PDAEMON" },
	{}
};
584 | |
/* MMU fault reason codes (low nibble of the fault status register). */
static const struct nouveau_enum
nvc0_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};
598 | |
/* HUB client IDs used when decoding fault reports. */
static const struct nouveau_enum
nvc0_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PPPP" },
	{ 0x0d, "PBSP" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};
617 | |
/* GPC client IDs used when decoding fault reports. */
static const struct nouveau_enum
nvc0_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};
626 | |
/*
 * Decode and report an MMU fault from fault unit 'unit', then trigger
 * recovery of the offending channel if one can be identified from the
 * faulting engine context.
 */
static void
nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
{
	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nouveau_object *engctx = NULL, *object;
	struct nouveau_engine *engine = NULL;
	const struct nouveau_enum *er, *eu, *ec;
	char erunk[6] = "";
	char euunk[6] = "";
	char ecunk[6] = "";
	char gpcid[3] = "";

	er = nouveau_enum_find(nvc0_fifo_fault_reason, reason);
	if (!er)
		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);

	/* BAR/IFB faults get a register poke (presumably a flush/reset
	 * of the unit); engine faults look up the faulting context. */
	eu = nouveau_enum_find(nvc0_fifo_fault_engine, unit);
	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nouveau_engine(priv, eu->data2);
			if (engine)
				engctx = nouveau_engctx_get(engine, inst);
			break;
		}
	} else {
		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
	}

	/* Client ID namespace differs between HUB and GPC faults. */
	if (hub) {
		ec = nouveau_enum_find(nvc0_fifo_fault_hubclient, client);
	} else {
		ec = nouveau_enum_find(nvc0_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
	}

	if (!ec)
		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);

	nv_error(priv, "%s fault at 0x%010"PRIx64" [%s] from %s/%s%s%s%s on "
		 "channel 0x%010"PRIx64" [%s]\n", write ? "write" : "read",
		 (u64)vahi << 32 | valo, er ? er->name : erunk,
		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
		 ec ? ec->name : ecunk, (u64)inst << 12,
		 nouveau_client_name(engctx));

	/* Walk up from the engine context to find the channel object
	 * and hand it to the recovery path. */
	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case NVC0_CHANNEL_IND_CLASS:
			nvc0_fifo_recover(priv, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nouveau_engctx_put(engctx);
}
702 | |
/* PBDMA interrupt status bits (register 0x040108 + unit * 0x2000). */
static const struct nouveau_bitfield
nvc0_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};
710 | |
/*
 * Handle a PBDMA unit interrupt: first try to dispatch the method as a
 * software method (0x00800000); log whatever remains, then ack the
 * method slot and the interrupt status.
 */
static void
nvc0_fifo_intr_pbdma(struct nvc0_fifo_priv *priv, int unit)
{
	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;

	/* Software-method handling clears the bit from what we report. */
	if (stat & 0x00800000) {
		if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nv_error(priv, "PBDMA%d:", unit);
		nouveau_bitfield_print(nvc0_fifo_pbdma_intr, show);
		pr_cont("\n");
		nv_error(priv,
			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 unit, chid,
			 nouveau_client_name_for_fifo_chid(&priv->base, chid),
			 subc, mthd, data);
	}

	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}
741 | |
/*
 * Handle a runlist interrupt: bit 28 acknowledges a runlist update and
 * wakes the waiter in nvc0_fifo_runlist_update(); anything else is
 * logged and acked.
 */
static void
nvc0_fifo_intr_runlist(struct nvc0_fifo_priv *priv)
{
	u32 intr = nv_rd32(priv, 0x002a00);

	if (intr & 0x10000000) {
#ifdef __NetBSD__
		spin_lock(&priv->runlist.lock);
		DRM_SPIN_WAKEUP_ONE(&priv->runlist.wait, &priv->runlist.lock);
		spin_unlock(&priv->runlist.lock);
#else
		wake_up(&priv->runlist.wait);
#endif
		nv_wr32(priv, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nv_error(priv, "RUNLIST 0x%08x\n", intr);
		nv_wr32(priv, 0x002a00, intr);
	}
}
764 | |
/*
 * Handle one engine's interrupt word: bit 0 of each enabled 4-bit
 * group triggers a user event; any other enabled bits are logged and
 * masked off, then the whole word is acked.
 */
static void
nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
{
	u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04));
	u32 inte = nv_rd32(priv, 0x002628);
	u32 unkn;

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nouveau_event_trigger(priv->base.uevent, 0);
			ints &= ~1;
		}
		if (ints) {
			nv_error(priv, "ENGINE %d %d %01x", engn, unkn, ints);
			nv_mask(priv, 0x002628, ints, 0);
		}
	}

	nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
}
786 | |
787 | static void |
788 | nvc0_fifo_intr_engine(struct nvc0_fifo_priv *priv) |
789 | { |
790 | u32 mask = nv_rd32(priv, 0x0025a4); |
791 | while (mask) { |
792 | u32 unit = __ffs(mask); |
793 | nvc0_fifo_intr_engine_unit(priv, unit); |
794 | mask &= ~(1 << unit); |
795 | } |
796 | } |
797 | |
/*
 * Top-level PFIFO interrupt handler: decode the masked status word bit
 * by bit, dispatch to the per-source handlers, and as a last resort
 * mask off and ack anything unrecognized so it can't storm.
 */
static void
nvc0_fifo_intr(struct nouveau_subdev *subdev)
{
	struct nvc0_fifo_priv *priv = (void *)subdev;
	u32 mask = nv_rd32(priv, 0x002140);
	u32 stat = nv_rd32(priv, 0x002100) & mask;

	/* Unknown source; 0x00252c holds extra detail. */
	if (stat & 0x00000001) {
		u32 intr = nv_rd32(priv, 0x00252c);
		nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
		nv_wr32(priv, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	/* Scheduler error (e.g. ctxsw timeout). */
	if (stat & 0x00000100) {
		nvc0_fifo_intr_sched(priv);
		nv_wr32(priv, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	/* Unknown source; 0x00256c holds extra detail. */
	if (stat & 0x00010000) {
		u32 intr = nv_rd32(priv, 0x00256c);
		nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
		nv_wr32(priv, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	/* Unknown source; 0x00258c holds extra detail. */
	if (stat & 0x01000000) {
		u32 intr = nv_rd32(priv, 0x00258c);
		nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
		nv_wr32(priv, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	/* MMU faults; 0x00259c is a bitmask of faulting units. */
	if (stat & 0x10000000) {
		u32 mask = nv_rd32(priv, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			nvc0_fifo_intr_fault(priv, unit);
			nv_wr32(priv, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	/* PBDMA interrupts; 0x0025a0 is a bitmask of units. */
	if (stat & 0x20000000) {
		u32 mask = nv_rd32(priv, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			nvc0_fifo_intr_pbdma(priv, unit);
			nv_wr32(priv, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	/* Runlist update acknowledge. */
	if (stat & 0x40000000) {
		nvc0_fifo_intr_runlist(priv);
		stat &= ~0x40000000;
	}

	/* Engine (non-stall) interrupts / user events. */
	if (stat & 0x80000000) {
		nvc0_fifo_intr_engine(priv);
		stat &= ~0x80000000;
	}

	/* Anything left is unknown: log it and mask it off. */
	if (stat) {
		nv_error(priv, "INTR 0x%08x\n", stat);
		nv_mask(priv, 0x002140, stat, 0x00000000);
		nv_wr32(priv, 0x002100, stat);
	}
}
870 | |
871 | static void |
872 | nvc0_fifo_uevent_enable(struct nouveau_event *event, int index) |
873 | { |
874 | struct nvc0_fifo_priv *priv = event->priv; |
875 | nv_mask(priv, 0x002140, 0x80000000, 0x80000000); |
876 | } |
877 | |
878 | static void |
879 | nvc0_fifo_uevent_disable(struct nouveau_event *event, int index) |
880 | { |
881 | struct nvc0_fifo_priv *priv = event->priv; |
882 | nv_mask(priv, 0x002140, 0x80000000, 0x00000000); |
883 | } |
884 | |
/*
 * Construct the PFIFO engine object: channels 0..127, the two runlist
 * buffers, the shared 128-page user area (mapped through BAR), the
 * recovery work item, and the user-event hooks.
 */
static int
nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nvc0_fifo_priv *priv;
	int ret;

	ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	INIT_WORK(&priv->fault, nvc0_fifo_recover_work);

#ifdef __NetBSD__
	spin_lock_init(&priv->runlist.lock);
	DRM_INIT_WAITQUEUE(&priv->runlist.wait, "nvfifo");
#endif

	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
				 &priv->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
				 &priv->runlist.mem[1]);
	if (ret)
		return ret;

#ifndef __NetBSD__
	init_waitqueue_head(&priv->runlist.wait);
#endif

	/* Shared user area: one 4KiB page per channel. */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
				 &priv->user.mem);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
				 &priv->user.bar);
	if (ret)
		return ret;

	priv->base.uevent->enable = nvc0_fifo_uevent_enable;
	priv->base.uevent->disable = nvc0_fifo_uevent_disable;
	priv->base.uevent->priv = priv;

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nvc0_fifo_intr;
	nv_engine(priv)->cclass = &nvc0_fifo_cclass;
	nv_engine(priv)->sclass = nvc0_fifo_sclass;
	return 0;
}
939 | |
/*
 * Tear down the PFIFO engine object; releases everything the ctor
 * allocated, in reverse order.
 */
static void
nvc0_fifo_dtor(struct nouveau_object *object)
{
	struct nvc0_fifo_priv *priv = (void *)object;

	nouveau_gpuobj_unmap(&priv->user.bar);
	nouveau_gpuobj_ref(NULL, &priv->user.mem);
	nouveau_gpuobj_ref(NULL, &priv->runlist.mem[0]);
	nouveau_gpuobj_ref(NULL, &priv->runlist.mem[1]);

#ifdef __NetBSD__
	DRM_DESTROY_WAITQUEUE(&priv->runlist.wait);
	spin_lock_destroy(&priv->runlist.lock);
#endif

	nouveau_fifo_destroy(&priv->base);
}
957 | |
/*
 * Initialise the PFIFO hardware: enable and count the PBDMA units,
 * assign engines to PBDMAs, program per-PBDMA interrupt enables, point
 * the hardware at the user area, and unmask PFIFO interrupts.
 */
static int
nvc0_fifo_init(struct nouveau_object *object)
{
	struct nvc0_fifo_priv *priv = (void *)object;
	int ret, i;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	/* Request all PBDMA units; reading back 0x002204 reveals how
	 * many actually exist. */
	nv_wr32(priv, 0x000204, 0xffffffff);
	nv_wr32(priv, 0x002204, 0xffffffff);

	priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);

	/* assign engines to PBDMAs */
	if (priv->spoon_nr >= 3) {
		nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
		nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
		nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */
		nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */
		nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
		nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < priv->spoon_nr; i++) {
		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* Enable PFIFO and program the user-area BAR address. */
	nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

	/* Ack any stale interrupts, then unmask all but bit 31 (the
	 * engine/non-stall bit, controlled by the uevent hooks). */
	nv_wr32(priv, 0x002100, 0xffffffff);
	nv_wr32(priv, 0x002140, 0x7fffffff);
	nv_wr32(priv, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
	return 0;
}
999 | |
/* Public class descriptor for the NVC0 (Fermi) PFIFO engine. */
struct nouveau_oclass *
nvc0_fifo_oclass = &(struct nouveau_oclass) {
	.handle = NV_ENGINE(FIFO, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_fifo_ctor,
		.dtor = nvc0_fifo_dtor,
		.init = nvc0_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};
1010 | |