1 | /* $NetBSD: nouveau_engine_fifo_nv04.c,v 1.1.1.1 2014/08/06 12:36:24 riastradh Exp $ */ |
2 | |
3 | /* |
4 | * Copyright 2012 Red Hat Inc. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Ben Skeggs |
25 | */ |
26 | |
27 | #include <sys/cdefs.h> |
28 | __KERNEL_RCSID(0, "$NetBSD: nouveau_engine_fifo_nv04.c,v 1.1.1.1 2014/08/06 12:36:24 riastradh Exp $" ); |
29 | |
30 | #include <core/os.h> |
31 | #include <core/class.h> |
32 | #include <core/engctx.h> |
33 | #include <core/namedb.h> |
34 | #include <core/handle.h> |
35 | #include <core/ramht.h> |
36 | #include <core/event.h> |
37 | |
38 | #include <subdev/instmem.h> |
39 | #include <subdev/instmem/nv04.h> |
40 | #include <subdev/timer.h> |
41 | #include <subdev/fb.h> |
42 | |
43 | #include <engine/fifo.h> |
44 | |
45 | #include "nv04.h" |
46 | |
/* Layout of a channel's RAMFC (fifo context) entry on NV04.  Each row
 * describes one saved field; nv04_fifo_chan_fini() uses the columns as:
 * { bit width, shift within the RAMFC dword, byte offset of the RAMFC
 *   dword, shift within the hardware register, register address }.
 * The empty terminator ends the table.
 */
static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};
59 | |
60 | /******************************************************************************* |
61 | * FIFO channel objects |
62 | ******************************************************************************/ |
63 | |
64 | int |
65 | nv04_fifo_object_attach(struct nouveau_object *parent, |
66 | struct nouveau_object *object, u32 handle) |
67 | { |
68 | struct nv04_fifo_priv *priv = (void *)parent->engine; |
69 | struct nv04_fifo_chan *chan = (void *)parent; |
70 | u32 context, chid = chan->base.chid; |
71 | int ret; |
72 | |
73 | if (nv_iclass(object, NV_GPUOBJ_CLASS)) |
74 | context = nv_gpuobj(object)->addr >> 4; |
75 | else |
76 | context = 0x00000004; /* just non-zero */ |
77 | |
78 | switch (nv_engidx(object->engine)) { |
79 | case NVDEV_ENGINE_DMAOBJ: |
80 | case NVDEV_ENGINE_SW: |
81 | context |= 0x00000000; |
82 | break; |
83 | case NVDEV_ENGINE_GR: |
84 | context |= 0x00010000; |
85 | break; |
86 | case NVDEV_ENGINE_MPEG: |
87 | context |= 0x00020000; |
88 | break; |
89 | default: |
90 | return -EINVAL; |
91 | } |
92 | |
93 | context |= 0x80000000; /* valid */ |
94 | context |= chid << 24; |
95 | |
96 | mutex_lock(&nv_subdev(priv)->mutex); |
97 | ret = nouveau_ramht_insert(priv->ramht, chid, handle, context); |
98 | mutex_unlock(&nv_subdev(priv)->mutex); |
99 | return ret; |
100 | } |
101 | |
102 | void |
103 | nv04_fifo_object_detach(struct nouveau_object *parent, int cookie) |
104 | { |
105 | struct nv04_fifo_priv *priv = (void *)parent->engine; |
106 | mutex_lock(&nv_subdev(priv)->mutex); |
107 | nouveau_ramht_remove(priv->ramht, cookie); |
108 | mutex_unlock(&nv_subdev(priv)->mutex); |
109 | } |
110 | |
111 | int |
112 | nv04_fifo_context_attach(struct nouveau_object *parent, |
113 | struct nouveau_object *object) |
114 | { |
115 | nv_engctx(object)->addr = nouveau_fifo_chan(parent)->chid; |
116 | return 0; |
117 | } |
118 | |
/* Construct a DMA channel object.  Allocates the channel through the
 * common fifo code, hooks up the nv04 attach/detach callbacks, and
 * fills in the channel's RAMFC entry so the hardware can context
 * switch to it.  Returns 0 on success or a negative error code.
 */
static int
nv04_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	struct nv03_channel_dma_class *args = data;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	/* 0x800000/0x10000 are the user control-area base/stride handed
	 * to the common code; only DMAOBJ, SW and GR engines are
	 * permitted on this channel class.
	 */
	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
					  0x10000, args->pushbuf,
					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
					  (1ULL << NVDEV_ENGINE_SW) |
					  (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;	/* 32-byte RAMFC entries */

	/* DMA_PUT and DMA_GET both start at the requested pushbuf offset */
	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
	/* pushbuf dma object instance, and default fetch parameters */
	nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x10,
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}
159 | |
160 | void |
161 | nv04_fifo_chan_dtor(struct nouveau_object *object) |
162 | { |
163 | struct nv04_fifo_priv *priv = (void *)object->engine; |
164 | struct nv04_fifo_chan *chan = (void *)object; |
165 | struct ramfc_desc *c = priv->ramfc_desc; |
166 | |
167 | do { |
168 | nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000); |
169 | } while ((++c)->bits); |
170 | |
171 | nouveau_fifo_channel_destroy(&chan->base); |
172 | } |
173 | |
174 | int |
175 | nv04_fifo_chan_init(struct nouveau_object *object) |
176 | { |
177 | struct nv04_fifo_priv *priv = (void *)object->engine; |
178 | struct nv04_fifo_chan *chan = (void *)object; |
179 | u32 mask = 1 << chan->base.chid; |
180 | unsigned long flags; |
181 | int ret; |
182 | |
183 | ret = nouveau_fifo_channel_init(&chan->base); |
184 | if (ret) |
185 | return ret; |
186 | |
187 | spin_lock_irqsave(&priv->base.lock, flags); |
188 | nv_mask(priv, NV04_PFIFO_MODE, mask, mask); |
189 | spin_unlock_irqrestore(&priv->base.lock, flags); |
190 | return 0; |
191 | } |
192 | |
/* Stop a channel.  If the channel currently owns CACHE1, its hardware
 * state is saved back into its RAMFC entry and the hardware is pointed
 * at the reserved channel (priv->base.max) before normal operation is
 * resumed.  The register write order below is deliberate; do not
 * reorder.
 */
int
nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
	struct nv04_fifo_priv *priv = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nouveau_gpuobj *fctx = priv->ramfc;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;	/* byte offset of this channel's entry */
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&priv->base.lock, flags);
	nv_wr32(priv, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
	if (chid == chan->base.chid) {
		/* halt the pusher and puller before touching state */
		nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
		nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		/* copy each live register field into its RAMFC slot,
		 * preserving the other bits of the RAMFC dword
		 */
		c = priv->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		/* then clear the hardware registers themselves */
		c = priv->ramfc_desc;
		do {
			nv_wr32(priv, c->regp, 0x00000000);
		} while ((++c)->bits);

		/* park CACHE1 on the reserved channel and restart it */
		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&priv->base.lock, flags);

	return nouveau_fifo_channel_fini(&chan->base, suspend);
}
243 | |
/* Object functions for NV03-class DMA channels; rd32/wr32 fall through
 * to the common channel register accessors.
 */
static struct nouveau_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};
253 | |
/* Channel classes creatable on this fifo (terminated by the empty
 * entry).
 */
static struct nouveau_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs },
	{}
};
259 | |
260 | /******************************************************************************* |
261 | * FIFO context - basically just the instmem reserved for the channel |
262 | ******************************************************************************/ |
263 | |
264 | int |
265 | nv04_fifo_context_ctor(struct nouveau_object *parent, |
266 | struct nouveau_object *engine, |
267 | struct nouveau_oclass *oclass, void *data, u32 size, |
268 | struct nouveau_object **pobject) |
269 | { |
270 | struct nv04_fifo_base *base; |
271 | int ret; |
272 | |
273 | ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000, |
274 | 0x1000, NVOBJ_FLAG_HEAP, &base); |
275 | *pobject = nv_object(base); |
276 | if (ret) |
277 | return ret; |
278 | |
279 | return 0; |
280 | } |
281 | |
/* Engine-context class; everything but construction is handled by the
 * generic fifo context helpers.
 */
static struct nouveau_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nouveau_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
294 | |
295 | /******************************************************************************* |
296 | * PFIFO engine |
297 | ******************************************************************************/ |
298 | |
/* Quiesce PFIFO: disable context switching and the puller, then wait
 * for the puller's hash lookup to go idle.  The saved irq state is
 * returned through *pflags for nv04_fifo_start(); priv->base.lock is
 * left held between the two calls.
 */
void
nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
__acquires(priv->base.lock)
{
	struct nv04_fifo_priv *priv = (void *)pfifo;
	unsigned long flags;

	spin_lock_irqsave(&priv->base.lock, flags);
	*pflags = flags;

	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
			   NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
		nv_warn(priv, "timeout idling puller\n");

	/* a failed lookup raises CACHE_ERROR; pre-ack it here */
	if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
331 | |
332 | void |
333 | nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags) |
334 | __releases(priv->base.lock) |
335 | { |
336 | struct nv04_fifo_priv *priv = (void *)pfifo; |
337 | unsigned long flags = *pflags; |
338 | |
339 | nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001); |
340 | nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001); |
341 | |
342 | spin_unlock_irqrestore(&priv->base.lock, flags); |
343 | } |
344 | |
345 | static const char * |
346 | nv_dma_state_err(u32 state) |
347 | { |
348 | static const char * const desc[] = { |
349 | "NONE" , "CALL_SUBR_ACTIVE" , "INVALID_MTHD" , "RET_SUBR_INACTIVE" , |
350 | "INVALID_CMD" , "IB_EMPTY" /* NV50+ */, "MEM_FAULT" , "UNK" |
351 | }; |
352 | return desc[(state >> 29) & 0x7]; |
353 | } |
354 | |
/* Attempt to execute a method in software.  Returns true when the
 * method was consumed here (so the caller need not report an error),
 * false when it must be treated as a real CACHE_ERROR.
 */
static bool
nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
{
	struct nv04_fifo_chan *chan = NULL;
	struct nouveau_handle *bind;
	const int subc = (addr >> 13) & 0x7;	/* subchannel index */
	const int mthd = addr & 0x1ffc;		/* method offset */
	bool handled = false;
	unsigned long flags;
	u32 engine;

	spin_lock_irqsave(&priv->base.lock, flags);
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		/* method 0x0000 binds an object to the subchannel.  If
		 * the object belongs to the SW engine, remember its
		 * handle in chan->subc[] and clear the subchannel's
		 * bits in CACHE1_ENGINE so later methods route here.
		 */
		bind = nouveau_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nouveau_namedb_put(bind);
		break;
	default:
		/* other methods are only software if the subchannel's
		 * engine field is zero (set up by the bind case above)
		 */
		engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nouveau_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return handled;
}
406 | |
/* Service a CACHE_ERROR interrupt: the puller hit a method it could
 * not execute at CACHE1 offset @get.  Try it as a software method;
 * if that fails, log it.  Either way, step GET past the bad entry
 * with the pusher briefly disabled and restart CACHE1.
 */
static void
nv04_fifo_cache_error(struct nouveau_device *device,
		      struct nv04_fifo_priv *priv, u32 chid, u32 get)
{
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	/* the CACHE1 method/data register banks moved on NV40 */
	if (device->card_type < NV_40) {
		mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
		const char *client_name =
			nouveau_client_name_for_fifo_chid(&priv->base, chid);
		nv_error(priv,
			 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
			 data);
	}

	/* stop the pusher and ack the interrupt before touching GET */
	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	/* skip the faulting entry with PUSH0 toggled off, then clear
	 * the puller's cached hash state
	 */
	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);

	/* resume the pusher and puller */
	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
		nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
}
452 | |
/* Service a DMA_PUSHER interrupt: the pushbuffer fetcher errored.
 * Log the channel's get/put/state, then advance GET to PUT (or ib_get
 * to ib_put on NV50) so the channel is not stuck re-faulting on the
 * same entry, and re-enable the pusher.
 */
static void
nv04_fifo_dma_pusher(struct nouveau_device *device, struct nv04_fifo_priv *priv,
		     u32 chid)
{
	const char *client_name;
	u32 dma_get = nv_rd32(priv, 0x003244);
	u32 dma_put = nv_rd32(priv, 0x003240);
	u32 push = nv_rd32(priv, 0x003220);
	u32 state = nv_rd32(priv, 0x003228);

	client_name = nouveau_client_name_for_fifo_chid(&priv->base, chid);

	if (device->card_type == NV_50) {
		/* NV50 keeps high-order get/put bits in separate
		 * registers and adds an indirect-buffer get/put pair
		 */
		u32 ho_get = nv_rd32(priv, 0x003328);
		u32 ho_put = nv_rd32(priv, 0x003320);
		u32 ib_get = nv_rd32(priv, 0x003334);
		u32 ib_put = nv_rd32(priv, 0x003330);

		nv_error(priv,
			 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
			 chid, client_name, ho_get, dma_get, ho_put, dma_put,
			 ib_get, ib_put, state, nv_dma_state_err(state), push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nv_wr32(priv, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nv_wr32(priv, 0x003244, dma_put);
			nv_wr32(priv, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nv_wr32(priv, 0x003334, ib_put);
	} else {
		nv_error(priv,
			 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
			 chid, client_name, dma_get, dma_put, state,
			 nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nv_wr32(priv, 0x003244, dma_put);
	}

	/* clear DMA_STATE, re-enable the pusher and ack the interrupt */
	nv_wr32(priv, 0x003228, 0x00000000);
	nv_wr32(priv, 0x003220, 0x00000001);
	nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
498 | |
/* PFIFO interrupt handler.  Services CACHE_ERROR, DMA_PUSHER and
 * SEMAPHORE interrupts (plus two NV50-only bits) with context
 * switching temporarily disabled; if an interrupt refuses to clear
 * after 100 passes, PFIFO's interrupts are masked off entirely.
 */
void
nv04_fifo_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_device *device = nv_device(subdev);
	struct nv04_fifo_priv *priv = (void *)subdev;
	uint32_t status, reassign;
	int cnt = 0;

	/* remember whether context switching was enabled so it can be
	 * restored after each pass
	 */
	reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		nv_wr32(priv, NV03_PFIFO_CACHES, 0);

		/* channel currently resident in CACHE1, and its GET */
		chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
		get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			nv04_fifo_cache_error(device, priv, chid, get);
			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			nv04_fifo_dma_pusher(device, priv, chid);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			/* ack, release the semaphore, step past the
			 * entry and restart the puller
			 */
			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(priv, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (device->card_type == NV_50) {
			if (status & 0x00000010) {
				status &= ~0x00000010;
				nv_wr32(priv, 0x002100, 0x00000010);
			}

			/* bit 30: wake anything waiting on the fifo's
			 * user event
			 */
			if (status & 0x40000000) {
				nouveau_event_trigger(priv->base.uevent, 0);
				nv_wr32(priv, 0x002100, 0x40000000);
				status &= ~0x40000000;
			}
		}

		if (status) {
			nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
				status, chid);
			nv_wr32(priv, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		nv_error(priv, "still angry after %d spins, halt\n", cnt);
		/* NOTE(review): 0x002140/0x000140 appear to be the
		 * PFIFO and per-engine interrupt-enable registers —
		 * confirm against hw docs
		 */
		nv_wr32(priv, 0x002140, 0);
		nv_wr32(priv, 0x000140, 0);
	}

	/* NOTE(review): presumably acks PFIFO's bit in the top-level
	 * interrupt status — confirm
	 */
	nv_wr32(priv, 0x000100, 0x00000100);
}
571 | |
572 | static int |
573 | nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, |
574 | struct nouveau_oclass *oclass, void *data, u32 size, |
575 | struct nouveau_object **pobject) |
576 | { |
577 | struct nv04_instmem_priv *imem = nv04_instmem(parent); |
578 | struct nv04_fifo_priv *priv; |
579 | int ret; |
580 | |
581 | ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv); |
582 | *pobject = nv_object(priv); |
583 | if (ret) |
584 | return ret; |
585 | |
586 | nouveau_ramht_ref(imem->ramht, &priv->ramht); |
587 | nouveau_gpuobj_ref(imem->ramro, &priv->ramro); |
588 | nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc); |
589 | |
590 | nv_subdev(priv)->unit = 0x00000100; |
591 | nv_subdev(priv)->intr = nv04_fifo_intr; |
592 | nv_engine(priv)->cclass = &nv04_fifo_cclass; |
593 | nv_engine(priv)->sclass = nv04_fifo_sclass; |
594 | priv->base.pause = nv04_fifo_pause; |
595 | priv->base.start = nv04_fifo_start; |
596 | priv->ramfc_desc = nv04_ramfc; |
597 | return 0; |
598 | } |
599 | |
600 | void |
601 | nv04_fifo_dtor(struct nouveau_object *object) |
602 | { |
603 | struct nv04_fifo_priv *priv = (void *)object; |
604 | nouveau_gpuobj_ref(NULL, &priv->ramfc); |
605 | nouveau_gpuobj_ref(NULL, &priv->ramro); |
606 | nouveau_ramht_ref(NULL, &priv->ramht); |
607 | nouveau_fifo_destroy(&priv->base); |
608 | } |
609 | |
/* Bring the PFIFO engine up: program the RAMHT/RAMRO/RAMFC locations,
 * park CACHE1 on the highest channel (used as the null context, see
 * nv04_fifo_chan_fini), then enable interrupts and cache processing.
 * The write order below is deliberate.
 */
int
nv04_fifo_init(struct nouveau_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	int ret;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	/* hash-table search depth, size (as log2 - 9) and address */
	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((priv->ramht->bits - 9) << 16) |
				        (priv->ramht->base.addr >> 8));
	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);

	/* ack anything pending, then unmask all fifo interrupts */
	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	/* start the pusher, puller and context switching */
	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	return 0;
}
639 | |
/* Public PFIFO engine class for NV04; fini falls through to the
 * generic fifo handler.
 */
struct nouveau_oclass *
nv04_fifo_oclass = &(struct nouveau_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};
650 | |