/*	$NetBSD: nouveau_dma.c,v 1.2 2014/08/06 15:01:33 riastradh Exp $	*/

/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_dma.c,v 1.2 2014/08/06 15:01:33 riastradh Exp $");

#include <core/client.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"

#ifdef __NetBSD__
# define __iomem
# define __force
#endif

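/* Copy nr_dwords 32-bit words from data into the push buffer at the
 * current position and advance dma.cur past them.  The kmap may point
 * at I/O memory, in which case memcpy_toio must be used instead of a
 * plain memcpy.
 */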
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}

#ifdef __NetBSD__
# undef __force
# undef __iomem
#endif

/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
static inline int
READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
	uint64_t val;

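	/* GET may be split across two 32-bit registers; fold the high
	 * word in when the channel provides one
	 */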
	val = nv_ro32(chan->object, chan->user_get);
	if (chan->user_get_hi)
		val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;

	/* reset the counter as long as GET is still advancing; this
	 * avoids misdetecting a GPU lockup when the GPU is merely
	 * processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

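	/* only pay for the udelay once every 256 polls, and give up
	 * once GET has been stalled for 100000 of them
	 */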
	if ((++*timeout & 0xff) == 0) {
		udelay(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->push.vma.offset ||
	    val > chan->push.vma.offset + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->push.vma.offset) >> 2;
}

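/* Queue an indirect pushbuf segment on an ib-style channel: write the
 * segment's GPU virtual address and length into the next ring slot,
 * then bump the IB PUT register so PFIFO fetches it.
 */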
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
	      int delta, int length)
{
	struct nouveau_bo *pb = chan->push.buffer;
	struct nouveau_vma *vma;
	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
	u64 offset;

	vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
	BUG_ON(!vma);
	offset = vma->offset + delta;

	BUG_ON(chan->dma.ib_free < 1);

	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

	/* order the entry writes above, then read back from the push
	 * buffer to flush any posted writes before notifying PFIFO
	 */
	mb();
	nouveau_bo_rd32(pb, 0);

	nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
	chan->dma.ib_free--;
}

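/* Poll the IB GET register until at least count ib slots are free,
 * resetting the spin counter whenever GET advances; returns -EBUSY
 * if GET stalls for too long.
 */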
static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (chan->dma.ib_free < count) {
		uint32_t get = nv_ro32(chan->object, 0x88);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

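		/* free slots are the ring distance from PUT up to GET,
		 * modulo the ring size
		 */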
		chan->dma.ib_free = get - chan->dma.ib_put;
		if (chan->dma.ib_free <= 0)
			chan->dma.ib_free += chan->dma.ib_max;
	}

	return 0;
}

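/* Wait for slots + 1 free ib entries (one extra so the FIRE_RING used
 * below when wrapping the push buffer still has a slot) and count free
 * dwords of push buffer space.
 */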
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
	uint64_t prev_get = 0;
	int ret, cnt = 0;

	ret = nv50_dma_push_wait(chan, slots + 1);
	if (unlikely(ret))
		return ret;

	while (chan->dma.free < count) {
		int get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get < 0)) {
			if (get == -EINVAL)
				continue;

			return get;
		}

		if (get <= chan->dma.cur) {
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= count)
				break;

			FIRE_RING(chan);
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get < 0)) {
					if (get == -EINVAL)
						continue;
					return get;
				}
			} while (get == 0);
			chan->dma.cur = 0;
			chan->dma.put = 0;
		}

		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}

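/* Main entry point for reserving push buffer space: ensure size free
 * dwords (and, on ib-style channels, slots free ib entries) before the
 * caller starts emitting commands.
 */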
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
	uint64_t prev_get = 0;
	int cnt = 0, get;

	if (chan->dma.ib_max)
		return nv50_dma_wait(chan, slots, size);

	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* loop until we have a usable GET pointer.  the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring;
		 * discard these values until something sensible is seen.
		 *
		 * the other case where we discard GET is while the GPU
		 * is fetching from the SKIPS area, so the code below
		 * doesn't have to deal with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT), so we have free space up until
			 * the end of the push buffer
			 *
			 * we can only hit this path once per call: after
			 * looping back to the beginning of the push buffer,
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET == PUT, in which case the below conditional
			 * will always succeed and break us out of the wait
			 * loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;
238
239 /* not enough space left at the end of the push buffer,
240 * instruct the GPU to jump back to the start right
241 * after processing the currently pending commands.
242 */
243 OUT_RING(chan, chan->push.vma.offset | 0x20000000);
244
245 /* wait for GET to depart from the skips area.
246 * prevents writing GET==PUT and causing a race
247 * condition that causes us to think the GPU is
248 * idle when it's not.
249 */
250 do {
251 get = READ_GET(chan, &prev_get, &cnt);
252 if (unlikely(get == -EBUSY))
253 return -EBUSY;
254 if (unlikely(get == -EINVAL))
255 continue;
256 } while (get <= NOUVEAU_DMA_SKIPS);
257 WRITE_PUT(NOUVEAU_DMA_SKIPS);
258
259 /* we're now submitting commands at the start of
260 * the push buffer.
261 */
262 chan->dma.cur =
263 chan->dma.put = NOUVEAU_DMA_SKIPS;
264 }

		/* engine is fetching ahead of us; we have space up until
		 * the current GET pointer.  the "- 1" is to ensure there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it.  we can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}