1 | /* |
2 | * Copyright 2009 Jerome Glisse. |
3 | * All Rights Reserved. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the |
7 | * "Software"), to deal in the Software without restriction, including |
8 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * the following conditions: |
12 | * |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
20 | * |
21 | * The above copyright notice and this permission notice (including the |
22 | * next paragraph) shall be included in all copies or substantial portions |
23 | * of the Software. |
24 | * |
25 | */ |
26 | /* |
27 | * Authors: |
28 | * Jerome Glisse <glisse@freedesktop.org> |
29 | * Dave Airlie |
30 | */ |
31 | #include <linux/seq_file.h> |
32 | #include <linux/atomic.h> |
33 | #include <linux/wait.h> |
34 | #include <linux/kref.h> |
35 | #include <linux/slab.h> |
36 | #include <linux/firmware.h> |
37 | #include <drm/drmP.h> |
38 | #include "radeon_reg.h" |
39 | #include "radeon.h" |
40 | #include "radeon_trace.h" |
41 | |
42 | /* |
43 | * Fences |
 * Fences mark an event in the GPU's pipeline and are used
45 | * for GPU/CPU synchronization. When the fence is written, |
46 | * it is expected that all buffers associated with that fence |
47 | * are no longer in use by the associated ring on the GPU and |
 * that the relevant GPU caches have been flushed. Whether
49 | * we use a scratch register or memory location depends on the asic |
50 | * and whether writeback is enabled. |
51 | */ |
52 | |
53 | /** |
54 | * radeon_fence_write - write a fence value |
55 | * |
56 | * @rdev: radeon_device pointer |
57 | * @seq: sequence number to write |
58 | * @ring: ring index the fence is associated with |
59 | * |
60 | * Writes a fence value to memory or a scratch register (all asics). |
61 | */ |
62 | static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) |
63 | { |
64 | struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; |
65 | if (likely(rdev->wb.enabled || !drv->scratch_reg)) { |
66 | if (drv->cpu_addr) { |
67 | *drv->cpu_addr = cpu_to_le32(seq); |
68 | } |
69 | } else { |
70 | WREG32(drv->scratch_reg, seq); |
71 | } |
72 | } |
73 | |
74 | /** |
75 | * radeon_fence_read - read a fence value |
76 | * |
77 | * @rdev: radeon_device pointer |
78 | * @ring: ring index the fence is associated with |
79 | * |
80 | * Reads a fence value from memory or a scratch register (all asics). |
81 | * Returns the value of the fence read from memory or register. |
82 | */ |
83 | static u32 radeon_fence_read(struct radeon_device *rdev, int ring) |
84 | { |
85 | struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; |
86 | u32 seq = 0; |
87 | |
88 | if (likely(rdev->wb.enabled || !drv->scratch_reg)) { |
89 | if (drv->cpu_addr) { |
90 | seq = le32_to_cpu(*drv->cpu_addr); |
91 | } else { |
92 | seq = lower_32_bits(atomic64_read(&drv->last_seq)); |
93 | } |
94 | } else { |
95 | seq = RREG32(drv->scratch_reg); |
96 | } |
97 | return seq; |
98 | } |
99 | |
100 | /** |
101 | * radeon_fence_emit - emit a fence on the requested ring |
102 | * |
103 | * @rdev: radeon_device pointer |
104 | * @fence: radeon fence object |
105 | * @ring: ring index the fence is associated with |
106 | * |
107 | * Emits a fence command on the requested ring (all asics). |
108 | * Returns 0 on success, -ENOMEM on failure. |
109 | */ |
110 | int radeon_fence_emit(struct radeon_device *rdev, |
111 | struct radeon_fence **fence, |
112 | int ring) |
113 | { |
114 | /* we are protected by the ring emission mutex */ |
115 | *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL); |
116 | if ((*fence) == NULL) { |
117 | return -ENOMEM; |
118 | } |
119 | kref_init(&((*fence)->kref)); |
120 | (*fence)->rdev = rdev; |
121 | (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring]; |
122 | (*fence)->ring = ring; |
123 | radeon_fence_ring_emit(rdev, ring, *fence); |
124 | trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); |
125 | return 0; |
126 | } |
127 | |
128 | /** |
 * radeon_fence_process_locked - process a fence driver's fences
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 * Caller must hold rdev->fence_lock; radeon_fence_process() is the
 * locking wrapper.
137 | static void radeon_fence_process_locked(struct radeon_device *rdev, int ring) |
138 | { |
139 | uint64_t seq, last_seq, last_emitted; |
140 | unsigned count_loop = 0; |
141 | bool wake = false; |
142 | |
143 | BUG_ON(!spin_is_locked(&rdev->fence_lock)); |
144 | |
	/* Note there is a scenario here for an infinite loop, but it is
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process that updates
	 * last_seq between our atomic read and our atomic xchg.
	 *
	 * Moreover, for the loop to be infinite, new fences must signal
	 * continuously, i.e. radeon_fence_read needs to return a different
	 * value each time for both the currently polling process and the
	 * other process that exchanges last_seq between our atomic read
	 * and atomic xchg. In addition, the value the other process stores
	 * as last_seq must be higher than the seq value we just read, which
	 * means we must be interrupted after radeon_fence_read and before
	 * the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and bail
	 * out after 10 iterations, accepting that we might temporarily
	 * have set last_seq to an older value than the true last signaled
	 * seq.
	 */
166 | last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq); |
167 | do { |
168 | last_emitted = rdev->fence_drv[ring].sync_seq[ring]; |
169 | seq = radeon_fence_read(rdev, ring); |
170 | seq |= last_seq & 0xffffffff00000000LL; |
171 | if (seq < last_seq) { |
172 | seq &= 0xffffffff; |
173 | seq |= last_emitted & 0xffffffff00000000LL; |
174 | } |
175 | |
176 | if (seq <= last_seq || seq > last_emitted) { |
177 | break; |
178 | } |
		/* If we loop again we don't want to return without
		 * checking whether a fence has signaled, since the
		 * seq we just read is different from the previous one.
		 */
183 | wake = true; |
184 | last_seq = seq; |
185 | if ((count_loop++) > 10) { |
			/* We have looped too many times; bail out,
			 * accepting that we may have recorded an older
			 * fence seq than the real last seq signaled
			 * by the hw.
			 */
191 | break; |
192 | } |
193 | } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq); |
194 | |
195 | if (wake) |
196 | #ifdef __NetBSD__ |
197 | DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue, &rdev->fence_lock); |
198 | #else |
199 | wake_up_all(&rdev->fence_queue); |
200 | #endif |
201 | } |
202 | |
203 | void radeon_fence_process(struct radeon_device *rdev, int ring) |
204 | { |
205 | |
206 | spin_lock(&rdev->fence_lock); |
207 | radeon_fence_process_locked(rdev, ring); |
208 | spin_unlock(&rdev->fence_lock); |
209 | } |
210 | |
211 | /** |
212 | * radeon_fence_destroy - destroy a fence |
213 | * |
214 | * @kref: fence kref |
215 | * |
216 | * Frees the fence object (all asics). |
217 | */ |
218 | static void radeon_fence_destroy(struct kref *kref) |
219 | { |
220 | struct radeon_fence *fence; |
221 | |
222 | fence = container_of(kref, struct radeon_fence, kref); |
223 | kfree(fence); |
224 | } |
225 | |
226 | /** |
227 | * radeon_fence_seq_signaled - check if a fence sequence number has signaled |
228 | * |
229 | * @rdev: radeon device pointer |
230 | * @seq: sequence number |
231 | * @ring: ring index the fence is associated with |
232 | * |
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * radeon_fence_signaled(). Caller must hold rdev->fence_lock.
239 | */ |
240 | static bool radeon_fence_seq_signaled(struct radeon_device *rdev, |
241 | u64 seq, unsigned ring) |
242 | { |
243 | BUG_ON(!spin_is_locked(&rdev->fence_lock)); |
244 | if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { |
245 | return true; |
246 | } |
247 | /* poll new last sequence at least once */ |
248 | radeon_fence_process_locked(rdev, ring); |
249 | if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { |
250 | return true; |
251 | } |
252 | return false; |
253 | } |
254 | |
255 | /** |
256 | * radeon_fence_signaled - check if a fence has signaled |
257 | * |
258 | * @fence: radeon fence object |
259 | * |
260 | * Check if the requested fence has signaled (all asics). |
261 | * Returns true if the fence has signaled or false if it has not. |
262 | */ |
263 | bool radeon_fence_signaled(struct radeon_fence *fence) |
264 | { |
265 | if (!fence) { |
266 | return true; |
267 | } |
268 | spin_lock(&fence->rdev->fence_lock); |
269 | if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) { |
270 | spin_unlock(&fence->rdev->fence_lock); |
271 | return true; |
272 | } |
273 | if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { |
274 | fence->seq = RADEON_FENCE_SIGNALED_SEQ; |
275 | spin_unlock(&fence->rdev->fence_lock); |
276 | return true; |
277 | } |
278 | spin_unlock(&fence->rdev->fence_lock); |
279 | return false; |
280 | } |
281 | |
282 | /** |
283 | * radeon_fence_any_seq_signaled - check if any sequence number is signaled |
284 | * |
285 | * @rdev: radeon device pointer |
286 | * @seq: sequence numbers |
287 | * |
 * Check if the last signaled fence sequence number is >= the requested
289 | * sequence number (all asics). |
290 | * Returns true if any has signaled (current value is >= requested value) |
291 | * or false if it has not. Helper function for radeon_fence_wait_seq. |
292 | */ |
293 | static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) |
294 | { |
295 | unsigned i; |
296 | |
297 | BUG_ON(!spin_is_locked(&rdev->fence_lock)); |
298 | |
299 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
300 | if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) |
301 | return true; |
302 | } |
303 | return false; |
304 | } |
305 | |
306 | /** |
 * radeon_fence_wait_seq - wait for specific sequence numbers
308 | * |
309 | * @rdev: radeon device pointer |
310 | * @target_seq: sequence number(s) we want to wait for |
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence numbers. Helper function
317 | * for radeon_fence_wait_*(). |
318 | * Returns 0 if the sequence number has passed, error for all other cases. |
319 | * -EDEADLK is returned when a GPU lockup has been detected. |
320 | */ |
321 | static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, |
322 | bool intr) |
323 | { |
324 | uint64_t last_seq[RADEON_NUM_RINGS]; |
325 | bool signaled; |
326 | int i, r = 0; |
327 | |
328 | spin_lock(&rdev->fence_lock); |
329 | while (!radeon_fence_any_seq_signaled(rdev, target_seq)) { |
330 | |
331 | /* Save current sequence values, used to check for GPU lockups */ |
332 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
333 | if (!target_seq[i]) |
334 | continue; |
335 | |
336 | last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq); |
337 | trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); |
338 | radeon_irq_kms_sw_irq_get(rdev, i); |
339 | } |
340 | |
341 | #ifdef __NetBSD__ |
342 | if (intr) |
343 | DRM_SPIN_TIMED_WAIT_UNTIL(r, &rdev->fence_queue, |
344 | &rdev->fence_lock, RADEON_FENCE_JIFFIES_TIMEOUT, |
345 | ((signaled = radeon_fence_any_seq_signaled(rdev, |
346 | target_seq)) |
347 | || rdev->needs_reset)); |
348 | else |
349 | DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(r, &rdev->fence_queue, |
350 | &rdev->fence_lock, RADEON_FENCE_JIFFIES_TIMEOUT, |
351 | ((signaled = radeon_fence_any_seq_signaled(rdev, |
352 | target_seq)) |
353 | || rdev->needs_reset)); |
354 | #else |
355 | if (intr) { |
356 | r = wait_event_interruptible_timeout(rdev->fence_queue, ( |
357 | (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)) |
358 | || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT); |
359 | } else { |
360 | r = wait_event_timeout(rdev->fence_queue, ( |
361 | (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)) |
362 | || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT); |
363 | } |
364 | #endif |
365 | |
366 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
367 | if (!target_seq[i]) |
368 | continue; |
369 | |
370 | radeon_irq_kms_sw_irq_put(rdev, i); |
371 | trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); |
372 | } |
373 | |
374 | if (unlikely(r < 0)) |
375 | break; |
376 | |
377 | if (unlikely(!signaled)) { |
378 | if (rdev->needs_reset) { |
379 | r = -EDEADLK; |
380 | break; |
381 | } |
382 | |
			/* we were interrupted for some reason and the
			 * fence isn't signaled yet; resume waiting */
385 | if (r) |
386 | continue; |
387 | |
388 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
389 | if (!target_seq[i]) |
390 | continue; |
391 | |
392 | if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq)) |
393 | break; |
394 | } |
395 | |
396 | if (i != RADEON_NUM_RINGS) |
397 | continue; |
398 | |
399 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
400 | if (!target_seq[i]) |
401 | continue; |
402 | |
403 | if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i])) |
404 | break; |
405 | } |
406 | |
407 | if (i < RADEON_NUM_RINGS) { |
408 | /* good news we believe it's a lockup */ |
409 | dev_warn(rdev->dev, "GPU lockup (waiting for " |
410 | "0x%016" PRIx64" last fence id 0x%016" PRIx64" on" |
411 | " ring %d)\n" , |
412 | target_seq[i], last_seq[i], i); |
413 | |
				/* remember that we need a reset */
415 | rdev->needs_reset = true; |
416 | #ifdef __NetBSD__ |
417 | DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue, |
418 | &rdev->fence_lock); |
419 | #else |
420 | wake_up_all(&rdev->fence_queue); |
421 | #endif |
422 | r = -EDEADLK; |
423 | break; |
424 | } |
425 | } |
426 | } |
427 | spin_unlock(&rdev->fence_lock); |
428 | /* |
429 | * The timed wait returns 0 on timeout or the positive number |
430 | * of ticks left (minimum 1) if the condition passed. We |
431 | * return zero on success. |
432 | */ |
	return (r < 0 ? r : 0);
434 | } |
435 | |
436 | /** |
437 | * radeon_fence_wait - wait for a fence to signal |
438 | * |
439 | * @fence: radeon fence object |
 * @intr: use interruptible sleep
441 | * |
442 | * Wait for the requested fence to signal (all asics). |
 * @intr selects whether to use interruptible (true) or non-interruptible
444 | * (false) sleep when waiting for the fence. |
445 | * Returns 0 if the fence has passed, error for all other cases. |
446 | */ |
447 | int radeon_fence_wait(struct radeon_fence *fence, bool intr) |
448 | { |
449 | uint64_t seq[RADEON_NUM_RINGS] = {}; |
450 | int r; |
451 | |
452 | if (fence == NULL) { |
453 | WARN(1, "Querying an invalid fence : %p !\n" , fence); |
454 | return -EINVAL; |
455 | } |
456 | |
457 | seq[fence->ring] = fence->seq; |
458 | if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) |
459 | return 0; |
460 | |
461 | r = radeon_fence_wait_seq(fence->rdev, seq, intr); |
462 | if (r) |
463 | return r; |
464 | |
465 | fence->seq = RADEON_FENCE_SIGNALED_SEQ; |
466 | return 0; |
467 | } |
468 | |
469 | /** |
470 | * radeon_fence_wait_any - wait for a fence to signal on any ring |
471 | * |
472 | * @rdev: radeon device pointer |
473 | * @fences: radeon fence object(s) |
 * @intr: use interruptible sleep
475 | * |
476 | * Wait for any requested fence to signal (all asics). Fence |
477 | * array is indexed by ring id. @intr selects whether to use |
 * interruptible (true) or non-interruptible (false) sleep when
479 | * waiting for the fences. Used by the suballocator. |
480 | * Returns 0 if any fence has passed, error for all other cases. |
481 | */ |
482 | int radeon_fence_wait_any(struct radeon_device *rdev, |
483 | struct radeon_fence **fences, |
484 | bool intr) |
485 | { |
486 | uint64_t seq[RADEON_NUM_RINGS]; |
487 | unsigned i, num_rings = 0; |
488 | int r; |
489 | |
490 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
491 | seq[i] = 0; |
492 | |
493 | if (!fences[i]) { |
494 | continue; |
495 | } |
496 | |
497 | seq[i] = fences[i]->seq; |
498 | ++num_rings; |
499 | |
		/* test if something was already signaled */
501 | if (seq[i] == RADEON_FENCE_SIGNALED_SEQ) |
502 | return 0; |
503 | } |
504 | |
505 | /* nothing to wait for ? */ |
506 | if (num_rings == 0) |
507 | return -ENOENT; |
508 | |
509 | r = radeon_fence_wait_seq(rdev, seq, intr); |
510 | if (r) { |
511 | return r; |
512 | } |
513 | return 0; |
514 | } |
515 | |
516 | /** |
517 | * radeon_fence_wait_next - wait for the next fence to signal |
518 | * |
519 | * @rdev: radeon device pointer |
520 | * @ring: ring index the fence is associated with |
521 | * |
522 | * Wait for the next fence on the requested ring to signal (all asics). |
523 | * Returns 0 if the next fence has passed, error for all other cases. |
524 | * Caller must hold ring lock. |
525 | */ |
526 | int radeon_fence_wait_next(struct radeon_device *rdev, int ring) |
527 | { |
528 | uint64_t seq[RADEON_NUM_RINGS] = {}; |
529 | |
530 | seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; |
531 | if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) { |
		/* nothing to wait for, last_seq is
		 * already the last emitted fence */
534 | return -ENOENT; |
535 | } |
536 | return radeon_fence_wait_seq(rdev, seq, false); |
537 | } |
538 | |
539 | /** |
540 | * radeon_fence_wait_empty - wait for all fences to signal |
541 | * |
542 | * @rdev: radeon device pointer |
543 | * @ring: ring index the fence is associated with |
544 | * |
545 | * Wait for all fences on the requested ring to signal (all asics). |
546 | * Returns 0 if the fences have passed, error for all other cases. |
547 | * Caller must hold ring lock. |
548 | */ |
549 | int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) |
550 | { |
551 | uint64_t seq[RADEON_NUM_RINGS] = {}; |
552 | int r; |
553 | |
554 | seq[ring] = rdev->fence_drv[ring].sync_seq[ring]; |
555 | if (!seq[ring]) |
556 | return 0; |
557 | |
558 | r = radeon_fence_wait_seq(rdev, seq, false); |
559 | if (r) { |
560 | if (r == -EDEADLK) |
561 | return -EDEADLK; |
562 | |
563 | dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n" , |
564 | ring, r); |
565 | } |
566 | return 0; |
567 | } |
568 | |
569 | /** |
570 | * radeon_fence_ref - take a ref on a fence |
571 | * |
572 | * @fence: radeon fence object |
573 | * |
574 | * Take a reference on a fence (all asics). |
575 | * Returns the fence. |
576 | */ |
577 | struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) |
578 | { |
579 | kref_get(&fence->kref); |
580 | return fence; |
581 | } |
582 | |
583 | /** |
584 | * radeon_fence_unref - remove a ref on a fence |
585 | * |
586 | * @fence: radeon fence object |
587 | * |
588 | * Remove a reference on a fence (all asics). |
589 | */ |
590 | void radeon_fence_unref(struct radeon_fence **fence) |
591 | { |
592 | struct radeon_fence *tmp = *fence; |
593 | |
594 | *fence = NULL; |
595 | if (tmp) { |
596 | kref_put(&tmp->kref, radeon_fence_destroy); |
597 | } |
598 | } |
599 | |
600 | /** |
601 | * radeon_fence_count_emitted - get the count of emitted fences |
602 | * |
603 | * @rdev: radeon device pointer |
604 | * @ring: ring index the fence is associated with |
605 | * |
606 | * Get the number of fences emitted on the requested ring (all asics). |
607 | * Returns the number of emitted fences on the ring. Used by the |
 * dynpm code to track ring activity.
609 | */ |
610 | unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring) |
611 | { |
612 | uint64_t emitted; |
613 | |
614 | /* We are not protected by ring lock when reading the last sequence |
615 | * but it's ok to report slightly wrong fence count here. |
616 | */ |
617 | radeon_fence_process(rdev, ring); |
618 | emitted = rdev->fence_drv[ring].sync_seq[ring] |
619 | - atomic64_read(&rdev->fence_drv[ring].last_seq); |
	/* to avoid a 32-bit wrap-around */
621 | if (emitted > 0x10000000) { |
622 | emitted = 0x10000000; |
623 | } |
624 | return (unsigned)emitted; |
625 | } |
626 | |
627 | /** |
628 | * radeon_fence_need_sync - do we need a semaphore |
629 | * |
630 | * @fence: radeon fence object |
631 | * @dst_ring: which ring to check against |
632 | * |
633 | * Check if the fence needs to be synced against another ring |
634 | * (all asics). If so, we need to emit a semaphore. |
635 | * Returns true if we need to sync with another ring, false if |
636 | * not. |
637 | */ |
638 | bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring) |
639 | { |
640 | struct radeon_fence_driver *fdrv; |
641 | |
642 | if (!fence) { |
643 | return false; |
644 | } |
645 | |
646 | if (fence->ring == dst_ring) { |
647 | return false; |
648 | } |
649 | |
650 | /* we are protected by the ring mutex */ |
651 | fdrv = &fence->rdev->fence_drv[dst_ring]; |
652 | if (fence->seq <= fdrv->sync_seq[fence->ring]) { |
653 | return false; |
654 | } |
655 | |
656 | return true; |
657 | } |
658 | |
659 | /** |
660 | * radeon_fence_note_sync - record the sync point |
661 | * |
662 | * @fence: radeon fence object |
663 | * @dst_ring: which ring to check against |
664 | * |
665 | * Note the sequence number at which point the fence will |
666 | * be synced with the requested ring (all asics). |
667 | */ |
668 | void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring) |
669 | { |
670 | struct radeon_fence_driver *dst, *src; |
671 | unsigned i; |
672 | |
673 | if (!fence) { |
674 | return; |
675 | } |
676 | |
677 | if (fence->ring == dst_ring) { |
678 | return; |
679 | } |
680 | |
681 | /* we are protected by the ring mutex */ |
682 | src = &fence->rdev->fence_drv[fence->ring]; |
683 | dst = &fence->rdev->fence_drv[dst_ring]; |
684 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
685 | if (i == dst_ring) { |
686 | continue; |
687 | } |
688 | dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]); |
689 | } |
690 | } |
691 | |
692 | /** |
693 | * radeon_fence_driver_start_ring - make the fence driver |
694 | * ready for use on the requested ring. |
695 | * |
696 | * @rdev: radeon device pointer |
697 | * @ring: ring index to start the fence driver on |
698 | * |
699 | * Make the fence driver ready for processing (all asics). |
700 | * Not all asics have all rings, so each asic will only |
701 | * start the fence driver on the rings it has. |
702 | * Returns 0 for success, errors for failure. |
703 | */ |
704 | int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) |
705 | { |
706 | uint64_t index; |
707 | int r; |
708 | |
709 | radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); |
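	/* Prefer keeping the fence value in the GPU write-back buffer (or,
	 * for the UVD ring, right after the UVD firmware image); fall back
	 * to a scratch register only when write-back events are not usable
	 * and the ring supports scratch registers.
	 */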
710 | if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) { |
711 | rdev->fence_drv[ring].scratch_reg = 0; |
712 | if (ring != R600_RING_TYPE_UVD_INDEX) { |
713 | index = R600_WB_EVENT_OFFSET + ring * 4; |
714 | rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; |
715 | rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + |
716 | index; |
717 | |
718 | } else { |
719 | /* put fence directly behind firmware */ |
720 | #ifdef __NetBSD__ /* XXX ALIGN means something else. */ |
721 | index = round_up(rdev->uvd_fw->size, 8); |
722 | #else |
723 | index = ALIGN(rdev->uvd_fw->size, 8); |
724 | #endif |
725 | rdev->fence_drv[ring].cpu_addr = (uint32_t *)((uint8_t *)rdev->uvd.cpu_addr + index); |
726 | rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; |
727 | } |
728 | |
729 | } else { |
730 | r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg); |
731 | if (r) { |
732 | dev_err(rdev->dev, "fence failed to get scratch register\n" ); |
733 | return r; |
734 | } |
735 | index = RADEON_WB_SCRATCH_OFFSET + |
736 | rdev->fence_drv[ring].scratch_reg - |
737 | rdev->scratch.reg_base; |
738 | rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4]; |
739 | rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index; |
740 | } |
741 | radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring); |
742 | rdev->fence_drv[ring].initialized = true; |
743 | dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016" PRIx64" and cpu addr 0x%p\n" , |
744 | ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr); |
745 | return 0; |
746 | } |
747 | |
748 | /** |
749 | * radeon_fence_driver_init_ring - init the fence driver |
750 | * for the requested ring. |
751 | * |
752 | * @rdev: radeon device pointer |
753 | * @ring: ring index to start the fence driver on |
754 | * |
755 | * Init the fence driver for the requested ring (all asics). |
756 | * Helper function for radeon_fence_driver_init(). |
757 | */ |
758 | static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring) |
759 | { |
760 | int i; |
761 | |
762 | rdev->fence_drv[ring].scratch_reg = -1; |
763 | rdev->fence_drv[ring].cpu_addr = NULL; |
764 | rdev->fence_drv[ring].gpu_addr = 0; |
765 | for (i = 0; i < RADEON_NUM_RINGS; ++i) |
766 | rdev->fence_drv[ring].sync_seq[i] = 0; |
767 | atomic64_set(&rdev->fence_drv[ring].last_seq, 0); |
768 | rdev->fence_drv[ring].initialized = false; |
769 | } |
770 | |
771 | /** |
772 | * radeon_fence_driver_init - init the fence driver |
773 | * for all possible rings. |
774 | * |
775 | * @rdev: radeon device pointer |
776 | * |
777 | * Init the fence driver for all possible rings (all asics). |
778 | * Not all asics have all rings, so each asic will only |
779 | * start the fence driver on the rings it has using |
780 | * radeon_fence_driver_start_ring(). |
781 | * Returns 0 for success. |
782 | */ |
783 | int radeon_fence_driver_init(struct radeon_device *rdev) |
784 | { |
785 | int ring; |
786 | |
787 | #ifdef __NetBSD__ |
788 | spin_lock_init(&rdev->fence_lock); |
	DRM_INIT_WAITQUEUE(&rdev->fence_queue, "radfence");
790 | #else |
791 | init_waitqueue_head(&rdev->fence_queue); |
792 | #endif |
793 | for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { |
794 | radeon_fence_driver_init_ring(rdev, ring); |
795 | } |
796 | if (radeon_debugfs_fence_init(rdev)) { |
797 | dev_err(rdev->dev, "fence debugfs file creation failed\n" ); |
798 | } |
799 | return 0; |
800 | } |
801 | |
802 | /** |
803 | * radeon_fence_driver_fini - tear down the fence driver |
804 | * for all possible rings. |
805 | * |
806 | * @rdev: radeon device pointer |
807 | * |
808 | * Tear down the fence driver for all possible rings (all asics). |
809 | */ |
810 | void radeon_fence_driver_fini(struct radeon_device *rdev) |
811 | { |
812 | int ring, r; |
813 | |
814 | mutex_lock(&rdev->ring_lock); |
815 | for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { |
816 | if (!rdev->fence_drv[ring].initialized) |
817 | continue; |
818 | r = radeon_fence_wait_empty(rdev, ring); |
819 | if (r) { |
820 | /* no need to trigger GPU reset as we are unloading */ |
821 | radeon_fence_driver_force_completion(rdev); |
822 | } |
823 | #ifdef __NetBSD__ |
824 | spin_lock(&rdev->fence_lock); |
825 | DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue, &rdev->fence_lock); |
826 | spin_unlock(&rdev->fence_lock); |
827 | #else |
828 | wake_up_all(&rdev->fence_queue); |
829 | #endif |
830 | radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); |
831 | rdev->fence_drv[ring].initialized = false; |
832 | } |
833 | mutex_unlock(&rdev->ring_lock); |
834 | |
835 | #ifdef __NetBSD__ |
836 | DRM_DESTROY_WAITQUEUE(&rdev->fence_queue); |
837 | spin_lock_destroy(&rdev->fence_lock); |
838 | #endif |
839 | } |
840 | |
841 | /** |
 * radeon_fence_driver_force_completion - force all fence waiters to complete
843 | * |
844 | * @rdev: radeon device pointer |
845 | * |
 * In case of a GPU reset failure, make sure no process keeps waiting on
 * a fence that will never complete.
848 | */ |
849 | void radeon_fence_driver_force_completion(struct radeon_device *rdev) |
850 | { |
851 | int ring; |
852 | |
853 | for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { |
854 | if (!rdev->fence_drv[ring].initialized) |
855 | continue; |
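		/* Writing the last emitted sequence number makes every
		 * outstanding fence on this ring read back as signaled.
		 */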
856 | radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring); |
857 | } |
858 | } |
859 | |
860 | |
861 | /* |
862 | * Fence debugfs |
863 | */ |
864 | #if defined(CONFIG_DEBUG_FS) |
865 | static int radeon_debugfs_fence_info(struct seq_file *m, void *data) |
866 | { |
867 | struct drm_info_node *node = (struct drm_info_node *)m->private; |
868 | struct drm_device *dev = node->minor->dev; |
869 | struct radeon_device *rdev = dev->dev_private; |
870 | int i, j; |
871 | |
872 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
873 | if (!rdev->fence_drv[i].initialized) |
874 | continue; |
875 | |
876 | radeon_fence_process(rdev, i); |
877 | |
878 | seq_printf(m, "--- ring %d ---\n" , i); |
879 | seq_printf(m, "Last signaled fence 0x%016llx\n" , |
880 | (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq)); |
881 | seq_printf(m, "Last emitted 0x%016" PRIx64"\n" , |
882 | rdev->fence_drv[i].sync_seq[i]); |
883 | |
884 | for (j = 0; j < RADEON_NUM_RINGS; ++j) { |
885 | if (i != j && rdev->fence_drv[j].initialized) |
886 | seq_printf(m, "Last sync to ring %d 0x%016" PRIx64"\n" , |
887 | j, rdev->fence_drv[i].sync_seq[j]); |
888 | } |
889 | } |
890 | return 0; |
891 | } |
892 | |
893 | static struct drm_info_list radeon_debugfs_fence_list[] = { |
894 | {"radeon_fence_info" , &radeon_debugfs_fence_info, 0, NULL}, |
895 | }; |
896 | #endif |
897 | |
898 | int radeon_debugfs_fence_init(struct radeon_device *rdev) |
899 | { |
900 | #if defined(CONFIG_DEBUG_FS) |
901 | return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1); |
902 | #else |
903 | return 0; |
904 | #endif |
905 | } |
906 | |