/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

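/* Copy methods exercised by radeon_do_test_moves() */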
#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA  0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers - IH ring) / test size
	 */
	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		n -= rdev->ring[i].ring_size;
	if (rdev->wb.wb_obj)
		n -= RADEON_GPU_PAGE_SIZE;
	if (rdev->ih.ring_obj)
		n -= rdev->ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

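		/* Seed the GTT buffer: each pointer-sized slot is written with
		 * its own CPU mapping address, so a later mismatch pinpoints
		 * the exact offset that went wrong.
		 */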
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

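		/* Verify the GTT->VRAM copy and refill VRAM with the
		 * VRAM-side slot addresses for the copy back.
		 */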
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (u8*)gtt_start - (u8*)gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (u8*)gtt_start - (u8*)gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (u8*)vram_start - (u8*)vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (u8*)vram_start - (u8*)vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			radeon_fence_unref(&fence);
		break;
	}

	radeon_bo_unpin(vram_obj);
out_unres:
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}

void radeon_test_moves(struct radeon_device *rdev)
{
	if (rdev->asic->copy.dma)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
	if (rdev->asic->copy.blit)
		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

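/* Emit a fence on @ring. UVD and VCE rings get their fence from a dummy
 * create/destroy message pair; all other rings emit one directly.
 */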
static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
					     struct radeon_ring *ring,
					     struct radeon_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = radeon_ring_lock(rdev, ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		radeon_fence_emit(rdev, fence, ring->idx);
		radeon_ring_unlock_commit(rdev, ring);
	}
	return 0;
}

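/* Test semaphore synchronisation between two rings: ringA waits on a
 * semaphore twice and emits a fence after each wait, ringB signals the
 * semaphore. Each fence must only signal once the matching semaphore
 * signal has been submitted on ringB.
 */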
static void radeon_test_ring_sync(struct radeon_device *rdev,
				  struct radeon_ring *ringA,
				  struct radeon_ring *ringB)
{
	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
	struct radeon_semaphore *semaphore = NULL;
	int r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
	if (r)
		goto out_cleanup;

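	/* Give the GPU plenty of time; the fence must not signal while the
	 * semaphore is still blocking ring A.
	 */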
	mdelay(1000);

	if (radeon_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);

	r = radeon_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (radeon_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);

	r = radeon_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fence1)
		radeon_fence_unref(&fence1);

	if (fence2)
		radeon_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

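/* Three-ring variant: ringA and ringB each wait on the same semaphore and
 * emit a fence, while ringC signals the semaphore twice. Exactly one of the
 * two fences must signal per signal submitted on ringC.
 */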
static void radeon_test_ring_sync2(struct radeon_device *rdev,
				   struct radeon_ring *ringA,
				   struct radeon_ring *ringB,
				   struct radeon_ring *ringC)
{
	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
	struct radeon_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = radeon_semaphore_create(rdev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringA);

	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = radeon_ring_lock(rdev, ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringB);
	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (radeon_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (radeon_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC);

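	/* Poll for up to three seconds; only one of the two waiters should
	 * get past the single semaphore signal.
	 */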
	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = radeon_fence_signaled(fenceA);
		sigB = radeon_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fence A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = radeon_ring_lock(rdev, ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
	radeon_ring_unlock_commit(rdev, ringC);

	mdelay(1000);

	r = radeon_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = radeon_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	radeon_semaphore_free(rdev, &semaphore, NULL);

	if (fenceA)
		radeon_fence_unref(&fenceA);

	if (fenceB)
		radeon_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}

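/* Not every ring pair can be synchronised against each other; currently the
 * semaphore test from VCE2 to VCE1 is skipped.
 */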
static bool radeon_test_sync_possible(struct radeon_ring *ringA,
				      struct radeon_ring *ringB)
{
	if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
	    ringB->idx == TN_RING_TYPE_VCE1_INDEX)
		return false;

	return true;
}

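/* Run the two- and three-ring semaphore tests on every ordered combination
 * of rings that are ready.
 */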
void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}