1 | /* |
2 | * Copyright 2008 Jerome Glisse. |
3 | * All Rights Reserved. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice (including the next |
13 | * paragraph) shall be included in all copies or substantial portions of the |
14 | * Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
22 | * DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: |
25 | * Jerome Glisse <glisse@freedesktop.org> |
26 | */ |
27 | #include <linux/list_sort.h> |
28 | #include <drm/drmP.h> |
29 | #include <drm/radeon_drm.h> |
30 | #include "radeon_reg.h" |
31 | #include "radeon.h" |
32 | #include "radeon_trace.h" |
33 | |
34 | #define RADEON_CS_MAX_PRIORITY 32u |
35 | #define RADEON_CS_NUM_BUCKETS (RADEON_CS_MAX_PRIORITY + 1) |
36 | |
37 | /* This is based on the bucket sort with O(n) time complexity. |
38 | * An item with priority "i" is added to bucket[i]. The lists are then |
39 | * concatenated in descending order. |
40 | */ |
41 | struct radeon_cs_buckets { |
42 | struct list_head bucket[RADEON_CS_NUM_BUCKETS]; |
43 | }; |
44 | |
45 | static void radeon_cs_buckets_init(struct radeon_cs_buckets *b) |
46 | { |
47 | unsigned i; |
48 | |
49 | for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) |
50 | INIT_LIST_HEAD(&b->bucket[i]); |
51 | } |
52 | |
53 | static void radeon_cs_buckets_add(struct radeon_cs_buckets *b, |
54 | struct list_head *item, unsigned priority) |
55 | { |
56 | /* Since buffers which appear sooner in the relocation list are |
57 | * likely to be used more often than buffers which appear later |
58 | * in the list, the sort mustn't change the ordering of buffers |
59 | * with the same priority, i.e. it must be stable. |
60 | */ |
61 | list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]); |
62 | } |
63 | |
64 | static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b, |
65 | struct list_head *out_list) |
66 | { |
67 | unsigned i; |
68 | |
/* Splice the buckets into the output list. Each splice prepends,
 * so the result runs from highest to lowest priority. */
70 | for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) { |
71 | list_splice(&b->bucket[i], out_list); |
72 | } |
73 | } |
74 | |
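/**
 * radeon_cs_parser_relocs() - process the relocation chunk
 * @p: parser structure holding parsing context.
 *
 * Look up the GEM objects referenced by the relocation chunk, skip
 * duplicate handles, pick placement domains and priorities for each
 * buffer and build the sorted validation list, then validate all
 * buffers with radeon_bo_list_validate().
 **/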
75 | static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) |
76 | { |
77 | struct drm_device *ddev = p->rdev->ddev; |
78 | struct radeon_cs_chunk *chunk; |
79 | struct radeon_cs_buckets buckets; |
80 | unsigned i, j; |
81 | bool duplicate; |
82 | |
83 | if (p->chunk_relocs_idx == -1) { |
84 | return 0; |
85 | } |
86 | chunk = &p->chunks[p->chunk_relocs_idx]; |
87 | p->dma_reloc_idx = 0; |
/* FIXME: we assume that each reloc uses 4 dwords */
89 | p->nrelocs = chunk->length_dw / 4; |
90 | p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL); |
91 | if (p->relocs_ptr == NULL) { |
92 | return -ENOMEM; |
93 | } |
94 | p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL); |
95 | if (p->relocs == NULL) { |
96 | return -ENOMEM; |
97 | } |
98 | |
99 | radeon_cs_buckets_init(&buckets); |
100 | |
101 | for (i = 0; i < p->nrelocs; i++) { |
102 | struct drm_radeon_cs_reloc *r; |
103 | unsigned priority; |
104 | |
105 | duplicate = false; |
106 | r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; |
107 | for (j = 0; j < i; j++) { |
108 | if (r->handle == p->relocs[j].handle) { |
109 | p->relocs_ptr[i] = &p->relocs[j]; |
110 | duplicate = true; |
111 | break; |
112 | } |
113 | } |
114 | if (duplicate) { |
115 | p->relocs[i].handle = 0; |
116 | continue; |
117 | } |
118 | |
119 | p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp, |
120 | r->handle); |
121 | if (p->relocs[i].gobj == NULL) { |
122 | DRM_ERROR("gem object lookup failed 0x%x\n" , |
123 | r->handle); |
124 | return -ENOENT; |
125 | } |
126 | p->relocs_ptr[i] = &p->relocs[i]; |
127 | p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); |
128 | |
129 | /* The userspace buffer priorities are from 0 to 15. A higher |
130 | * number means the buffer is more important. |
131 | * Also, the buffers used for write have a higher priority than |
132 | * the buffers used for read only, which doubles the range |
133 | * to 0 to 31. 32 is reserved for the kernel driver. |
134 | */ |
135 | priority = (r->flags & 0xf) * 2 + !!r->write_domain; |
136 | |
/* The first reloc of an UVD job is the msg and that must be in
 * VRAM; also put everything into VRAM on AGP cards to avoid
 * image corruption. */
140 | if (p->ring == R600_RING_TYPE_UVD_INDEX && |
141 | (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { |
142 | /* TODO: is this still needed for NI+ ? */ |
143 | p->relocs[i].domain = |
144 | RADEON_GEM_DOMAIN_VRAM; |
145 | |
146 | p->relocs[i].alt_domain = |
147 | RADEON_GEM_DOMAIN_VRAM; |
148 | |
149 | /* prioritize this over any other relocation */ |
150 | priority = RADEON_CS_MAX_PRIORITY; |
151 | } else { |
152 | uint32_t domain = r->write_domain ? |
153 | r->write_domain : r->read_domains; |
154 | |
155 | if (domain & RADEON_GEM_DOMAIN_CPU) { |
156 | DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid " |
157 | "for command submission\n" ); |
158 | return -EINVAL; |
159 | } |
160 | |
161 | p->relocs[i].domain = domain; |
162 | if (domain == RADEON_GEM_DOMAIN_VRAM) |
163 | domain |= RADEON_GEM_DOMAIN_GTT; |
164 | p->relocs[i].alt_domain = domain; |
165 | } |
166 | |
167 | p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; |
168 | p->relocs[i].handle = r->handle; |
169 | |
170 | radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head, |
171 | priority); |
172 | } |
173 | |
174 | radeon_cs_buckets_get_list(&buckets, &p->validated); |
175 | |
176 | if (p->cs_flags & RADEON_CS_USE_VM) |
177 | p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, |
178 | &p->validated); |
179 | |
180 | return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); |
181 | } |
182 | |
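/**
 * radeon_cs_get_ring() - map a userspace ring id to a hardware ring index
 * @p: parser structure holding parsing context.
 * @ring: userspace ring id (RADEON_CS_RING_*)
 * @priority: requested priority, used to pick between rings where possible
 *
 * Store the priority and translate the ring id into the matching hardware
 * ring index. Returns -EINVAL if the ring id is unknown or the requested
 * ring isn't available on this ASIC.
 **/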
183 | static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) |
184 | { |
185 | p->priority = priority; |
186 | |
187 | switch (ring) { |
188 | default: |
189 | DRM_ERROR("unknown ring id: %d\n" , ring); |
190 | return -EINVAL; |
191 | case RADEON_CS_RING_GFX: |
192 | p->ring = RADEON_RING_TYPE_GFX_INDEX; |
193 | break; |
194 | case RADEON_CS_RING_COMPUTE: |
195 | if (p->rdev->family >= CHIP_TAHITI) { |
196 | if (p->priority > 0) |
197 | p->ring = CAYMAN_RING_TYPE_CP1_INDEX; |
198 | else |
199 | p->ring = CAYMAN_RING_TYPE_CP2_INDEX; |
200 | } else |
201 | p->ring = RADEON_RING_TYPE_GFX_INDEX; |
202 | break; |
203 | case RADEON_CS_RING_DMA: |
204 | if (p->rdev->family >= CHIP_CAYMAN) { |
205 | if (p->priority > 0) |
206 | p->ring = R600_RING_TYPE_DMA_INDEX; |
207 | else |
208 | p->ring = CAYMAN_RING_TYPE_DMA1_INDEX; |
209 | } else if (p->rdev->family >= CHIP_RV770) { |
210 | p->ring = R600_RING_TYPE_DMA_INDEX; |
211 | } else { |
212 | return -EINVAL; |
213 | } |
214 | break; |
215 | case RADEON_CS_RING_UVD: |
216 | p->ring = R600_RING_TYPE_UVD_INDEX; |
217 | break; |
218 | case RADEON_CS_RING_VCE: |
219 | /* TODO: only use the low priority ring for now */ |
220 | p->ring = TN_RING_TYPE_VCE1_INDEX; |
221 | break; |
222 | } |
223 | return 0; |
224 | } |
225 | |
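/**
 * radeon_cs_sync_rings() - make the IB wait for all relocated buffers
 * @p: parser structure holding parsing context.
 *
 * Add a semaphore wait on the sync object of every relocated buffer so
 * the command stream doesn't run before earlier users of those buffers
 * are done.
 **/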
226 | static void radeon_cs_sync_rings(struct radeon_cs_parser *p) |
227 | { |
228 | int i; |
229 | |
230 | for (i = 0; i < p->nrelocs; i++) { |
231 | if (!p->relocs[i].robj) |
232 | continue; |
233 | |
234 | radeon_semaphore_sync_to(p->ib.semaphore, |
235 | p->relocs[i].robj->tbo.sync_obj); |
236 | } |
237 | } |
238 | |
239 | /* XXX: note that this is called from the legacy UMS CS ioctl as well */ |
240 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) |
241 | { |
242 | struct drm_radeon_cs *cs = data; |
243 | uint64_t *chunk_array_ptr; |
244 | unsigned size, i; |
245 | u32 ring = RADEON_CS_RING_GFX; |
246 | s32 priority = 0; |
247 | |
248 | if (!cs->num_chunks) { |
249 | return 0; |
250 | } |
251 | /* get chunks */ |
252 | INIT_LIST_HEAD(&p->validated); |
253 | p->idx = 0; |
254 | p->ib.sa_bo = NULL; |
255 | p->ib.semaphore = NULL; |
256 | p->const_ib.sa_bo = NULL; |
257 | p->const_ib.semaphore = NULL; |
258 | p->chunk_ib_idx = -1; |
259 | p->chunk_relocs_idx = -1; |
260 | p->chunk_flags_idx = -1; |
261 | p->chunk_const_ib_idx = -1; |
262 | p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL); |
263 | if (p->chunks_array == NULL) { |
264 | return -ENOMEM; |
265 | } |
266 | chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks); |
267 | if (copy_from_user(p->chunks_array, chunk_array_ptr, |
268 | sizeof(uint64_t)*cs->num_chunks)) { |
269 | return -EFAULT; |
270 | } |
271 | p->cs_flags = 0; |
272 | p->nchunks = cs->num_chunks; |
273 | p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL); |
274 | if (p->chunks == NULL) { |
275 | return -ENOMEM; |
276 | } |
277 | for (i = 0; i < p->nchunks; i++) { |
278 | struct drm_radeon_cs_chunk __user **chunk_ptr = NULL; |
279 | struct drm_radeon_cs_chunk user_chunk; |
280 | uint32_t __user *cdata; |
281 | |
282 | chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i]; |
283 | if (copy_from_user(&user_chunk, chunk_ptr, |
284 | sizeof(struct drm_radeon_cs_chunk))) { |
285 | return -EFAULT; |
286 | } |
287 | p->chunks[i].length_dw = user_chunk.length_dw; |
288 | p->chunks[i].chunk_id = user_chunk.chunk_id; |
289 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { |
290 | p->chunk_relocs_idx = i; |
291 | } |
292 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { |
293 | p->chunk_ib_idx = i; |
294 | /* zero length IB isn't useful */ |
295 | if (p->chunks[i].length_dw == 0) |
296 | return -EINVAL; |
297 | } |
298 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) { |
299 | p->chunk_const_ib_idx = i; |
300 | /* zero length CONST IB isn't useful */ |
301 | if (p->chunks[i].length_dw == 0) |
302 | return -EINVAL; |
303 | } |
304 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { |
305 | p->chunk_flags_idx = i; |
306 | /* zero length flags aren't useful */ |
307 | if (p->chunks[i].length_dw == 0) |
308 | return -EINVAL; |
309 | } |
310 | |
311 | size = p->chunks[i].length_dw; |
312 | cdata = (void __user *)(unsigned long)user_chunk.chunk_data; |
313 | p->chunks[i].user_ptr = cdata; |
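/* The CONST_IB chunk is always copied from user space later in
 * radeon_cs_ib_fill(); the IB chunk only needs a kernel copy here
 * on AGP setups and is otherwise also copied in radeon_cs_ib_fill(). */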
314 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) |
315 | continue; |
316 | |
317 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { |
318 | if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP)) |
319 | continue; |
320 | } |
321 | |
322 | p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); |
323 | size *= sizeof(uint32_t); |
324 | if (p->chunks[i].kdata == NULL) { |
325 | return -ENOMEM; |
326 | } |
327 | if (copy_from_user(p->chunks[i].kdata, cdata, size)) { |
328 | return -EFAULT; |
329 | } |
330 | if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { |
331 | p->cs_flags = p->chunks[i].kdata[0]; |
332 | if (p->chunks[i].length_dw > 1) |
333 | ring = p->chunks[i].kdata[1]; |
334 | if (p->chunks[i].length_dw > 2) |
335 | priority = (s32)p->chunks[i].kdata[2]; |
336 | } |
337 | } |
338 | |
339 | /* these are KMS only */ |
340 | if (p->rdev) { |
341 | if ((p->cs_flags & RADEON_CS_USE_VM) && |
342 | !p->rdev->vm_manager.enabled) { |
343 | DRM_ERROR("VM not active on asic!\n" ); |
344 | return -EINVAL; |
345 | } |
346 | |
347 | if (radeon_cs_get_ring(p, ring, priority)) |
348 | return -EINVAL; |
349 | |
350 | /* we only support VM on some SI+ rings */ |
351 | if ((p->cs_flags & RADEON_CS_USE_VM) == 0) { |
352 | if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) { |
353 | DRM_ERROR("Ring %d requires VM!\n" , p->ring); |
354 | return -EINVAL; |
355 | } |
356 | } else { |
357 | if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) { |
358 | DRM_ERROR("VM not supported on ring %d!\n" , |
359 | p->ring); |
360 | return -EINVAL; |
361 | } |
362 | } |
363 | } |
364 | |
365 | return 0; |
366 | } |
367 | |
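/* list_sort() comparison callback used by radeon_cs_parser_fini() to
 * order the validated list by buffer size, smallest first. */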
368 | static int cmp_size_smaller_first(void *priv, struct list_head *a, |
369 | struct list_head *b) |
370 | { |
371 | struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head); |
372 | struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head); |
373 | |
374 | /* Sort A before B if A is smaller. */ |
375 | return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; |
376 | } |
377 | |
378 | /** |
 * radeon_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to backoff the reservation
 *
 * If error is set, unvalidate the buffers, otherwise just free the memory
 * used by the parsing context.
385 | **/ |
386 | static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff) |
387 | { |
388 | unsigned i; |
389 | |
390 | if (!error) { |
391 | /* Sort the buffer list from the smallest to largest buffer, |
392 | * which affects the order of buffers in the LRU list. |
393 | * This assures that the smallest buffers are added first |
394 | * to the LRU list, so they are likely to be later evicted |
395 | * first, instead of large buffers whose eviction is more |
396 | * expensive. |
397 | * |
398 | * This slightly lowers the number of bytes moved by TTM |
399 | * per frame under memory pressure. |
400 | */ |
401 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); |
402 | |
403 | ttm_eu_fence_buffer_objects(&parser->ticket, |
404 | &parser->validated, |
405 | parser->ib.fence); |
406 | } else if (backoff) { |
407 | ttm_eu_backoff_reservation(&parser->ticket, |
408 | &parser->validated); |
409 | } |
410 | |
411 | if (parser->relocs != NULL) { |
412 | for (i = 0; i < parser->nrelocs; i++) { |
413 | if (parser->relocs[i].gobj) |
414 | drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); |
415 | } |
416 | } |
417 | kfree(parser->track); |
418 | kfree(parser->relocs); |
419 | kfree(parser->relocs_ptr); |
420 | kfree(parser->vm_bos); |
421 | for (i = 0; i < parser->nchunks; i++) |
422 | drm_free_large(parser->chunks[i].kdata); |
423 | kfree(parser->chunks); |
424 | kfree(parser->chunks_array); |
425 | radeon_ib_free(parser->rdev, &parser->ib); |
426 | radeon_ib_free(parser->rdev, &parser->const_ib); |
427 | } |
428 | |
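/**
 * radeon_cs_ib_chunk() - parse and submit a non-VM command stream
 * @rdev: radeon device structure.
 * @parser: parser structure holding parsing context.
 *
 * Run the per-ring CS checker on the IB and, if it passes, schedule it.
 * Does nothing for VM submissions, which are handled by
 * radeon_cs_ib_vm_chunk().
 **/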
429 | static int radeon_cs_ib_chunk(struct radeon_device *rdev, |
430 | struct radeon_cs_parser *parser) |
431 | { |
432 | int r; |
433 | |
434 | if (parser->chunk_ib_idx == -1) |
435 | return 0; |
436 | |
437 | if (parser->cs_flags & RADEON_CS_USE_VM) |
438 | return 0; |
439 | |
440 | r = radeon_cs_parse(rdev, parser->ring, parser); |
441 | if (r || parser->parser_error) { |
442 | DRM_ERROR("Invalid command stream !\n" ); |
443 | return r; |
444 | } |
445 | |
446 | if (parser->ring == R600_RING_TYPE_UVD_INDEX) |
447 | radeon_uvd_note_usage(rdev); |
448 | else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) || |
449 | (parser->ring == TN_RING_TYPE_VCE2_INDEX)) |
450 | radeon_vce_note_usage(rdev); |
451 | |
452 | radeon_cs_sync_rings(parser); |
453 | r = radeon_ib_schedule(rdev, &parser->ib, NULL); |
454 | if (r) { |
455 | DRM_ERROR("Failed to schedule IB !\n" ); |
456 | } |
457 | return r; |
458 | } |
459 | |
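/**
 * radeon_bo_vm_update_pte() - update the VM page tables for this submission
 * @p: parser structure holding parsing context.
 * @vm: the VM the command stream will execute in.
 *
 * Update the page directory, the ring temporary BO mapping and the page
 * table entries of every relocated buffer, skipping duplicates.
 **/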
460 | static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, |
461 | struct radeon_vm *vm) |
462 | { |
463 | struct radeon_device *rdev = p->rdev; |
464 | int i, r; |
465 | |
466 | r = radeon_vm_update_page_directory(rdev, vm); |
467 | if (r) |
468 | return r; |
469 | |
470 | r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, |
471 | &rdev->ring_tmp_bo.bo->tbo.mem); |
472 | if (r) |
473 | return r; |
474 | |
475 | for (i = 0; i < p->nrelocs; i++) { |
476 | struct radeon_bo *bo; |
477 | |
478 | /* ignore duplicates */ |
479 | if (p->relocs_ptr[i] != &p->relocs[i]) |
480 | continue; |
481 | |
482 | bo = p->relocs[i].robj; |
483 | r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem); |
484 | if (r) |
485 | return r; |
486 | } |
487 | return 0; |
488 | } |
489 | |
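/**
 * radeon_cs_ib_vm_chunk() - check and submit a VM command stream
 * @rdev: radeon device structure.
 * @parser: parser structure holding parsing context.
 *
 * Run the IB checker on the (const) IBs, bring the VM page tables up to
 * date and schedule the IBs, holding the VM mutex around the page table
 * update and the submission.
 **/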
490 | static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, |
491 | struct radeon_cs_parser *parser) |
492 | { |
493 | struct radeon_fpriv *fpriv = parser->filp->driver_priv; |
494 | struct radeon_vm *vm = &fpriv->vm; |
495 | int r; |
496 | |
497 | if (parser->chunk_ib_idx == -1) |
498 | return 0; |
499 | if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) |
500 | return 0; |
501 | |
502 | if (parser->const_ib.length_dw) { |
503 | r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib); |
504 | if (r) { |
505 | return r; |
506 | } |
507 | } |
508 | |
509 | r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib); |
510 | if (r) { |
511 | return r; |
512 | } |
513 | |
514 | if (parser->ring == R600_RING_TYPE_UVD_INDEX) |
515 | radeon_uvd_note_usage(rdev); |
516 | |
517 | mutex_lock(&vm->mutex); |
518 | r = radeon_bo_vm_update_pte(parser, vm); |
519 | if (r) { |
520 | goto out; |
521 | } |
522 | radeon_cs_sync_rings(parser); |
523 | radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence); |
524 | |
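/* On SI and newer, schedule the const IB together with the main IB
 * when a CONST_IB chunk was provided. */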
525 | if ((rdev->family >= CHIP_TAHITI) && |
526 | (parser->chunk_const_ib_idx != -1)) { |
527 | r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); |
528 | } else { |
529 | r = radeon_ib_schedule(rdev, &parser->ib, NULL); |
530 | } |
531 | |
532 | out: |
533 | mutex_unlock(&vm->mutex); |
534 | return r; |
535 | } |
536 | |
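/**
 * radeon_cs_handle_lockup() - handle a GPU lockup during submission
 * @rdev: radeon device structure.
 * @r: error code returned by the submission path.
 *
 * -EDEADLK is treated as a GPU lockup: try a GPU reset and report -EAGAIN
 * if the reset succeeded so the submission can be retried.
 **/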
537 | static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r) |
538 | { |
539 | if (r == -EDEADLK) { |
540 | r = radeon_gpu_reset(rdev); |
541 | if (!r) |
542 | r = -EAGAIN; |
543 | } |
544 | return r; |
545 | } |
546 | |
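/**
 * radeon_cs_ib_fill() - allocate the IBs and copy in the command stream
 * @rdev: radeon device structure.
 * @parser: parser structure holding parsing context.
 *
 * Allocate the IB (and, on SI+ with a CONST_IB chunk, the const IB),
 * enforce the IB size limit for VM submissions and copy the command
 * stream from the chunk data into the IBs.
 **/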
547 | static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser) |
548 | { |
549 | struct radeon_cs_chunk *ib_chunk; |
550 | struct radeon_vm *vm = NULL; |
551 | int r; |
552 | |
553 | if (parser->chunk_ib_idx == -1) |
554 | return 0; |
555 | |
556 | if (parser->cs_flags & RADEON_CS_USE_VM) { |
557 | struct radeon_fpriv *fpriv = parser->filp->driver_priv; |
558 | vm = &fpriv->vm; |
559 | |
560 | if ((rdev->family >= CHIP_TAHITI) && |
561 | (parser->chunk_const_ib_idx != -1)) { |
562 | ib_chunk = &parser->chunks[parser->chunk_const_ib_idx]; |
563 | if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { |
564 | DRM_ERROR("cs IB CONST too big: %d\n" , ib_chunk->length_dw); |
565 | return -EINVAL; |
566 | } |
567 | r = radeon_ib_get(rdev, parser->ring, &parser->const_ib, |
568 | vm, ib_chunk->length_dw * 4); |
569 | if (r) { |
570 | DRM_ERROR("Failed to get const ib !\n" ); |
571 | return r; |
572 | } |
573 | parser->const_ib.is_const_ib = true; |
574 | parser->const_ib.length_dw = ib_chunk->length_dw; |
575 | if (copy_from_user(parser->const_ib.ptr, |
576 | ib_chunk->user_ptr, |
577 | ib_chunk->length_dw * 4)) |
578 | return -EFAULT; |
579 | } |
580 | |
581 | ib_chunk = &parser->chunks[parser->chunk_ib_idx]; |
582 | if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { |
583 | DRM_ERROR("cs IB too big: %d\n" , ib_chunk->length_dw); |
584 | return -EINVAL; |
585 | } |
586 | } |
587 | ib_chunk = &parser->chunks[parser->chunk_ib_idx]; |
588 | |
589 | r = radeon_ib_get(rdev, parser->ring, &parser->ib, |
590 | vm, ib_chunk->length_dw * 4); |
591 | if (r) { |
592 | DRM_ERROR("Failed to get ib !\n" ); |
593 | return r; |
594 | } |
595 | parser->ib.length_dw = ib_chunk->length_dw; |
596 | if (ib_chunk->kdata) |
597 | memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4); |
598 | else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) |
599 | return -EFAULT; |
600 | return 0; |
601 | } |
602 | |
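/**
 * radeon_cs_ioctl() - main command submission ioctl
 * @dev: drm device.
 * @data: ioctl argument (struct drm_radeon_cs).
 * @filp: drm file this submission came from.
 *
 * Initialize the parser, copy in the chunks, validate the buffers, parse
 * and schedule the IBs, then clean up again, handling GPU lockups along
 * the way. Returns -EBUSY if acceleration isn't working.
 **/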
603 | int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
604 | { |
605 | struct radeon_device *rdev = dev->dev_private; |
606 | struct radeon_cs_parser parser; |
607 | int r; |
608 | |
609 | down_read(&rdev->exclusive_lock); |
610 | if (!rdev->accel_working) { |
611 | up_read(&rdev->exclusive_lock); |
612 | return -EBUSY; |
613 | } |
614 | /* initialize parser */ |
615 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); |
616 | parser.filp = filp; |
617 | parser.rdev = rdev; |
618 | parser.dev = rdev->dev; |
619 | parser.family = rdev->family; |
620 | r = radeon_cs_parser_init(&parser, data); |
621 | if (r) { |
622 | DRM_ERROR("Failed to initialize parser !\n" ); |
623 | radeon_cs_parser_fini(&parser, r, false); |
624 | up_read(&rdev->exclusive_lock); |
625 | r = radeon_cs_handle_lockup(rdev, r); |
626 | return r; |
627 | } |
628 | |
629 | r = radeon_cs_ib_fill(rdev, &parser); |
630 | if (!r) { |
631 | r = radeon_cs_parser_relocs(&parser); |
632 | if (r && r != -ERESTARTSYS) |
633 | DRM_ERROR("Failed to parse relocation %d!\n" , r); |
634 | } |
635 | |
636 | if (r) { |
637 | radeon_cs_parser_fini(&parser, r, false); |
638 | up_read(&rdev->exclusive_lock); |
639 | r = radeon_cs_handle_lockup(rdev, r); |
640 | return r; |
641 | } |
642 | |
643 | trace_radeon_cs(&parser); |
644 | |
645 | r = radeon_cs_ib_chunk(rdev, &parser); |
646 | if (r) { |
647 | goto out; |
648 | } |
649 | r = radeon_cs_ib_vm_chunk(rdev, &parser); |
650 | if (r) { |
651 | goto out; |
652 | } |
653 | out: |
654 | radeon_cs_parser_fini(&parser, r, true); |
655 | up_read(&rdev->exclusive_lock); |
656 | r = radeon_cs_handle_lockup(rdev, r); |
657 | return r; |
658 | } |
659 | |
660 | /** |
661 | * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet |
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: index in the ib at which the packet starts
 *
 * Assumes that chunk_ib_idx is properly set. Returns -EINVAL if the packet
 * is bigger than the remaining ib size, or if the packet type is unknown.
667 | **/ |
668 | int radeon_cs_packet_parse(struct radeon_cs_parser *p, |
669 | struct radeon_cs_packet *pkt, |
670 | unsigned idx) |
671 | { |
672 | struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; |
673 | struct radeon_device *rdev = p->rdev; |
uint32_t header;
675 | |
676 | if (idx >= ib_chunk->length_dw) { |
677 | DRM_ERROR("Can not parse packet at %d after CS end %d !\n" , |
678 | idx, ib_chunk->length_dw); |
679 | return -EINVAL; |
680 | } |
681 | header = radeon_get_ib_value(p, idx); |
682 | pkt->idx = idx; |
683 | pkt->type = RADEON_CP_PACKET_GET_TYPE(header); |
684 | pkt->count = RADEON_CP_PACKET_GET_COUNT(header); |
685 | pkt->one_reg_wr = 0; |
686 | switch (pkt->type) { |
687 | case RADEON_PACKET_TYPE0: |
688 | if (rdev->family < CHIP_R600) { |
689 | pkt->reg = R100_CP_PACKET0_GET_REG(header); |
690 | pkt->one_reg_wr = |
691 | RADEON_CP_PACKET0_GET_ONE_REG_WR(header); |
692 | } else |
693 | pkt->reg = R600_CP_PACKET0_GET_REG(header); |
694 | break; |
695 | case RADEON_PACKET_TYPE3: |
696 | pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header); |
697 | break; |
698 | case RADEON_PACKET_TYPE2: |
699 | pkt->count = -1; |
700 | break; |
701 | default: |
702 | DRM_ERROR("Unknown packet type %d at %d !\n" , pkt->type, idx); |
703 | return -EINVAL; |
704 | } |
705 | if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { |
706 | DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n" , |
707 | pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); |
708 | return -EINVAL; |
709 | } |
710 | return 0; |
711 | } |
712 | |
713 | /** |
714 | * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP |
715 | * @p: structure holding the parser context. |
716 | * |
 * Check if the next packet is a NOP relocation packet3.
718 | **/ |
719 | bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) |
720 | { |
721 | struct radeon_cs_packet p3reloc; |
722 | int r; |
723 | |
724 | r = radeon_cs_packet_parse(p, &p3reloc, p->idx); |
725 | if (r) |
726 | return false; |
727 | if (p3reloc.type != RADEON_PACKET_TYPE3) |
728 | return false; |
729 | if (p3reloc.opcode != RADEON_PACKET3_NOP) |
730 | return false; |
731 | return true; |
732 | } |
733 | |
734 | /** |
735 | * radeon_cs_dump_packet() - dump raw packet context |
736 | * @p: structure holding the parser context. |
737 | * @pkt: structure holding the packet. |
738 | * |
739 | * Used mostly for debugging and error reporting. |
740 | **/ |
741 | void radeon_cs_dump_packet(struct radeon_cs_parser *p, |
742 | struct radeon_cs_packet *pkt) |
743 | { |
744 | volatile uint32_t *ib; |
745 | unsigned i; |
746 | unsigned idx; |
747 | |
748 | ib = p->ib.ptr; |
749 | idx = pkt->idx; |
750 | for (i = 0; i <= (pkt->count + 1); i++, idx++) |
751 | DRM_INFO("ib[%d]=0x%08X\n" , idx, ib[idx]); |
752 | } |
753 | |
754 | /** |
755 | * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet |
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the resulting reloc entry
 * @nomm: if set, compute the GPU offset directly from the relocation
 *        chunk data instead of using the looked-up BO list
 *
 * Check that the next packet is a relocation packet3 and return the
 * corresponding reloc entry through @cs_reloc.
764 | **/ |
765 | int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, |
766 | struct radeon_cs_reloc **cs_reloc, |
767 | int nomm) |
768 | { |
769 | struct radeon_cs_chunk *relocs_chunk; |
770 | struct radeon_cs_packet p3reloc; |
771 | unsigned idx; |
772 | int r; |
773 | |
774 | if (p->chunk_relocs_idx == -1) { |
775 | DRM_ERROR("No relocation chunk !\n" ); |
776 | return -EINVAL; |
777 | } |
778 | *cs_reloc = NULL; |
779 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
780 | r = radeon_cs_packet_parse(p, &p3reloc, p->idx); |
781 | if (r) |
782 | return r; |
783 | p->idx += p3reloc.count + 2; |
784 | if (p3reloc.type != RADEON_PACKET_TYPE3 || |
785 | p3reloc.opcode != RADEON_PACKET3_NOP) { |
786 | DRM_ERROR("No packet3 for relocation for packet at %d.\n" , |
787 | p3reloc.idx); |
788 | radeon_cs_dump_packet(p, &p3reloc); |
789 | return -EINVAL; |
790 | } |
791 | idx = radeon_get_ib_value(p, p3reloc.idx + 1); |
792 | if (idx >= relocs_chunk->length_dw) { |
793 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n" , |
794 | idx, relocs_chunk->length_dw); |
795 | radeon_cs_dump_packet(p, &p3reloc); |
796 | return -EINVAL; |
797 | } |
798 | /* FIXME: we assume reloc size is 4 dwords */ |
799 | if (nomm) { |
800 | *cs_reloc = p->relocs; |
801 | (*cs_reloc)->gpu_offset = |
802 | (u64)relocs_chunk->kdata[idx + 3] << 32; |
803 | (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0]; |
804 | } else |
805 | *cs_reloc = p->relocs_ptr[(idx / 4)]; |
806 | return 0; |
807 | } |
808 | |