1 | /** |
2 | * \file drm_bufs.c |
3 | * Generic buffer template |
4 | * |
5 | * \author Rickard E. (Rik) Faith <faith@valinux.com> |
6 | * \author Gareth Hughes <gareth@valinux.com> |
7 | */ |
8 | |
9 | /* |
10 | * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com |
11 | * |
12 | * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. |
13 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
14 | * All Rights Reserved. |
15 | * |
16 | * Permission is hereby granted, free of charge, to any person obtaining a |
17 | * copy of this software and associated documentation files (the "Software"), |
18 | * to deal in the Software without restriction, including without limitation |
19 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
20 | * and/or sell copies of the Software, and to permit persons to whom the |
21 | * Software is furnished to do so, subject to the following conditions: |
22 | * |
23 | * The above copyright notice and this permission notice (including the next |
24 | * paragraph) shall be included in all copies or substantial portions of the |
25 | * Software. |
26 | * |
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
28 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
29 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
30 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
31 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
32 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
33 | * OTHER DEALINGS IN THE SOFTWARE. |
34 | */ |
35 | |
36 | #include <linux/vmalloc.h> |
37 | #include <linux/slab.h> |
38 | #include <linux/sched.h> |
39 | #include <linux/log2.h> |
40 | #include <linux/export.h> |
41 | #include <linux/mm.h> |
42 | #include <asm/bug.h> |
43 | #include <asm/io.h> |
44 | #include <asm/shmparam.h> |
45 | #include <drm/drmP.h> |
46 | |
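/**
 * Find an existing map on drm_device::maplist that matches the requested
 * type and offset, so duplicate requests can reuse it.
 *
 * \param dev DRM device.
 * \param map local map to match against.
 * \return the matching map list entry, or NULL if no compatible map exists.
 */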
47 | static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, |
48 | struct drm_local_map *map) |
49 | { |
50 | struct drm_map_list *entry; |
51 | list_for_each_entry(entry, &dev->maplist, head) { |
52 | /* |
53 | * Because the kernel-userspace ABI is fixed at a 32-bit offset |
54 | * while PCI resources may live above that, we only compare the |
55 | * lower 32 bits of the map offset for maps of type |
		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
59 | */ |
60 | if (!entry->map || |
61 | map->type != entry->map->type || |
62 | entry->master != dev->primary->master) |
63 | continue; |
64 | switch (map->type) { |
65 | case _DRM_SHM: |
66 | if (map->flags != _DRM_CONTAINS_LOCK) |
67 | break; |
68 | return entry; |
69 | case _DRM_REGISTERS: |
70 | case _DRM_FRAME_BUFFER: |
71 | if ((entry->map->offset & 0xffffffff) == |
72 | (map->offset & 0xffffffff)) |
73 | return entry; |
74 | default: /* Make gcc happy */ |
75 | ; |
76 | } |
77 | if (entry->map->offset == map->offset) |
78 | return entry; |
79 | } |
80 | |
81 | return NULL; |
82 | } |
83 | |
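/**
 * Compute the 32-bit user-space token for a map and insert it into the
 * device's map hash table.
 *
 * Kernel addresses that already fit in 32 bits are used directly (shifted
 * down by PAGE_SHIFT); otherwise a hashed handle above DRM_MAP_HASH_OFFSET
 * is generated.  For _DRM_SHM maps on platforms where SHMLBA exceeds the
 * page size, the low-order bits of the handle are kept equal to those of
 * the kernel virtual address to avoid cache aliasing during mmap().
 */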
84 | static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, |
85 | unsigned long user_token, int hashed_handle, int shm) |
86 | { |
87 | int use_hashed_handle, shift; |
88 | unsigned long add; |
89 | |
	use_hashed_handle = (user_token & ~0xffffffffUL) || hashed_handle;
91 | if (!use_hashed_handle) { |
92 | int ret; |
93 | hash->key = user_token >> PAGE_SHIFT; |
94 | ret = drm_ht_insert_item(&dev->map_hash, hash); |
95 | if (ret != -EINVAL) |
96 | return ret; |
97 | } |
98 | |
99 | shift = 0; |
100 | add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT; |
101 | if (shm && (SHMLBA > PAGE_SIZE)) { |
102 | int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1; |
103 | |
104 | /* For shared memory, we have to preserve the SHMLBA |
105 | * bits of the eventual vma->vm_pgoff value during |
106 | * mmap(). Otherwise we run into cache aliasing problems |
107 | * on some platforms. On these platforms, the pgoff of |
108 | * a mmap() request is used to pick a suitable virtual |
109 | * address for the mmap() region such that it will not |
110 | * cause cache aliasing problems. |
111 | * |
112 | * Therefore, make sure the SHMLBA relevant bits of the |
113 | * hash value we use are equal to those in the original |
114 | * kernel virtual address. |
115 | */ |
116 | shift = bits; |
117 | add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL)); |
118 | } |
119 | |
120 | return drm_ht_just_insert_please(&dev->map_hash, hash, |
121 | user_token, 32 - PAGE_SHIFT - 3, |
122 | shift, add); |
123 | } |
124 | |
125 | /** |
126 | * Core function to create a range of memory available for mapping by a |
127 | * non-root process. |
128 | * |
129 | * Adjusts the memory offset to its absolute value according to the mapping |
 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
132 | */ |
133 | static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, |
134 | unsigned int size, enum drm_map_type type, |
135 | enum drm_map_flags flags, |
136 | struct drm_map_list ** maplist) |
137 | { |
138 | struct drm_local_map *map; |
139 | struct drm_map_list *list; |
140 | drm_dma_handle_t *dmah; |
141 | unsigned long user_token; |
142 | int ret; |
143 | |
144 | map = kmalloc(sizeof(*map), GFP_KERNEL); |
145 | if (!map) |
146 | return -ENOMEM; |
147 | |
148 | map->offset = offset; |
149 | map->size = size; |
150 | map->flags = flags; |
151 | map->type = type; |
152 | |
153 | /* Only allow shared memory to be removable since we only keep enough |
	 * bookkeeping information about shared memory to allow for removal
155 | * when processes fork. |
156 | */ |
157 | if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { |
158 | kfree(map); |
159 | return -EINVAL; |
160 | } |
161 | DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n" , |
162 | (unsigned long long)map->offset, map->size, map->type); |
163 | |
164 | /* page-align _DRM_SHM maps. They are allocated here so there is no security |
165 | * hole created by that and it works around various broken drivers that use |
166 | * a non-aligned quantity to map the SAREA. --BenH |
167 | */ |
168 | if (map->type == _DRM_SHM) |
169 | map->size = PAGE_ALIGN(map->size); |
170 | |
171 | if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { |
172 | kfree(map); |
173 | return -EINVAL; |
174 | } |
175 | map->mtrr = -1; |
176 | map->handle = NULL; |
177 | |
178 | switch (map->type) { |
179 | case _DRM_REGISTERS: |
180 | case _DRM_FRAME_BUFFER: |
181 | #ifndef __NetBSD__ /* XXX No idea what this is for... */ |
182 | #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__) |
183 | if (map->offset + (map->size-1) < map->offset || |
184 | map->offset < virt_to_phys(high_memory)) { |
185 | kfree(map); |
186 | return -EINVAL; |
187 | } |
188 | #endif |
189 | #endif |
190 | /* Some drivers preinitialize some maps, without the X Server |
191 | * needing to be aware of it. Therefore, we just return success |
192 | * when the server tries to create a duplicate map. |
193 | */ |
194 | list = drm_find_matching_map(dev, map); |
195 | if (list != NULL) { |
196 | if (list->map->size != map->size) { |
197 | DRM_DEBUG("Matching maps of type %d with " |
198 | "mismatched sizes, (%ld vs %ld)\n" , |
199 | map->type, map->size, |
200 | list->map->size); |
201 | list->map->size = map->size; |
202 | } |
203 | |
204 | kfree(map); |
205 | *maplist = list; |
206 | return 0; |
207 | } |
208 | |
209 | if (map->type == _DRM_FRAME_BUFFER || |
210 | (map->flags & _DRM_WRITE_COMBINING)) { |
211 | map->mtrr = |
212 | arch_phys_wc_add(map->offset, map->size); |
213 | } |
214 | if (map->type == _DRM_REGISTERS) { |
215 | #ifdef __NetBSD__ |
216 | drm_core_ioremap(map, dev); |
217 | #else |
218 | if (map->flags & _DRM_WRITE_COMBINING) |
219 | map->handle = ioremap_wc(map->offset, |
220 | map->size); |
221 | else |
222 | map->handle = ioremap(map->offset, map->size); |
223 | #endif |
224 | if (!map->handle) { |
225 | kfree(map); |
226 | return -ENOMEM; |
227 | } |
228 | } |
229 | |
230 | break; |
231 | case _DRM_SHM: |
232 | list = drm_find_matching_map(dev, map); |
233 | if (list != NULL) { |
		if (list->map->size != map->size) {
			DRM_DEBUG("Matching maps of type %d with "
				  "mismatched sizes, (%ld vs %ld)\n",
237 | map->type, map->size, list->map->size); |
238 | list->map->size = map->size; |
239 | } |
240 | |
241 | kfree(map); |
242 | *maplist = list; |
243 | return 0; |
244 | } |
245 | map->handle = vmalloc_user(map->size); |
246 | DRM_DEBUG("%lu %d %p\n" , |
247 | map->size, order_base_2(map->size), map->handle); |
248 | if (!map->handle) { |
249 | kfree(map); |
250 | return -ENOMEM; |
251 | } |
252 | map->offset = (unsigned long)map->handle; |
253 | if (map->flags & _DRM_CONTAINS_LOCK) { |
254 | /* Prevent a 2nd X Server from creating a 2nd lock */ |
255 | spin_lock(&dev->primary->master->lock.spinlock); |
256 | if (dev->primary->master->lock.hw_lock != NULL) { |
257 | vfree(map->handle); |
258 | kfree(map); |
259 | spin_unlock(&dev->primary->master->lock.spinlock); |
260 | return -EBUSY; |
261 | } |
262 | dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ |
263 | spin_unlock(&dev->primary->master->lock.spinlock); |
264 | } |
265 | break; |
266 | case _DRM_AGP: { |
267 | struct drm_agp_mem *entry; |
268 | int valid = 0; |
269 | |
270 | if (!dev->agp) { |
271 | kfree(map); |
272 | return -EINVAL; |
273 | } |
274 | #ifdef __alpha__ |
275 | map->offset += dev->hose->mem_space->start; |
276 | #endif |
277 | /* In some cases (i810 driver), user space may have already |
278 | * added the AGP base itself, because dev->agp->base previously |
279 | * only got set during AGP enable. So, only add the base |
280 | * address if the map's offset isn't already within the |
281 | * aperture. |
282 | */ |
283 | #ifdef __NetBSD__ |
284 | if (map->offset < dev->agp->base || |
285 | map->offset > dev->agp->base + |
286 | dev->agp->agp_info.aki_info.ai_aperture_size - 1) { |
287 | map->offset += dev->agp->base; |
288 | } |
289 | #else |
290 | if (map->offset < dev->agp->base || |
291 | map->offset > dev->agp->base + |
292 | dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { |
293 | map->offset += dev->agp->base; |
294 | } |
295 | #endif |
296 | map->mtrr = dev->agp->agp_mtrr; /* for getmap */ |
297 | |
		/* This assumes the DRM is in total control of AGP space.
		 * That's not always the case, since AGP can be under the
		 * control of user space (e.g. the i810 driver), in which
		 * case this loop finds nothing.  We therefore only return
		 * -EPERM if dev->agp->memory is actually non-empty and the
		 * requested range really is invalid.
		 */
304 | list_for_each_entry(entry, &dev->agp->memory, head) { |
305 | if ((map->offset >= entry->bound) && |
306 | (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { |
307 | valid = 1; |
308 | break; |
309 | } |
310 | } |
311 | if (!list_empty(&dev->agp->memory) && !valid) { |
312 | kfree(map); |
313 | return -EPERM; |
314 | } |
315 | DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n" , |
316 | (unsigned long long)map->offset, map->size); |
317 | |
318 | break; |
319 | } |
320 | case _DRM_SCATTER_GATHER: |
321 | if (!dev->sg) { |
322 | kfree(map); |
323 | return -EINVAL; |
324 | } |
325 | map->offset += (unsigned long)dev->sg->virtual; |
326 | break; |
327 | case _DRM_CONSISTENT: |
328 | /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, |
329 | * As we're limiting the address to 2^32-1 (or less), |
330 | * casting it down to 32 bits is no problem, but we |
331 | * need to point to a 64bit variable first. */ |
332 | dmah = drm_pci_alloc(dev, map->size, map->size); |
333 | if (!dmah) { |
334 | kfree(map); |
335 | return -ENOMEM; |
336 | } |
337 | map->handle = dmah->vaddr; |
338 | map->offset = (unsigned long)dmah->busaddr; |
339 | #ifdef __NetBSD__ |
340 | map->lm_data.dmah = dmah; |
341 | #else |
342 | kfree(dmah); |
343 | #endif |
344 | break; |
345 | default: |
346 | kfree(map); |
347 | return -EINVAL; |
348 | } |
349 | |
350 | list = kzalloc(sizeof(*list), GFP_KERNEL); |
351 | if (!list) { |
352 | if (map->type == _DRM_REGISTERS) |
353 | #ifdef __NetBSD__ |
354 | drm_core_ioremapfree(map, dev); |
355 | #else |
356 | iounmap(map->handle); |
357 | #endif |
358 | kfree(map); |
359 | return -EINVAL; |
360 | } |
361 | list->map = map; |
362 | |
363 | mutex_lock(&dev->struct_mutex); |
364 | list_add(&list->head, &dev->maplist); |
365 | |
366 | /* Assign a 32-bit handle */ |
367 | /* We do it here so that dev->struct_mutex protects the increment */ |
368 | user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle : |
369 | map->offset; |
370 | ret = drm_map_handle(dev, &list->hash, user_token, 0, |
371 | (map->type == _DRM_SHM)); |
372 | if (ret) { |
373 | if (map->type == _DRM_REGISTERS) |
374 | #ifdef __NetBSD__ /* XXX What about other map types...? */ |
375 | drm_core_ioremapfree(map, dev); |
376 | #else |
377 | iounmap(map->handle); |
378 | #endif |
379 | kfree(map); |
380 | kfree(list); |
381 | mutex_unlock(&dev->struct_mutex); |
382 | return ret; |
383 | } |
384 | |
385 | list->user_token = list->hash.key << PAGE_SHIFT; |
386 | mutex_unlock(&dev->struct_mutex); |
387 | |
388 | if (!(map->flags & _DRM_DRIVER)) |
389 | list->master = dev->primary->master; |
390 | *maplist = list; |
391 | return 0; |
392 | } |
393 | |
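/**
 * Exported wrapper around drm_addmap_core() that hands back the
 * struct drm_local_map itself rather than the internal list entry.
 *
 * As a rough sketch of in-kernel use (hypothetical PCI driver; "pdev",
 * the BAR index and the flags are only illustrative), a driver might map
 * its register BAR at load time roughly like this:
 *
 *	struct drm_local_map *regs;
 *	int ret = drm_addmap(dev, pci_resource_start(pdev, 0),
 *			     pci_resource_len(pdev, 0),
 *			     _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
 *
 * \sa drm_addmap_core
 */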
394 | int drm_addmap(struct drm_device * dev, resource_size_t offset, |
395 | unsigned int size, enum drm_map_type type, |
396 | enum drm_map_flags flags, struct drm_local_map ** map_ptr) |
397 | { |
398 | struct drm_map_list *list; |
399 | int rc; |
400 | |
401 | rc = drm_addmap_core(dev, offset, size, type, flags, &list); |
402 | if (!rc) |
403 | *map_ptr = list->map; |
404 | return rc; |
405 | } |
406 | |
407 | EXPORT_SYMBOL(drm_addmap); |
408 | |
409 | /** |
410 | * Ioctl to specify a range of memory that is available for mapping by a |
411 | * non-root process. |
412 | * |
413 | * \param inode device inode. |
414 | * \param file_priv DRM file private. |
415 | * \param cmd command. |
416 | * \param arg pointer to a drm_map structure. |
417 | * \return zero on success or a negative value on error. |
418 | * |
419 | */ |
420 | int drm_addmap_ioctl(struct drm_device *dev, void *data, |
421 | struct drm_file *file_priv) |
422 | { |
423 | struct drm_map *map = data; |
424 | struct drm_map_list *maplist; |
425 | int err; |
426 | |
427 | #ifdef __NetBSD__ |
428 | # if 0 /* XXX Old drm did this. */ |
429 | if (!(dev->flags & (FREAD | FWRITE))) |
430 | return -EACCES; |
431 | # endif |
432 | if (!(DRM_SUSER() || map->type == _DRM_AGP || map->type == _DRM_SHM)) |
433 | return -EACCES; /* XXX */ |
434 | #else |
435 | if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM)) |
436 | return -EPERM; |
437 | #endif |
438 | |
439 | err = drm_addmap_core(dev, map->offset, map->size, map->type, |
440 | map->flags, &maplist); |
441 | |
442 | if (err) |
443 | return err; |
444 | |
	/* Avoid a warning on 64-bit: this casting isn't very nice, but the
	 * API is already set in stone, so it's too late to change it. */
446 | map->handle = (void *)(unsigned long)maplist->user_token; |
447 | |
448 | /* |
449 | * It appears that there are no users of this value whatsoever -- |
450 | * drmAddMap just discards it. Let's not encourage its use. |
451 | * (Keeping drm_addmap_core's returned mtrr value would be wrong -- |
452 | * it's not a real mtrr index anymore.) |
453 | */ |
454 | map->mtrr = -1; |
455 | |
456 | return 0; |
457 | } |
458 | |
459 | /** |
 * Remove a map private from the list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is being used, and frees any associated resources
 * (such as MTRRs) if it is not in use.
466 | * |
467 | * \sa drm_addmap |
468 | */ |
469 | int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) |
470 | { |
471 | struct drm_map_list *r_list = NULL, *list_t; |
472 | #ifndef __NetBSD__ |
473 | drm_dma_handle_t dmah; |
474 | #endif |
475 | int found = 0; |
476 | struct drm_master *master; |
477 | |
478 | /* Find the list entry for the map and remove it */ |
479 | list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { |
480 | if (r_list->map == map) { |
481 | master = r_list->master; |
482 | list_del(&r_list->head); |
483 | drm_ht_remove_key(&dev->map_hash, |
484 | r_list->user_token >> PAGE_SHIFT); |
485 | kfree(r_list); |
486 | found = 1; |
487 | break; |
488 | } |
489 | } |
490 | |
491 | if (!found) |
492 | return -EINVAL; |
493 | |
494 | switch (map->type) { |
495 | case _DRM_REGISTERS: |
496 | #ifdef __NetBSD__ |
497 | drm_core_ioremapfree(map, dev); |
498 | #else |
499 | iounmap(map->handle); |
500 | #endif |
501 | /* FALLTHROUGH */ |
502 | case _DRM_FRAME_BUFFER: |
503 | arch_phys_wc_del(map->mtrr); |
504 | break; |
505 | case _DRM_SHM: |
506 | if (master && (map->flags & _DRM_CONTAINS_LOCK)) { |
507 | spin_lock(&master->lock.spinlock); |
508 | /* |
509 | * If we successfully removed this mapping, |
510 | * then the mapping must have been there in the |
511 | * first place, and we must have had a |
512 | * heavyweight lock, so we assert here instead |
513 | * of just checking and failing. |
514 | * |
515 | * XXX What about the _DRM_CONTAINS_LOCK flag? |
516 | * Where is that supposed to be set? Is it |
517 | * equivalent to having a master set? |
518 | * |
519 | * XXX There is copypasta of this in |
520 | * drm_fops.c. |
521 | */ |
522 | BUG_ON(master->lock.hw_lock == NULL); |
523 | if (dev->sigdata.lock == master->lock.hw_lock) |
524 | dev->sigdata.lock = NULL; |
525 | master->lock.hw_lock = NULL; /* SHM removed */ |
526 | master->lock.file_priv = NULL; |
527 | #ifdef __NetBSD__ |
528 | DRM_SPIN_WAKEUP_ALL(&master->lock.lock_queue, |
529 | &master->lock.spinlock); |
530 | #else |
531 | wake_up_interruptible_all(&master->lock.lock_queue); |
532 | #endif |
533 | spin_unlock(&master->lock.spinlock); |
534 | } |
535 | vfree(map->handle); |
536 | break; |
537 | case _DRM_AGP: |
538 | case _DRM_SCATTER_GATHER: |
539 | break; |
540 | case _DRM_CONSISTENT: |
541 | #ifdef __NetBSD__ |
542 | drm_pci_free(dev, map->lm_data.dmah); |
543 | #else |
544 | dmah.vaddr = map->handle; |
545 | dmah.busaddr = map->offset; |
546 | dmah.size = map->size; |
547 | __drm_pci_free(dev, &dmah); |
548 | #endif |
549 | break; |
550 | } |
551 | kfree(map); |
552 | |
553 | return 0; |
554 | } |
555 | EXPORT_SYMBOL(drm_rmmap_locked); |
556 | |
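/**
 * Wrapper around drm_rmmap_locked() that takes and releases
 * drm_device::struct_mutex, for callers that do not already hold it.
 */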
557 | int drm_rmmap(struct drm_device *dev, struct drm_local_map *map) |
558 | { |
559 | int ret; |
560 | |
561 | mutex_lock(&dev->struct_mutex); |
562 | ret = drm_rmmap_locked(dev, map); |
563 | mutex_unlock(&dev->struct_mutex); |
564 | |
565 | return ret; |
566 | } |
567 | EXPORT_SYMBOL(drm_rmmap); |
568 | |
569 | /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on |
570 | * the last close of the device, and this is necessary for cleanup when things |
571 | * exit uncleanly. Therefore, having userland manually remove mappings seems |
572 | * like a pointless exercise since they're going away anyway. |
573 | * |
574 | * One use case might be after addmap is allowed for normal users for SHM and |
575 | * gets used by drivers that the server doesn't need to care about. This seems |
576 | * unlikely. |
577 | * |
578 | * \param inode device inode. |
579 | * \param file_priv DRM file private. |
580 | * \param cmd command. |
581 | * \param arg pointer to a struct drm_map structure. |
582 | * \return zero on success or a negative value on error. |
583 | */ |
584 | int drm_rmmap_ioctl(struct drm_device *dev, void *data, |
585 | struct drm_file *file_priv) |
586 | { |
587 | struct drm_map *request = data; |
588 | struct drm_local_map *map = NULL; |
589 | struct drm_map_list *r_list; |
590 | int ret; |
591 | |
592 | mutex_lock(&dev->struct_mutex); |
593 | list_for_each_entry(r_list, &dev->maplist, head) { |
594 | if (r_list->map && |
595 | r_list->user_token == (unsigned long)request->handle && |
596 | r_list->map->flags & _DRM_REMOVABLE) { |
597 | map = r_list->map; |
598 | break; |
599 | } |
600 | } |
601 | |
	/* The list is empty, or we walked the whole list without finding a
	 * matching removable map.
	 */
605 | if (list_empty(&dev->maplist) || !map) { |
606 | mutex_unlock(&dev->struct_mutex); |
607 | return -EINVAL; |
608 | } |
609 | |
610 | /* Register and framebuffer maps are permanent */ |
611 | if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { |
612 | mutex_unlock(&dev->struct_mutex); |
613 | return 0; |
614 | } |
615 | |
616 | ret = drm_rmmap_locked(dev, map); |
617 | |
618 | mutex_unlock(&dev->struct_mutex); |
619 | |
620 | return ret; |
621 | } |
622 | |
623 | /** |
624 | * Cleanup after an error on one of the addbufs() functions. |
625 | * |
626 | * \param dev DRM device. |
627 | * \param entry buffer entry where the error occurred. |
628 | * |
629 | * Frees any pages and buffers associated with the given entry. |
630 | */ |
631 | static void drm_cleanup_buf_error(struct drm_device * dev, |
632 | struct drm_buf_entry * entry) |
633 | { |
634 | int i; |
635 | |
636 | if (entry->seg_count) { |
637 | for (i = 0; i < entry->seg_count; i++) { |
638 | if (entry->seglist[i]) { |
639 | drm_pci_free(dev, entry->seglist[i]); |
640 | } |
641 | } |
642 | kfree(entry->seglist); |
643 | |
644 | entry->seg_count = 0; |
645 | } |
646 | |
647 | if (entry->buf_count) { |
648 | for (i = 0; i < entry->buf_count; i++) { |
649 | kfree(entry->buflist[i].dev_private); |
650 | } |
651 | kfree(entry->buflist); |
652 | |
653 | entry->buf_count = 0; |
654 | } |
655 | } |
656 | |
657 | #if __OS_HAS_AGP |
658 | /** |
659 | * Add AGP buffers for DMA transfers. |
660 | * |
661 | * \param dev struct drm_device to which the buffers are to be added. |
662 | * \param request pointer to a struct drm_buf_desc describing the request. |
663 | * \return zero on success or a negative number on failure. |
664 | * |
665 | * After some sanity checks creates a drm_buf structure for each buffer and |
666 | * reallocates the buffer list of the same size order to accommodate the new |
667 | * buffers. |
668 | */ |
669 | int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request) |
670 | { |
671 | struct drm_device_dma *dma = dev->dma; |
672 | struct drm_buf_entry *entry; |
673 | struct drm_agp_mem *agp_entry; |
674 | struct drm_buf *buf; |
675 | unsigned long offset; |
676 | unsigned long agp_offset; |
677 | int count; |
678 | int order; |
679 | int size; |
680 | int alignment; |
681 | int page_order; |
682 | int total; |
683 | int byte_count; |
684 | int i, valid; |
685 | struct drm_buf **temp_buflist; |
686 | |
687 | if (!dma) |
688 | return -EINVAL; |
689 | |
690 | count = request->count; |
691 | order = order_base_2(request->size); |
692 | size = 1 << order; |
693 | |
694 | alignment = (request->flags & _DRM_PAGE_ALIGN) |
695 | ? PAGE_ALIGN(size) : size; |
696 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; |
697 | total = PAGE_SIZE << page_order; |
698 | |
699 | byte_count = 0; |
700 | agp_offset = dev->agp->base + request->agp_start; |
701 | |
702 | DRM_DEBUG("count: %d\n" , count); |
703 | DRM_DEBUG("order: %d\n" , order); |
704 | DRM_DEBUG("size: %d\n" , size); |
705 | DRM_DEBUG("agp_offset: %lx\n" , agp_offset); |
706 | DRM_DEBUG("alignment: %d\n" , alignment); |
707 | DRM_DEBUG("page_order: %d\n" , page_order); |
708 | DRM_DEBUG("total: %d\n" , total); |
709 | |
710 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) |
711 | return -EINVAL; |
712 | |
713 | /* Make sure buffers are located in AGP memory that we own */ |
714 | valid = 0; |
715 | list_for_each_entry(agp_entry, &dev->agp->memory, head) { |
716 | if ((agp_offset >= agp_entry->bound) && |
717 | (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { |
718 | valid = 1; |
719 | break; |
720 | } |
721 | } |
722 | if (!list_empty(&dev->agp->memory) && !valid) { |
723 | DRM_DEBUG("zone invalid\n" ); |
724 | return -EINVAL; |
725 | } |
726 | spin_lock(&dev->count_lock); |
727 | if (dev->buf_use) { |
728 | spin_unlock(&dev->count_lock); |
729 | return -EBUSY; |
730 | } |
731 | atomic_inc(&dev->buf_alloc); |
732 | spin_unlock(&dev->count_lock); |
733 | |
734 | mutex_lock(&dev->struct_mutex); |
735 | entry = &dma->bufs[order]; |
736 | if (entry->buf_count) { |
737 | mutex_unlock(&dev->struct_mutex); |
738 | atomic_dec(&dev->buf_alloc); |
739 | return -ENOMEM; /* May only call once for each order */ |
740 | } |
741 | |
742 | if (count < 0 || count > 4096) { |
743 | mutex_unlock(&dev->struct_mutex); |
744 | atomic_dec(&dev->buf_alloc); |
745 | return -EINVAL; |
746 | } |
747 | |
748 | entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL); |
749 | if (!entry->buflist) { |
750 | mutex_unlock(&dev->struct_mutex); |
751 | atomic_dec(&dev->buf_alloc); |
752 | return -ENOMEM; |
753 | } |
754 | |
755 | entry->buf_size = size; |
756 | entry->page_order = page_order; |
757 | |
758 | offset = 0; |
759 | |
760 | while (entry->buf_count < count) { |
761 | buf = &entry->buflist[entry->buf_count]; |
762 | buf->idx = dma->buf_count + entry->buf_count; |
763 | buf->total = alignment; |
764 | buf->order = order; |
765 | buf->used = 0; |
766 | |
767 | buf->offset = (dma->byte_count + offset); |
768 | buf->bus_address = agp_offset + offset; |
769 | buf->address = (void *)(agp_offset + offset); |
770 | buf->next = NULL; |
771 | buf->waiting = 0; |
772 | buf->pending = 0; |
773 | buf->file_priv = NULL; |
774 | |
775 | buf->dev_priv_size = dev->driver->dev_priv_size; |
776 | buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL); |
777 | if (!buf->dev_private) { |
778 | /* Set count correctly so we free the proper amount. */ |
779 | entry->buf_count = count; |
780 | drm_cleanup_buf_error(dev, entry); |
781 | mutex_unlock(&dev->struct_mutex); |
782 | atomic_dec(&dev->buf_alloc); |
783 | return -ENOMEM; |
784 | } |
785 | |
786 | DRM_DEBUG("buffer %d @ %p\n" , entry->buf_count, buf->address); |
787 | |
788 | offset += alignment; |
789 | entry->buf_count++; |
790 | byte_count += PAGE_SIZE << page_order; |
791 | } |
792 | |
793 | DRM_DEBUG("byte_count: %d\n" , byte_count); |
794 | |
795 | temp_buflist = krealloc(dma->buflist, |
796 | (dma->buf_count + entry->buf_count) * |
797 | sizeof(*dma->buflist), GFP_KERNEL); |
798 | if (!temp_buflist) { |
799 | /* Free the entry because it isn't valid */ |
800 | drm_cleanup_buf_error(dev, entry); |
801 | mutex_unlock(&dev->struct_mutex); |
802 | atomic_dec(&dev->buf_alloc); |
803 | return -ENOMEM; |
804 | } |
805 | dma->buflist = temp_buflist; |
806 | |
807 | for (i = 0; i < entry->buf_count; i++) { |
808 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; |
809 | } |
810 | |
811 | dma->buf_count += entry->buf_count; |
812 | dma->seg_count += entry->seg_count; |
813 | dma->page_count += byte_count >> PAGE_SHIFT; |
814 | dma->byte_count += byte_count; |
815 | |
816 | DRM_DEBUG("dma->buf_count : %d\n" , dma->buf_count); |
817 | DRM_DEBUG("entry->buf_count : %d\n" , entry->buf_count); |
818 | |
819 | mutex_unlock(&dev->struct_mutex); |
820 | |
821 | request->count = entry->buf_count; |
822 | request->size = size; |
823 | |
824 | dma->flags = _DRM_DMA_USE_AGP; |
825 | |
826 | atomic_dec(&dev->buf_alloc); |
827 | return 0; |
828 | } |
829 | EXPORT_SYMBOL(drm_addbufs_agp); |
830 | #endif /* __OS_HAS_AGP */ |
831 | |
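/**
 * Add consistent PCI memory buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Allocates page-order sized segments with drm_pci_alloc(), carves them up
 * into buffers of the requested size order, and appends the result to
 * drm_device_dma::buflist and drm_device_dma::pagelist.
 */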
832 | int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) |
833 | { |
834 | struct drm_device_dma *dma = dev->dma; |
835 | int count; |
836 | int order; |
837 | int size; |
838 | int total; |
839 | int page_order; |
840 | struct drm_buf_entry *entry; |
841 | drm_dma_handle_t *dmah; |
842 | struct drm_buf *buf; |
843 | int alignment; |
844 | unsigned long offset; |
845 | int i; |
846 | int byte_count; |
847 | int page_count; |
848 | unsigned long *temp_pagelist; |
849 | struct drm_buf **temp_buflist; |
850 | |
851 | if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) |
852 | return -EINVAL; |
853 | |
854 | if (!dma) |
855 | return -EINVAL; |
856 | |
857 | #ifdef __NetBSD__ |
858 | if (!DRM_SUSER()) |
859 | return -EACCES; /* XXX */ |
860 | #else |
861 | if (!capable(CAP_SYS_ADMIN)) |
862 | return -EPERM; |
863 | #endif |
864 | |
865 | count = request->count; |
866 | order = order_base_2(request->size); |
867 | size = 1 << order; |
868 | |
869 | DRM_DEBUG("count=%d, size=%d (%d), order=%d\n" , |
870 | request->count, request->size, size, order); |
871 | |
872 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) |
873 | return -EINVAL; |
874 | |
875 | alignment = (request->flags & _DRM_PAGE_ALIGN) |
876 | ? PAGE_ALIGN(size) : size; |
877 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; |
878 | total = PAGE_SIZE << page_order; |
879 | |
880 | spin_lock(&dev->count_lock); |
881 | if (dev->buf_use) { |
882 | spin_unlock(&dev->count_lock); |
883 | return -EBUSY; |
884 | } |
885 | atomic_inc(&dev->buf_alloc); |
886 | spin_unlock(&dev->count_lock); |
887 | |
888 | mutex_lock(&dev->struct_mutex); |
889 | entry = &dma->bufs[order]; |
890 | if (entry->buf_count) { |
891 | mutex_unlock(&dev->struct_mutex); |
892 | atomic_dec(&dev->buf_alloc); |
893 | return -ENOMEM; /* May only call once for each order */ |
894 | } |
895 | |
896 | if (count < 0 || count > 4096) { |
897 | mutex_unlock(&dev->struct_mutex); |
898 | atomic_dec(&dev->buf_alloc); |
899 | return -EINVAL; |
900 | } |
901 | |
902 | entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL); |
903 | if (!entry->buflist) { |
904 | mutex_unlock(&dev->struct_mutex); |
905 | atomic_dec(&dev->buf_alloc); |
906 | return -ENOMEM; |
907 | } |
908 | |
909 | entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL); |
910 | if (!entry->seglist) { |
911 | kfree(entry->buflist); |
912 | mutex_unlock(&dev->struct_mutex); |
913 | atomic_dec(&dev->buf_alloc); |
914 | return -ENOMEM; |
915 | } |
916 | |
917 | /* Keep the original pagelist until we know all the allocations |
918 | * have succeeded |
919 | */ |
920 | temp_pagelist = kmalloc((dma->page_count + (count << page_order)) * |
921 | sizeof(*dma->pagelist), GFP_KERNEL); |
922 | if (!temp_pagelist) { |
923 | kfree(entry->buflist); |
924 | kfree(entry->seglist); |
925 | mutex_unlock(&dev->struct_mutex); |
926 | atomic_dec(&dev->buf_alloc); |
927 | return -ENOMEM; |
928 | } |
929 | memcpy(temp_pagelist, |
930 | dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); |
931 | DRM_DEBUG("pagelist: %d entries\n" , |
932 | dma->page_count + (count << page_order)); |
933 | |
934 | entry->buf_size = size; |
935 | entry->page_order = page_order; |
936 | byte_count = 0; |
937 | page_count = 0; |
938 | |
939 | while (entry->buf_count < count) { |
940 | |
941 | dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000); |
942 | |
943 | if (!dmah) { |
944 | /* Set count correctly so we free the proper amount. */ |
945 | entry->buf_count = count; |
946 | entry->seg_count = count; |
947 | drm_cleanup_buf_error(dev, entry); |
948 | kfree(temp_pagelist); |
949 | mutex_unlock(&dev->struct_mutex); |
950 | atomic_dec(&dev->buf_alloc); |
951 | return -ENOMEM; |
952 | } |
953 | entry->seglist[entry->seg_count++] = dmah; |
954 | for (i = 0; i < (1 << page_order); i++) { |
955 | DRM_DEBUG("page %d @ 0x%08lx\n" , |
956 | dma->page_count + page_count, |
957 | (unsigned long)dmah->vaddr + PAGE_SIZE * i); |
958 | temp_pagelist[dma->page_count + page_count++] |
959 | = (unsigned long)dmah->vaddr + PAGE_SIZE * i; |
960 | } |
961 | for (offset = 0; |
962 | offset + size <= total && entry->buf_count < count; |
963 | offset += alignment, ++entry->buf_count) { |
964 | buf = &entry->buflist[entry->buf_count]; |
965 | buf->idx = dma->buf_count + entry->buf_count; |
966 | buf->total = alignment; |
967 | buf->order = order; |
968 | buf->used = 0; |
969 | buf->offset = (dma->byte_count + byte_count + offset); |
970 | buf->address = (void *)((char *)dmah->vaddr + offset); |
971 | buf->bus_address = dmah->busaddr + offset; |
972 | buf->next = NULL; |
973 | buf->waiting = 0; |
974 | buf->pending = 0; |
975 | buf->file_priv = NULL; |
976 | |
977 | buf->dev_priv_size = dev->driver->dev_priv_size; |
978 | buf->dev_private = kzalloc(buf->dev_priv_size, |
979 | GFP_KERNEL); |
980 | if (!buf->dev_private) { |
981 | /* Set count correctly so we free the proper amount. */ |
982 | entry->buf_count = count; |
983 | entry->seg_count = count; |
984 | drm_cleanup_buf_error(dev, entry); |
985 | kfree(temp_pagelist); |
986 | mutex_unlock(&dev->struct_mutex); |
987 | atomic_dec(&dev->buf_alloc); |
988 | return -ENOMEM; |
989 | } |
990 | |
991 | DRM_DEBUG("buffer %d @ %p\n" , |
992 | entry->buf_count, buf->address); |
993 | } |
994 | byte_count += PAGE_SIZE << page_order; |
995 | } |
996 | |
997 | temp_buflist = krealloc(dma->buflist, |
998 | (dma->buf_count + entry->buf_count) * |
999 | sizeof(*dma->buflist), GFP_KERNEL); |
1000 | if (!temp_buflist) { |
1001 | /* Free the entry because it isn't valid */ |
1002 | drm_cleanup_buf_error(dev, entry); |
1003 | kfree(temp_pagelist); |
1004 | mutex_unlock(&dev->struct_mutex); |
1005 | atomic_dec(&dev->buf_alloc); |
1006 | return -ENOMEM; |
1007 | } |
1008 | dma->buflist = temp_buflist; |
1009 | |
1010 | for (i = 0; i < entry->buf_count; i++) { |
1011 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; |
1012 | } |
1013 | |
1014 | /* No allocations failed, so now we can replace the original pagelist |
1015 | * with the new one. |
1016 | */ |
1017 | if (dma->page_count) { |
1018 | kfree(dma->pagelist); |
1019 | } |
1020 | dma->pagelist = temp_pagelist; |
1021 | |
1022 | dma->buf_count += entry->buf_count; |
1023 | dma->seg_count += entry->seg_count; |
1024 | dma->page_count += entry->seg_count << page_order; |
1025 | dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); |
1026 | |
1027 | mutex_unlock(&dev->struct_mutex); |
1028 | |
1029 | request->count = entry->buf_count; |
1030 | request->size = size; |
1031 | |
1032 | if (request->flags & _DRM_PCI_BUFFER_RO) |
1033 | dma->flags = _DRM_DMA_USE_PCI_RO; |
1034 | |
1035 | atomic_dec(&dev->buf_alloc); |
1036 | return 0; |
1037 | |
1038 | } |
1039 | EXPORT_SYMBOL(drm_addbufs_pci); |
1040 | |
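/**
 * Add scatter-gather buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Works like drm_addbufs_agp(), except that the buffers live inside the
 * previously allocated scatter-gather area (dev->sg) instead of the AGP
 * aperture.
 */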
1041 | static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request) |
1042 | { |
1043 | struct drm_device_dma *dma = dev->dma; |
1044 | struct drm_buf_entry *entry; |
1045 | struct drm_buf *buf; |
1046 | unsigned long offset; |
1047 | unsigned long agp_offset; |
1048 | int count; |
1049 | int order; |
1050 | int size; |
1051 | int alignment; |
1052 | int page_order; |
1053 | int total; |
1054 | int byte_count; |
1055 | int i; |
1056 | struct drm_buf **temp_buflist; |
1057 | |
1058 | if (!drm_core_check_feature(dev, DRIVER_SG)) |
1059 | return -EINVAL; |
1060 | |
1061 | if (!dma) |
1062 | return -EINVAL; |
1063 | |
1064 | #ifdef __NetBSD__ |
1065 | if (!DRM_SUSER()) |
1066 | return -EACCES; /* XXX */ |
1067 | #else |
1068 | if (!capable(CAP_SYS_ADMIN)) |
1069 | return -EPERM; |
1070 | #endif |
1071 | |
1072 | count = request->count; |
1073 | order = order_base_2(request->size); |
1074 | size = 1 << order; |
1075 | |
1076 | alignment = (request->flags & _DRM_PAGE_ALIGN) |
1077 | ? PAGE_ALIGN(size) : size; |
1078 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; |
1079 | total = PAGE_SIZE << page_order; |
1080 | |
1081 | byte_count = 0; |
1082 | agp_offset = request->agp_start; |
1083 | |
1084 | DRM_DEBUG("count: %d\n" , count); |
1085 | DRM_DEBUG("order: %d\n" , order); |
1086 | DRM_DEBUG("size: %d\n" , size); |
1087 | DRM_DEBUG("agp_offset: %lu\n" , agp_offset); |
1088 | DRM_DEBUG("alignment: %d\n" , alignment); |
1089 | DRM_DEBUG("page_order: %d\n" , page_order); |
1090 | DRM_DEBUG("total: %d\n" , total); |
1091 | |
1092 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) |
1093 | return -EINVAL; |
1094 | |
1095 | spin_lock(&dev->count_lock); |
1096 | if (dev->buf_use) { |
1097 | spin_unlock(&dev->count_lock); |
1098 | return -EBUSY; |
1099 | } |
1100 | atomic_inc(&dev->buf_alloc); |
1101 | spin_unlock(&dev->count_lock); |
1102 | |
1103 | mutex_lock(&dev->struct_mutex); |
1104 | entry = &dma->bufs[order]; |
1105 | if (entry->buf_count) { |
1106 | mutex_unlock(&dev->struct_mutex); |
1107 | atomic_dec(&dev->buf_alloc); |
1108 | return -ENOMEM; /* May only call once for each order */ |
1109 | } |
1110 | |
1111 | if (count < 0 || count > 4096) { |
1112 | mutex_unlock(&dev->struct_mutex); |
1113 | atomic_dec(&dev->buf_alloc); |
1114 | return -EINVAL; |
1115 | } |
1116 | |
1117 | entry->buflist = kzalloc(count * sizeof(*entry->buflist), |
1118 | GFP_KERNEL); |
1119 | if (!entry->buflist) { |
1120 | mutex_unlock(&dev->struct_mutex); |
1121 | atomic_dec(&dev->buf_alloc); |
1122 | return -ENOMEM; |
1123 | } |
1124 | |
1125 | entry->buf_size = size; |
1126 | entry->page_order = page_order; |
1127 | |
1128 | offset = 0; |
1129 | |
1130 | while (entry->buf_count < count) { |
1131 | buf = &entry->buflist[entry->buf_count]; |
1132 | buf->idx = dma->buf_count + entry->buf_count; |
1133 | buf->total = alignment; |
1134 | buf->order = order; |
1135 | buf->used = 0; |
1136 | |
1137 | buf->offset = (dma->byte_count + offset); |
1138 | buf->bus_address = agp_offset + offset; |
1139 | buf->address = (void *)(agp_offset + offset |
1140 | + (unsigned long)dev->sg->virtual); |
1141 | buf->next = NULL; |
1142 | buf->waiting = 0; |
1143 | buf->pending = 0; |
1144 | buf->file_priv = NULL; |
1145 | |
1146 | buf->dev_priv_size = dev->driver->dev_priv_size; |
1147 | buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL); |
1148 | if (!buf->dev_private) { |
1149 | /* Set count correctly so we free the proper amount. */ |
1150 | entry->buf_count = count; |
1151 | drm_cleanup_buf_error(dev, entry); |
1152 | mutex_unlock(&dev->struct_mutex); |
1153 | atomic_dec(&dev->buf_alloc); |
1154 | return -ENOMEM; |
1155 | } |
1156 | |
1157 | DRM_DEBUG("buffer %d @ %p\n" , entry->buf_count, buf->address); |
1158 | |
1159 | offset += alignment; |
1160 | entry->buf_count++; |
1161 | byte_count += PAGE_SIZE << page_order; |
1162 | } |
1163 | |
1164 | DRM_DEBUG("byte_count: %d\n" , byte_count); |
1165 | |
1166 | temp_buflist = krealloc(dma->buflist, |
1167 | (dma->buf_count + entry->buf_count) * |
1168 | sizeof(*dma->buflist), GFP_KERNEL); |
1169 | if (!temp_buflist) { |
1170 | /* Free the entry because it isn't valid */ |
1171 | drm_cleanup_buf_error(dev, entry); |
1172 | mutex_unlock(&dev->struct_mutex); |
1173 | atomic_dec(&dev->buf_alloc); |
1174 | return -ENOMEM; |
1175 | } |
1176 | dma->buflist = temp_buflist; |
1177 | |
1178 | for (i = 0; i < entry->buf_count; i++) { |
1179 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; |
1180 | } |
1181 | |
1182 | dma->buf_count += entry->buf_count; |
1183 | dma->seg_count += entry->seg_count; |
1184 | dma->page_count += byte_count >> PAGE_SHIFT; |
1185 | dma->byte_count += byte_count; |
1186 | |
1187 | DRM_DEBUG("dma->buf_count : %d\n" , dma->buf_count); |
1188 | DRM_DEBUG("entry->buf_count : %d\n" , entry->buf_count); |
1189 | |
1190 | mutex_unlock(&dev->struct_mutex); |
1191 | |
1192 | request->count = entry->buf_count; |
1193 | request->size = size; |
1194 | |
1195 | dma->flags = _DRM_DMA_USE_SG; |
1196 | |
1197 | atomic_dec(&dev->buf_alloc); |
1198 | return 0; |
1199 | } |
1200 | |
1201 | /** |
1202 | * Add buffers for DMA transfers (ioctl). |
1203 | * |
1204 | * \param inode device inode. |
1205 | * \param file_priv DRM file private. |
1206 | * \param cmd command. |
1207 | * \param arg pointer to a struct drm_buf_desc request. |
1208 | * \return zero on success or a negative number on failure. |
1209 | * |
 * Depending on the memory type specified in drm_buf_desc::flags and the
1211 | * build options, it dispatches the call either to addbufs_agp(), |
1212 | * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent |
1213 | * PCI memory respectively. |
1214 | */ |
1215 | int drm_addbufs(struct drm_device *dev, void *data, |
1216 | struct drm_file *file_priv) |
1217 | { |
1218 | struct drm_buf_desc *request = data; |
1219 | int ret; |
1220 | |
1221 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1222 | return -EINVAL; |
1223 | |
1224 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1225 | return -EINVAL; |
1226 | |
1227 | #if __OS_HAS_AGP |
1228 | if (request->flags & _DRM_AGP_BUFFER) |
1229 | ret = drm_addbufs_agp(dev, request); |
1230 | else |
1231 | #endif |
1232 | if (request->flags & _DRM_SG_BUFFER) |
1233 | ret = drm_addbufs_sg(dev, request); |
1234 | else if (request->flags & _DRM_FB_BUFFER) |
1235 | ret = -EINVAL; |
1236 | else |
1237 | ret = drm_addbufs_pci(dev, request); |
1238 | |
1239 | return ret; |
1240 | } |
1241 | |
1242 | /** |
1243 | * Get information about the buffer mappings. |
1244 | * |
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
1248 | * |
1249 | * \param inode device inode. |
1250 | * \param file_priv DRM file private. |
1251 | * \param cmd command. |
1252 | * \param arg pointer to a drm_buf_info structure. |
1253 | * \return zero on success or a negative number on failure. |
1254 | * |
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing further buffer allocations after this call. Information
1257 | * about each requested buffer is then copied into user space. |
1258 | */ |
1259 | int drm_infobufs(struct drm_device *dev, void *data, |
1260 | struct drm_file *file_priv) |
1261 | { |
1262 | struct drm_device_dma *dma = dev->dma; |
1263 | struct drm_buf_info *request = data; |
1264 | int i; |
1265 | int count; |
1266 | |
1267 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1268 | return -EINVAL; |
1269 | |
1270 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1271 | return -EINVAL; |
1272 | |
1273 | if (!dma) |
1274 | return -EINVAL; |
1275 | |
1276 | spin_lock(&dev->count_lock); |
1277 | if (atomic_read(&dev->buf_alloc)) { |
1278 | spin_unlock(&dev->count_lock); |
1279 | return -EBUSY; |
1280 | } |
1281 | ++dev->buf_use; /* Can't allocate more after this call */ |
1282 | spin_unlock(&dev->count_lock); |
1283 | |
1284 | for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { |
1285 | if (dma->bufs[i].buf_count) |
1286 | ++count; |
1287 | } |
1288 | |
1289 | DRM_DEBUG("count = %d\n" , count); |
1290 | |
1291 | if (request->count >= count) { |
1292 | for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { |
1293 | if (dma->bufs[i].buf_count) { |
1294 | struct drm_buf_desc __user *to = |
1295 | &request->list[count]; |
1296 | struct drm_buf_entry *from = &dma->bufs[i]; |
1297 | struct drm_freelist *list = &dma->bufs[i].freelist; |
1298 | if (copy_to_user(&to->count, |
1299 | &from->buf_count, |
1300 | sizeof(from->buf_count)) || |
1301 | copy_to_user(&to->size, |
1302 | &from->buf_size, |
1303 | sizeof(from->buf_size)) || |
1304 | copy_to_user(&to->low_mark, |
1305 | &list->low_mark, |
1306 | sizeof(list->low_mark)) || |
1307 | copy_to_user(&to->high_mark, |
1308 | &list->high_mark, |
1309 | sizeof(list->high_mark))) |
1310 | return -EFAULT; |
1311 | |
1312 | DRM_DEBUG("%d %d %d %d %d\n" , |
1313 | i, |
1314 | dma->bufs[i].buf_count, |
1315 | dma->bufs[i].buf_size, |
1316 | dma->bufs[i].freelist.low_mark, |
1317 | dma->bufs[i].freelist.high_mark); |
1318 | ++count; |
1319 | } |
1320 | } |
1321 | } |
1322 | request->count = count; |
1323 | |
1324 | return 0; |
1325 | } |
1326 | |
1327 | /** |
1328 | * Specifies a low and high water mark for buffer allocation |
1329 | * |
1330 | * \param inode device inode. |
1331 | * \param file_priv DRM file private. |
1332 | * \param cmd command. |
1333 | * \param arg a pointer to a drm_buf_desc structure. |
1334 | * \return zero on success or a negative number on failure. |
1335 | * |
1336 | * Verifies that the size order is bounded between the admissible orders and |
1337 | * updates the respective drm_device_dma::bufs entry low and high water mark. |
1338 | * |
1339 | * \note This ioctl is deprecated and mostly never used. |
1340 | */ |
1341 | int drm_markbufs(struct drm_device *dev, void *data, |
1342 | struct drm_file *file_priv) |
1343 | { |
1344 | struct drm_device_dma *dma = dev->dma; |
1345 | struct drm_buf_desc *request = data; |
1346 | int order; |
1347 | struct drm_buf_entry *entry; |
1348 | |
1349 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1350 | return -EINVAL; |
1351 | |
1352 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1353 | return -EINVAL; |
1354 | |
1355 | if (!dma) |
1356 | return -EINVAL; |
1357 | |
1358 | DRM_DEBUG("%d, %d, %d\n" , |
1359 | request->size, request->low_mark, request->high_mark); |
1360 | order = order_base_2(request->size); |
1361 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) |
1362 | return -EINVAL; |
1363 | entry = &dma->bufs[order]; |
1364 | |
1365 | if (request->low_mark < 0 || request->low_mark > entry->buf_count) |
1366 | return -EINVAL; |
1367 | if (request->high_mark < 0 || request->high_mark > entry->buf_count) |
1368 | return -EINVAL; |
1369 | |
1370 | entry->freelist.low_mark = request->low_mark; |
1371 | entry->freelist.high_mark = request->high_mark; |
1372 | |
1373 | return 0; |
1374 | } |
1375 | |
1376 | /** |
1377 | * Unreserve the buffers in list, previously reserved using drmDMA. |
1378 | * |
1379 | * \param inode device inode. |
1380 | * \param file_priv DRM file private. |
1381 | * \param cmd command. |
1382 | * \param arg pointer to a drm_buf_free structure. |
1383 | * \return zero on success or a negative number on failure. |
1384 | * |
1385 | * Calls free_buffer() for each used buffer. |
1386 | * This function is primarily used for debugging. |
1387 | */ |
1388 | int drm_freebufs(struct drm_device *dev, void *data, |
1389 | struct drm_file *file_priv) |
1390 | { |
1391 | struct drm_device_dma *dma = dev->dma; |
1392 | struct drm_buf_free *request = data; |
1393 | int i; |
1394 | int idx; |
1395 | struct drm_buf *buf; |
1396 | |
1397 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1398 | return -EINVAL; |
1399 | |
1400 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1401 | return -EINVAL; |
1402 | |
1403 | if (!dma) |
1404 | return -EINVAL; |
1405 | |
1406 | DRM_DEBUG("%d\n" , request->count); |
1407 | for (i = 0; i < request->count; i++) { |
1408 | if (copy_from_user(&idx, &request->list[i], sizeof(idx))) |
1409 | return -EFAULT; |
1410 | if (idx < 0 || idx >= dma->buf_count) { |
1411 | DRM_ERROR("Index %d (of %d max)\n" , |
1412 | idx, dma->buf_count - 1); |
1413 | return -EINVAL; |
1414 | } |
1415 | buf = dma->buflist[idx]; |
1416 | if (buf->file_priv != file_priv) { |
1417 | DRM_ERROR("Process %d freeing buffer not owned\n" , |
1418 | task_pid_nr(current)); |
1419 | return -EINVAL; |
1420 | } |
1421 | drm_free_buffer(dev, buf); |
1422 | } |
1423 | |
1424 | return 0; |
1425 | } |
1426 | |
1427 | /** |
1428 | * Maps all of the DMA buffers into client-virtual space (ioctl). |
1429 | * |
1430 | * \param inode device inode. |
1431 | * \param file_priv DRM file private. |
1432 | * \param cmd command. |
1433 | * \param arg pointer to a drm_buf_map structure. |
1434 | * \return zero on success or a negative number on failure. |
1435 | * |
1436 | * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information |
1437 | * about each buffer into user space. For PCI buffers, it calls vm_mmap() with |
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1439 | * drm_mmap_dma(). |
1440 | */ |
1441 | int drm_mapbufs(struct drm_device *dev, void *data, |
1442 | struct drm_file *file_priv) |
1443 | { |
1444 | struct drm_device_dma *dma = dev->dma; |
1445 | int retcode = 0; |
1446 | const int zero = 0; |
1447 | unsigned long virtual; |
1448 | unsigned long address; |
1449 | struct drm_buf_map *request = data; |
1450 | int i; |
1451 | |
1452 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1453 | return -EINVAL; |
1454 | |
1455 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
1456 | return -EINVAL; |
1457 | |
1458 | if (!dma) |
1459 | return -EINVAL; |
1460 | |
1461 | spin_lock(&dev->count_lock); |
1462 | if (atomic_read(&dev->buf_alloc)) { |
1463 | spin_unlock(&dev->count_lock); |
1464 | return -EBUSY; |
1465 | } |
1466 | dev->buf_use++; /* Can't allocate more after this call */ |
1467 | spin_unlock(&dev->count_lock); |
1468 | |
1469 | if (request->count >= dma->buf_count) { |
1470 | if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) |
1471 | || (drm_core_check_feature(dev, DRIVER_SG) |
1472 | && (dma->flags & _DRM_DMA_USE_SG))) { |
1473 | struct drm_local_map *map = dev->agp_buffer_map; |
1474 | unsigned long token = dev->agp_buffer_token; |
1475 | |
1476 | if (!map) { |
1477 | retcode = -EINVAL; |
1478 | goto done; |
1479 | } |
1480 | virtual = vm_mmap(file_priv->filp, 0, map->size, |
1481 | PROT_READ | PROT_WRITE, |
1482 | MAP_SHARED, |
1483 | token); |
1484 | } else { |
1485 | virtual = vm_mmap(file_priv->filp, 0, dma->byte_count, |
1486 | PROT_READ | PROT_WRITE, |
1487 | MAP_SHARED, 0); |
1488 | } |
1489 | if (virtual > -1024UL) { |
1490 | /* Real error */ |
1491 | retcode = (signed long)virtual; |
1492 | goto done; |
1493 | } |
1494 | request->virtual = (void __user *)virtual; |
1495 | |
1496 | for (i = 0; i < dma->buf_count; i++) { |
1497 | if (copy_to_user(&request->list[i].idx, |
1498 | &dma->buflist[i]->idx, |
1499 | sizeof(request->list[0].idx))) { |
1500 | retcode = -EFAULT; |
1501 | goto done; |
1502 | } |
1503 | if (copy_to_user(&request->list[i].total, |
1504 | &dma->buflist[i]->total, |
1505 | sizeof(request->list[0].total))) { |
1506 | retcode = -EFAULT; |
1507 | goto done; |
1508 | } |
1509 | if (copy_to_user(&request->list[i].used, |
1510 | &zero, sizeof(zero))) { |
1511 | retcode = -EFAULT; |
1512 | goto done; |
1513 | } |
1514 | address = virtual + dma->buflist[i]->offset; /* *** */ |
1515 | if (copy_to_user(&request->list[i].address, |
1516 | &address, sizeof(address))) { |
1517 | retcode = -EFAULT; |
1518 | goto done; |
1519 | } |
1520 | } |
1521 | } |
1522 | done: |
1523 | request->count = dma->buf_count; |
1524 | DRM_DEBUG("%d buffers, retcode = %d\n" , request->count, retcode); |
1525 | |
1526 | return retcode; |
1527 | } |
1528 | |
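/**
 * DMA ioctl: dispatch to the driver's dma_ioctl handler, if it has one.
 * Returns -EINVAL for modesetting drivers or when no handler is provided.
 */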
1529 | int drm_dma_ioctl(struct drm_device *dev, void *data, |
1530 | struct drm_file *file_priv) |
1531 | { |
1532 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
1533 | return -EINVAL; |
1534 | |
1535 | if (dev->driver->dma_ioctl) |
1536 | return dev->driver->dma_ioctl(dev, data, file_priv); |
1537 | else |
1538 | return -EINVAL; |
1539 | } |
1540 | |
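/**
 * Return the first _DRM_SHM map that contains the hardware lock (the SAREA),
 * or NULL if no such map has been created.
 */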
1541 | struct drm_local_map *drm_getsarea(struct drm_device *dev) |
1542 | { |
1543 | struct drm_map_list *entry; |
1544 | |
1545 | list_for_each_entry(entry, &dev->maplist, head) { |
1546 | if (entry->map && entry->map->type == _DRM_SHM && |
1547 | (entry->map->flags & _DRM_CONTAINS_LOCK)) { |
1548 | return entry->map; |
1549 | } |
1550 | } |
1551 | return NULL; |
1552 | } |
1553 | EXPORT_SYMBOL(drm_getsarea); |
1554 | |