/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <asm/bug.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#ifdef __NetBSD__
#include <uvm/uvm_extern.h>
#endif
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
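
/*
 * Worked example (not normative; the exact values depend on the platform's
 * PAGE_SHIFT): with 4 KiB pages (PAGE_SHIFT == 12) on a 64-bit kernel,
 *
 *	DRM_FILE_PAGE_OFFSET_START = (0xFFFFFFFF >> 12) + 1 = 0x100000 pages
 *
 * so fake offsets begin at the 4 GiB byte boundary, above anything a 32-bit
 * pgoff could name, and
 *
 *	DRM_FILE_PAGE_OFFSET_SIZE = (0xFFFFFFFF >> 12) * 16 = 0xFFFFF0 pages
 *
 * gives roughly 64 GiB of fake-offset address space to hand out.
 */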

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

#ifdef __NetBSD__
	linux_mutex_init(&dev->object_name_lock);
#else
	mutex_init(&dev->object_name_lock);
#endif
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

/**
 * drm_gem_destroy - Tear down the GEM device fields
 * @dev: drm_device being torn down
 */
void
drm_gem_destroy(struct drm_device *dev)
{

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;

	idr_destroy(&dev->object_name_idr);
#ifdef __NetBSD__
	linux_mutex_destroy(&dev->object_name_lock);
#endif
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
#ifndef __NetBSD__
	struct file *filp;
#endif

	drm_gem_private_object_init(dev, obj, size);

#ifdef __NetBSD__
	/*
	 * A uao may not have size 0, but a gem object may. Allocate a
	 * spurious page so we needn't teach uao how to have size 0.
	 */
	obj->gemo_shm_uao = uao_create(MAX(size, PAGE_SIZE), 0);
	/*
	 * XXX This is gross. We ought to do it the other way around:
	 * set the uao to have the main uvm object's lock. However,
	 * uvm_obj_setlock is not safe on uvm_aobjs.
	 */
	mutex_obj_hold(obj->gemo_shm_uao->vmobjlock);
	uvm_obj_setlock(&obj->gemo_uvmobj, obj->gemo_shm_uao->vmobjlock);
#else
	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
#endif

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
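
/*
 * Usage sketch (illustrative only; struct foo_gem_object and foo_gem_alloc
 * are hypothetical names, not part of this file): drivers typically embed
 * struct drm_gem_object in a driver-private structure and initialize it
 * with a page-aligned size:
 *
 *	struct foo_gem_object {
 *		struct drm_gem_object base;
 *		... driver-private bookkeeping ...
 *	};
 *
 *	static struct foo_gem_object *
 *	foo_gem_alloc(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_gem_object *fobj;
 *		int ret;
 *
 *		fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		if (fobj == NULL)
 *			return ERR_PTR(-ENOMEM);
 *		ret = drm_gem_object_init(dev, &fobj->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(fobj);
 *			return ERR_PTR(ret);
 *		}
 *		return fobj;
 *	}
 */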

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __NetBSD__
	obj->gemo_shm_uao = NULL;
	KASSERT(drm_core_check_feature(dev, DRIVER_GEM));
	KASSERT(dev->driver->gem_uvm_ops != NULL);
	uvm_obj_init(&obj->gemo_uvmobj, dev->driver->gem_uvm_ops, true, 1);
#else
	obj->filp = NULL;
#endif

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
#ifdef __NetBSD__
	drm_vma_node_init(&obj->vma_node);
#else
	drm_vma_node_reset(&obj->vma_node);
#endif
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
#ifndef __NetBSD__		/* XXX drm prime */
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
#endif
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
#ifndef __NetBSD__
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
#endif
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their own reference to the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
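
/*
 * Usage sketch (illustrative only; foo_gem_alloc and struct foo_gem_object
 * are the hypothetical names from the sketch above): a driver's
 * ->dumb_create() hook typically allocates an object and publishes it to
 * userspace through a handle, dropping its own reference once the handle
 * holds one:
 *
 *	static int foo_dumb_create(struct drm_file *file,
 *	    struct drm_device *dev, struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_gem_object *fobj;
 *		int ret;
 *
 *		args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *		fobj = foo_gem_alloc(dev, args->size);
 *		if (IS_ERR(fobj))
 *			return PTR_ERR(fobj);
 *		ret = drm_gem_handle_create(file, &fobj->base, &args->handle);
 *		drm_gem_object_unreference_unlocked(&fobj->base);
 *		return ret;
 *	}
 */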


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. obj->size). Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
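
/*
 * Usage sketch (illustrative only; foo_dumb_map_offset is a hypothetical
 * name): a driver's ->dumb_map_offset() hook usually looks up the object,
 * makes sure a fake offset exists, and reports it back to userspace for a
 * later mmap(2):
 *
 *	static int foo_dumb_map_offset(struct drm_file *file,
 *	    struct drm_device *dev, uint32_t handle, uint64_t *offset)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = drm_gem_object_lookup(dev, file, handle);
 *		if (obj == NULL)
 *			return -ENOENT;
 *		ret = drm_gem_create_mmap_offset(obj);
 *		if (ret == 0)
 *			*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *		drm_gem_object_unreference_unlocked(obj);
 *		return ret;
 *	}
 */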

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
#ifdef __NetBSD__
struct page **
drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct pglist pglist;
	struct vm_page *vm_page;
	struct page **pages;
	unsigned i;
	int ret;

	KASSERT((obj->size & (PAGE_SIZE - 1)) == 0);

	pages = drm_malloc_ab(obj->size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL) {
		ret = -ENOMEM;
		goto fail0;
	}

	TAILQ_INIT(&pglist);
	/* XXX errno NetBSD->Linux */
	ret = -uvm_obj_wirepages(obj->gemo_shm_uao, 0, obj->size, &pglist);
	if (ret)
		goto fail1;

	i = 0;
	TAILQ_FOREACH(vm_page, &pglist, pageq.queue)
		pages[i++] = container_of(vm_page, struct page, p_vmp);

	return pages;

fail1:	drm_free_large(pages);
fail0:	return ERR_PTR(ret);
}
#else
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB.. if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB. If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue. But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
#endif
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
#ifdef __NetBSD__
void
drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty,
    bool accessed __unused /* XXX */)
{
	unsigned i;

	for (i = 0; i < (obj->size >> PAGE_SHIFT); i++) {
		if (dirty)
			pages[i]->p_vmp.flags &= ~PG_CLEAN;
	}

	uvm_obj_unwirepages(obj->gemo_shm_uao, 0, obj->size);
}
#else
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
#endif
EXPORT_SYMBOL(drm_gem_put_pages);
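
/*
 * Usage sketch (illustrative only): drivers pair these helpers around the
 * time the backing pages are actually needed, e.g. in their pin/unpin or
 * object get_pages/put_pages hooks:
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj, GFP_KERNEL);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... map the pages for DMA, build a scatterlist, etc. ...
 *	drm_gem_put_pages(obj, pages, true, true);
 */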

/** Returns a reference to the object named by the handle, or NULL if no
 * such object exists. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
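
/*
 * Usage sketch (illustrative only): every successful lookup returns with a
 * reference held, so callers must drop it when they are done:
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	... operate on obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */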

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

#ifndef __NetBSD__		/* XXX drm prime */
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
#endif
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
#ifdef __NetBSD__
	spin_lock_destroy(&file_private->table_lock);
#endif
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * Releases the structures and resources used by @obj; the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
#ifndef __NetBSD__
	WARN_ON(obj->dma_buf);
#endif

#ifdef __NetBSD__
	drm_vma_node_destroy(&obj->vma_node);
	if (obj->gemo_shm_uao)
		uao_detach(obj->gemo_shm_uao);
	uvm_obj_destroy(&obj->gemo_uvmobj, true);
#else
	if (obj->filp)
		fput(obj->filp);
#endif

	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

#ifndef __NetBSD__

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
					   vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
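
/*
 * Wiring sketch (illustrative only; foo_fops is a hypothetical name):
 * drivers reach this routine by installing it as the ->mmap handler in
 * their file_operations, while the actual fault handling comes from the
 * gem_vm_ops they set in dev->driver:
 *
 *	static const struct file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *	};
 */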

#endif	/* !defined(__NetBSD__) */