/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_memory.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/reservation.h>
#include <asm/page.h>

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy, so
	 * don't call ttm_tt_destroy from the callback or an infinite loop
	 * will result.
	 */
	void (*destroy) (struct ttm_tt *ttm);
};
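
/*
 * Example (a sketch, not part of the API): a minimal driver-side backend.
 * All my_* names below are hypothetical; a real bind() would program
 * ttm->pages into the GPU aperture at bo_mem->start, and unbind() would
 * tear that mapping down again.
 *
 *	struct my_ttm {
 *		struct ttm_tt ttm;	(base struct; must come first)
 *		unsigned long bound_start;
 *	};
 *
 *	static int my_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 *	{
 *		struct my_ttm *mtt = container_of(ttm, struct my_ttm, ttm);
 *
 *		mtt->bound_start = bo_mem->start;
 *		return my_gart_map(mtt->bound_start, ttm->pages,
 *				   ttm->num_pages);
 *	}
 *
 *	static int my_ttm_unbind(struct ttm_tt *ttm)
 *	{
 *		struct my_ttm *mtt = container_of(ttm, struct my_ttm, ttm);
 *
 *		my_gart_unmap(mtt->bound_start, ttm->num_pages);
 *		return 0;
 *	}
 *
 *	static void my_ttm_destroy(struct ttm_tt *ttm)
 *	{
 *		struct my_ttm *mtt = container_of(ttm, struct my_ttm, ttm);
 *
 *		ttm_tt_fini(ttm);
 *		kfree(mtt);
 *	}
 *
 *	static const struct ttm_backend_func my_backend_func = {
 *		.bind = my_ttm_bind,
 *		.unbind = my_ttm_unbind,
 *		.destroy = my_ttm_destroy,
 *	};
 */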

#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)
#define TTM_PAGE_FLAG_SG              (1 << 8)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @bdev: Pointer to the current struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @page_flags: TTM_PAGE_FLAG_XX flags.
 * @num_pages: Number of pages in the page array.
 * @sg: Scatter-gather table for SG objects imported via dma-buf.
 * @glob: Pointer to the struct ttm_bo_global used for accounting.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct ttm_bo_device *bdev;
	const struct ttm_backend_func *func;
	struct page *dummy_read_page;
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg;	/* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
#ifdef __NetBSD__
	struct uvm_object *swap_storage;
	struct pglist pglist;
#else
	struct file *swap_storage;
#endif
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages.
 * @pages_list: Used by some page allocation backends.
 *
 * This structure extends struct ttm_tt with the DMA (bus) addresses of
 * the backing pages, for backends that need to set up device mappings.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
#ifdef __NetBSD__
	bus_dma_segment_t *dma_segs;
	bus_dmamap_t dma_address;
#else
	dma_addr_t *dma_address;
#endif
	struct list_head pages_list;
};

#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @placement: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by
	 * @placement. If successful, @mem::mm_node should be set to a
	 * non-null value, @mem::start should be set to a value identifying
	 * the beginning of the range allocated, and the function should
	 * return zero.
	 * If the memory type cannot accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0, so that the caller can attempt to evict other buffers
	 * and retry.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int (*get_node)(struct ttm_mem_type_manager *man,
			struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg describing the space to free.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
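
/*
 * A sketch (not the actual implementation) of the get_node() contract
 * above, using a hypothetical my_range_alloc() allocator protected by a
 * hypothetical my_mutex. Note the three distinct outcomes: success,
 * "no space" (return 0 with @mem->mm_node left NULL, so the caller may
 * evict and retry), and a real system error.
 *
 *	static int my_man_get_node(struct ttm_mem_type_manager *man,
 *				   struct ttm_buffer_object *bo,
 *				   struct ttm_placement *placement,
 *				   struct ttm_mem_reg *mem)
 *	{
 *		struct my_range *range;
 *
 *		mutex_lock(&my_mutex);
 *		range = my_range_alloc(man->priv, mem->num_pages,
 *				       mem->page_alignment);
 *		mutex_unlock(&my_mutex);
 *
 *		if (IS_ERR(range))
 *			return PTR_ERR(range);	(system error)
 *		if (range == NULL)
 *			return 0;		(full; mm_node stays NULL)
 *
 *		mem->mm_node = range;
 *		mem->start = range->start;
 *		return 0;
 *	}
 *
 * The generic ttm_bo_manager_func declared near the end of this header
 * implements this contract on top of drm_mm.
 */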

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct mutex io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru;
};

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: pointer to a struct ttm_bo_device:
	 * @size: Size of the data needed backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct page *dummy_read_page);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt containing the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_swapout
	 *
	 * @ttm: The struct ttm_tt containing the backing pages.
	 *
	 * Deactivate all backing pages, but don't free them.
	 */
	void (*ttm_tt_swapout)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in @placement so that when the move is
	 * finished, they'll end up in bo->mem.placement.
	 */

	void (*evict_flags) (struct ttm_buffer_object *bo,
			     struct ttm_placement *placement);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * if this move would require waiting for the GPU.
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo,
			      struct file *filp);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj);
	int (*sync_obj_wait) (void *sync_obj,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);

	/* hook to notify driver about a driver move so it
	 * can do tiling things */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

	/**
	 * Driver callback for when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve and io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);

#ifdef __NetBSD__
	const struct uvm_pagerops *ttm_uvm_ops;
#endif
};
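
/*
 * A sketch of how a driver typically fills in this structure (every
 * my_* callback is a hypothetical driver function; optional hooks such
 * as move or verify_access may be left NULL as documented above):
 *
 *	static struct ttm_bo_driver my_bo_driver = {
 *		.ttm_tt_create = &my_ttm_tt_create,
 *		.ttm_tt_populate = &my_ttm_tt_populate,
 *		.ttm_tt_unpopulate = &my_ttm_tt_unpopulate,
 *		.invalidate_caches = &my_invalidate_caches,
 *		.init_mem_type = &my_init_mem_type,
 *		.evict_flags = &my_evict_flags,
 *		.move = &my_bo_move,
 *		.verify_access = &my_verify_access,
 *		.move_notify = &my_move_notify,
 *		.fault_reserve_notify = &my_fault_reserve_notify,
 *		.io_mem_reserve = &my_io_mem_reserve,
 *		.io_mem_free = &my_io_mem_free,
 *	};
 */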

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

#ifndef __NetBSD__
	struct kobject kobj;
#endif
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct mutex device_list_mutex;
	spinlock_t lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};

#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING  0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @fence_lock: Protects the synchronizing members on *all* bos belonging
 * to this device.
 * @vma_manager: Address space manager
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @val_seq: Current validation sequence.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	spinlock_t fence_lock;

	/*
	 * Protected by internal locks.
	 */
	struct drm_vma_offset_manager vma_manager;

	/*
	 * Protected by the global->lru_lock.
	 */
	struct list_head ddestroy;
	uint32_t val_seq;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

#ifdef __NetBSD__
	bus_space_tag_t memt;
	bus_dma_tag_t dmat;
#else
	struct address_space *dev_mapping;
#endif

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
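
/*
 * For example, to replace just the caching bits of a placement while
 * leaving the rest untouched (a sketch; the TTM_PL_* values come from
 * ttm_placement.h):
 *
 *	ttm_flag_masked(&mem->placement, TTM_PL_FLAG_CACHED,
 *			TTM_PL_MASK_CACHING);
 */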

/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: pointer to a struct ttm_bo_device:
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * 0: Success.
 * -ENOMEM: Out of memory.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		       unsigned long size, uint32_t page_flags,
		       struct page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
			   unsigned long size, uint32_t page_flags,
			   struct page *dummy_read_page);
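
/*
 * A sketch of a driver's ttm_tt_create callback built on ttm_tt_init().
 * The my_* names are hypothetical; my_backend_func would be a
 * struct ttm_backend_func table like the one sketched earlier:
 *
 *	static struct ttm_tt *my_ttm_tt_create(struct ttm_bo_device *bdev,
 *					       unsigned long size,
 *					       uint32_t page_flags,
 *					       struct page *dummy_read_page)
 *	{
 *		struct my_ttm *mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
 *
 *		if (mtt == NULL)
 *			return NULL;
 *		mtt->ttm.func = &my_backend_func;
 *		if (ttm_tt_init(&mtt->ttm, bdev, size, page_flags,
 *				dummy_read_page)) {
 *			kfree(mtt);
 *			return NULL;
 *		}
 *		return &mtt->ttm;
 *	}
 */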

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free memory of ttm_tt structure
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

#ifdef __NetBSD__
/**
 * ttm_tt_wire
 *
 * @ttm: The struct ttm_tt.
 *
 * Wire the pages of a ttm_tt, allocating pages for it if necessary.
 */
extern int ttm_tt_wire(struct ttm_tt *ttm);

/**
 * ttm_tt_unwire
 *
 * @ttm: The struct ttm_tt.
 *
 * Unwire the pages of a ttm_tt.
 */
extern void ttm_tt_unwire(struct ttm_tt *ttm);
#else
/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);
#endif

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct page to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing the caching attributes of the pages away
 * from cache-coherent.
 */
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change caching policy of any default kernel mappings of
 * the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct file *persistent_swap_storage);

/**
 * ttm_tt_unpopulate - free pages from a ttm
 *
 * @ttm: Pointer to the ttm_tt structure
 *
 * Calls the driver method to free all pages from a ttm
 */
extern void ttm_tt_unpopulate(struct ttm_tt *ttm);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
			       struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptibly when sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if @no_wait_gpu is true).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    struct ttm_mem_reg *mem,
			    bool interruptible,
			    bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
#ifdef __NetBSD__
			      bus_space_tag_t memt,
			      bus_dma_tag_t dmat,
#else
			      struct address_space *mapping,
#endif
			      uint64_t file_page_offset, bool need_dma32);
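
/*
 * A sketch of the usual Linux bring-up sequence (error handling elided;
 * the my_* names are hypothetical, and details such as the mapping
 * argument vary with kernel version; DRM_FILE_PAGE_OFFSET comes from
 * drm_vma_manager.h):
 *
 *	my_dev->bo_global_ref.mem_glob = my_mem_glob;
 *	my_dev->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
 *	my_dev->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
 *	my_dev->bo_global_ref.ref.init = &ttm_bo_global_init;
 *	my_dev->bo_global_ref.ref.release = &ttm_bo_global_release;
 *	ret = drm_global_item_ref(&my_dev->bo_global_ref.ref);
 *
 *	ret = ttm_bo_device_init(&my_dev->bdev,
 *				 my_dev->bo_global_ref.ref.object,
 *				 &my_bo_driver,
 *				 my_drm_dev->anon_inode->i_mapping,
 *				 DRM_FILE_PAGE_OFFSET, my_need_dma32);
 */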

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_ticket: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @ticket->stamp is older.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if @use_ticket is true).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @use_ticket is set to true.
 */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
				   bool interruptible,
				   bool no_wait, bool use_ticket,
				   struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = ww_mutex_trylock(&bo->resv->lock);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
	else
		ret = ww_mutex_lock(&bo->resv->lock, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_ticket: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @ticket->stamp is older.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted). The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction,
 * (typically execbuf), should first obtain a unique validation ticket and
 * call this function with @use_ticket == 1 and @ticket pointing at that
 * ticket. If, upon call of this function, the buffer object is already
 * reserved, the caller's ticket stamp is checked against the stamp of the
 * process currently holding the reservation, and if the caller's stamp is
 * younger than that of the holder, the function returns -EDEADLK. Otherwise
 * it sleeps waiting for the buffer to become unreserved, after which it
 * retries reserving.
 * The caller should, when receiving an -EDEADLK error,
 * release all its buffer reservations, wait for @bo to become unreserved,
 * and then rerun the validation with the same ticket. This procedure
 * will always guarantee that the process with the oldest ticket
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if @use_ticket is true).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @use_ticket is set to true.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible,
				 bool no_wait, bool use_ticket,
				 struct ww_acquire_ctx *ticket)
{
	int ret;

	WARN_ON(!kref_referenced_p(&bo->kref));

	ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);

	return ret;
}
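
/*
 * A sketch of the ticketed (execbuf-style) multi-buffer reservation
 * pattern described above; the my_* names are hypothetical and error
 * handling is abbreviated. The ww_acquire_ctx replaces the old manual
 * validation sequence numbers:
 *
 *	struct my_validate_entry *entry;
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *	list_for_each_entry(entry, &my_val_list, head) {
 *		ret = ttm_bo_reserve(entry->bo, true, false, true, &ticket);
 *		if (ret == -EDEADLK) {
 *			(unreserve everything reserved so far, then take
 *			 the contended buffer with
 *			 ttm_bo_reserve_slowpath() and restart the loop)
 *		}
 *	}
 *	ww_acquire_done(&ticket);
 *
 *	(validate and submit, fence the buffers, then ttm_bo_unreserve()
 *	 each buffer and finally ww_acquire_fini(&ticket))
 */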

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: The ww_acquire_ctx used for the failed reservation.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	WARN_ON(!kref_referenced_p(&bo->kref));

	if (interruptible)
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       ticket);
	else
		ww_mutex_lock_slow(&bo->resv->lock, ticket);

	if (likely(ret == 0))
		ttm_bo_del_sub_from_lru(bo);
	else if (ret == -EINTR)
		ret = -ERESTARTSYS;

	return ret;
}

/**
 * __ttm_bo_unreserve
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo where the buffer object is
 * already on lru lists.
 */
static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ww_mutex_unlock(&bo->resv->lock);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}
	__ttm_bo_unreserve(bo);
}

/**
 * ttm_bo_unreserve_ticket
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ticket: ww_acquire_ctx used for reserving
 *
 * Unreserve a previous reservation of @bo made with @ticket.
 */
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
					   struct ww_acquire_ctx *ticket)
{
	ttm_bo_unreserve(bo);
}

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem);
/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool evict, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem);
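
/*
 * A sketch of a driver move callback layered on these helpers (the my_*
 * names are hypothetical): try an accelerated blit first and fall back
 * to ttm_bo_move_memcpy() when that isn't possible.
 *
 *	static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
 *			      bool interruptible, bool no_wait_gpu,
 *			      struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *		if (my_hw_copy_possible(old_mem, new_mem))
 *			return my_hw_copy(bo, evict, no_wait_gpu, new_mem);
 *		return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	}
 */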

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     bool evict, bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map, TTM_PL_FLAG_XX.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
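
/*
 * For example, when inserting PTEs for a user-space mapping of a buffer
 * object (a sketch along the lines of what the TTM fault handling does):
 *
 *	vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 *					vma->vm_page_prot);
 */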

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
					struct agp_bridge_data *bridge,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

#endif