#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__

#include <ttm/ttm_bo_api.h>

struct nouveau_channel;
struct nouveau_drm;
struct nouveau_fence;
struct nouveau_vm;
struct nouveau_vma;

struct nouveau_bo {
	struct ttm_buffer_object bo;
	struct ttm_placement placement;
	u32 valid_domains;
	u32 placements[3];
	u32 busy_placements[3];
	struct ttm_bo_kmap_obj kmap;
	struct list_head head;

	/* protected by ttm_bo_reserve() */
	struct drm_file *reserved_by;
	struct list_head entry;
	int pbbo_index;
	bool validate_mapped;

	struct list_head vma_list;
	unsigned page_shift;

	u32 tile_mode;
	u32 tile_flags;
	struct nouveau_drm_tile *tile;

	/* Only valid if allocated via nouveau_gem_new(), and only while you
	 * hold a gem reference to it.  For debugging, test gem.filp != NULL
	 * to check whether it is valid. */
	struct drm_gem_object gem;

	/* protected by the ttm reservation lock */
	int pin_refcnt;

	struct ttm_bo_kmap_obj dma_buf_vmap;
};

static inline struct nouveau_bo *
nouveau_bo(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct nouveau_bo, bo);
}

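/*
 * Replace the buffer pointed to by *pnvbo with a new reference to @ref
 * (which may be NULL), dropping whatever reference *pnvbo previously held.
 */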
static inline int
nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *prev;

	if (!pnvbo)
		return -EINVAL;
	prev = *pnvbo;

	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
	if (prev) {
		struct ttm_buffer_object *bo = &prev->bo;

		ttm_bo_unref(&bo);
	}

	return 0;
}
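
/*
 * Usage sketch (not part of the original header): a single nouveau_bo_ref()
 * call both installs a new reference and drops the one previously held, so
 * passing NULL releases the pointer.
 *
 *	struct nouveau_bo *mine = NULL;
 *
 *	nouveau_bo_ref(nvbo, &mine);	(mine now holds its own reference)
 *	...
 *	nouveau_bo_ref(NULL, &mine);	(reference dropped, mine is NULL)
 */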

extern struct ttm_bo_driver nouveau_bo_driver;

void nouveau_bo_move_init(struct nouveau_drm *);
int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
		   u32 tile_mode, u32 tile_flags, struct sg_table *sg,
		   struct nouveau_bo **);
int nouveau_bo_pin(struct nouveau_bo *, u32 flags);
int nouveau_bo_unpin(struct nouveau_bo *);
int nouveau_bo_map(struct nouveau_bo *);
void nouveau_bo_unmap(struct nouveau_bo *);
void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
u16 nouveau_bo_rd16(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
			bool no_wait_gpu);
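
/*
 * Usage sketch (not part of the original header, error handling trimmed):
 * allocate a page-sized VRAM buffer, pin it, map it for CPU access, write
 * one word, then tear everything down again.  TTM_PL_FLAG_VRAM comes from
 * the TTM placement flags; "dev" is assumed to be the caller's drm_device.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, PAGE_SIZE, 0, TTM_PL_FLAG_VRAM, 0, 0,
 *			     NULL, &nvbo);
 *	if (ret)
 *		return ret;
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret)
 *		goto out_ref;
 *	ret = nouveau_bo_map(nvbo);
 *	if (ret)
 *		goto out_unpin;
 *
 *	nouveau_bo_wr32(nvbo, 0, 0xcafebabe);
 *
 *	nouveau_bo_unmap(nvbo);
 *out_unpin:
 *	nouveau_bo_unpin(nvbo);
 *out_ref:
 *	nouveau_bo_ref(NULL, &nvbo);
 *	return ret;
 */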

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);

int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
		       struct nouveau_vma *);
void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
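
/*
 * Usage sketch (not part of the original header): map a buffer into a
 * client's address space if it is not there already.  "cli->vm" is assumed
 * to be the client's struct nouveau_vm; the vma is owned by the caller.
 *
 *	struct nouveau_vma *vma;
 *	int ret;
 *
 *	if (!nouveau_bo_vma_find(nvbo, cli->vm)) {
 *		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *		if (!vma)
 *			return -ENOMEM;
 *		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
 *		if (ret) {
 *			kfree(vma);
 *			return ret;
 *		}
 *	}
 *
 * The mapping is undone later with nouveau_bo_vma_del() followed by
 * kfree() of the vma.
 */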

#ifdef __NetBSD__
# define __iomem volatile
# define __force
#endif

/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
{
	bool is_iomem;
	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
		&nvbo->kmap, &is_iomem);
	WARN_ON_ONCE(ioptr && !is_iomem);
	return ioptr;
}
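
/*
 * Example (sketch, not part of the original header): once nouveau_bo_map()
 * has succeeded, a buffer that ended up in I/O memory can be poked through
 * the usual MMIO accessors.
 *
 *	void __iomem *p = nvbo_kmap_obj_iovirtual(nvbo);
 *
 *	if (p)
 *		iowrite32(0, p);
 */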

#ifdef __NetBSD__
# undef __iomem
# undef __force
#endif

#endif