/*	$NetBSD: bus_dma_hacks.h,v 1.8 2015/10/17 21:11:06 jmcneill Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _DRM_BUS_DMA_HACKS_H_
#define _DRM_BUS_DMA_HACKS_H_

#include <sys/cdefs.h>
#include <sys/bus.h>
#include <sys/kmem.h>
#include <sys/queue.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

#if defined(__i386__) || defined(__x86_64__)
#include <x86/bus_private.h>
#include <x86/machdep.h>
#elif defined(__arm__)
#else
#error DRM GEM/TTM need new MI bus_dma APIs! Halp!
#endif

static inline int
bus_dmamem_wire_uvm_object(bus_dma_tag_t tag, struct uvm_object *uobj,
    off_t start, bus_size_t size, struct pglist *pages, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct pglist pageq;
	struct vm_page *page;
	bus_size_t resid;
	unsigned i;
	int error;

	/*
	 * XXX `#ifdef __x86_64__' is a horrible way to work around
	 * GCC's `comparison is always true due to limited range of
	 * data type' warning, which encourages unsafe, nonportable
	 * code and has no obvious way to be selectively suppressed.
	 */
#ifdef __x86_64__
	KASSERT(size <= __type_max(off_t));
#endif

	KASSERT(start <= (__type_max(off_t) - size));
	KASSERT(alignment == PAGE_SIZE);	/* XXX */
	KASSERT(0 < nsegs);

	if (pages == NULL) {
		TAILQ_INIT(&pageq);
		pages = &pageq;
	}

	error = uvm_obj_wirepages(uobj, start, (start + size), pages);
	if (error)
		goto fail0;

	page = TAILQ_FIRST(pages);
	KASSERT(page != NULL);

	/*
	 * Fill one segment per wired page; the last segment may be
	 * shorter than a page.  Track the residual size separately so
	 * that the error path can still unwire the original range and
	 * the subtraction cannot wrap around.
	 */
	resid = size;
	for (i = 0; i < (unsigned)nsegs; i++) {
		if (page == NULL)
			break;
		segs[i].ds_addr = VM_PAGE_TO_PHYS(page);
		segs[i].ds_len = MIN(PAGE_SIZE, resid);
		resid -= segs[i].ds_len;
		page = TAILQ_NEXT(page, pageq.queue);
	}
	if (page != NULL) {
		/* More wired pages than segments: can't describe them. */
		error = EFBIG;
		goto fail1;
	}

	/* Success! */
	*rsegs = i;
	return 0;

fail1:	uvm_obj_unwirepages(uobj, start, (start + size));
fail0:	return error;
}

static inline void
bus_dmamem_unwire_uvm_object(bus_dma_tag_t tag __unused,
    struct uvm_object *uobj, off_t start, bus_size_t size,
    bus_dma_segment_t *segs __unused, int nsegs __unused)
{
	uvm_obj_unwirepages(uobj, start, (start + size));
}
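
/*
 * Example (an illustrative sketch, not part of this header): wiring a
 * uvm object's pages into DMA segments and releasing them again.  The
 * names dmat, uobj, size, and the fail label are hypothetical; size is
 * assumed to be page-aligned so that nsegs matches the number of wired
 * pages.
 *
 *	bus_dma_segment_t *segs;
 *	int nsegs = size >> PAGE_SHIFT;
 *	int rsegs, error;
 *
 *	segs = kmem_alloc(nsegs * sizeof(segs[0]), KM_SLEEP);
 *	error = bus_dmamem_wire_uvm_object(dmat, uobj, 0, size, NULL,
 *	    PAGE_SIZE, 0, segs, nsegs, &rsegs, BUS_DMA_WAITOK);
 *	if (error)
 *		goto fail;	(* free segs in the caller's error path *)
 *
 *	... use segs[0..rsegs-1], e.g. with bus_dmamap_load_raw ...
 *
 *	bus_dmamem_unwire_uvm_object(dmat, uobj, 0, size, segs, rsegs);
 *	kmem_free(segs, nsegs * sizeof(segs[0]));
 *
 * The alignment argument must be PAGE_SIZE (asserted above); the
 * boundary and flags arguments are currently unused.
 */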

static inline int
bus_dmamem_pgfl(bus_dma_tag_t tag)
{
#if defined(__i386__) || defined(__x86_64__)
	return x86_select_freelist(tag->_bounce_alloc_hi - 1);
#else
	return VM_FREELIST_DEFAULT;
#endif
}
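
/*
 * Example (a sketch under stated assumptions, not part of this header):
 * bus_dmamem_pgfl picks the VM page freelist whose pages fall within
 * the tag's non-bouncing range on x86, and the default freelist
 * elsewhere.  A GEM-style caller might use it to constrain an anonymous
 * uvm object's backing pages to DMA-able memory; uao_set_pgfl() and the
 * names dev and size below are assumptions about the caller, not
 * something this header provides.
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(size, 0);
 *	uao_set_pgfl(uobj, bus_dmamem_pgfl(dev->dmat));
 */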

static inline int
bus_dmamap_load_pglist(bus_dma_tag_t tag, bus_dmamap_t map,
    struct pglist *pglist, bus_size_t size, int flags)
{
	km_flag_t kmflags;
	bus_dma_segment_t *segs;
	int nsegs, seg;
	struct vm_page *page;
	int error;

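	/* Count the pages so we can size a temporary segment array. */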
	nsegs = 0;
	TAILQ_FOREACH(page, pglist, pageq.queue) {
		if (nsegs == MIN(INT_MAX, (SIZE_MAX / sizeof(segs[0]))))
			return ENOMEM;
		nsegs++;
	}

	KASSERT(nsegs <= (SIZE_MAX / sizeof(segs[0])));
	switch (flags & (BUS_DMA_WAITOK|BUS_DMA_NOWAIT)) {
	case BUS_DMA_WAITOK:	kmflags = KM_SLEEP;	break;
	case BUS_DMA_NOWAIT:	kmflags = KM_NOSLEEP;	break;
	default:	panic("invalid flags: %d", flags);
	}
	segs = kmem_alloc((nsegs * sizeof(segs[0])), kmflags);
	if (segs == NULL)
		return ENOMEM;

	seg = 0;
	TAILQ_FOREACH(page, pglist, pageq.queue) {
		segs[seg].ds_addr = VM_PAGE_TO_PHYS(page);
		segs[seg].ds_len = PAGE_SIZE;
		seg++;
	}

	error = bus_dmamap_load_raw(tag, map, segs, nsegs, size, flags);
	if (error)
		goto fail0;

	/* Success! */
	error = 0;
	goto out;

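	/*
	 * No goto currently targets fail1; __unused quiets the
	 * unused-label warning.
	 */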
fail1: __unused
	bus_dmamap_unload(tag, map);
fail0:	KASSERT(error);
out:	kmem_free(segs, (nsegs * sizeof(segs[0])));
	return error;
}
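
/*
 * Example (an illustrative sketch, not part of this header): loading
 * pages allocated with uvm_pglistalloc into a DMA map.  The names dmat
 * and npages and the fail labels are hypothetical, and a real caller
 * would pass its tag's actual address limit rather than ~(paddr_t)0.
 *
 *	struct pglist pglist;
 *	bus_dmamap_t map;
 *	int error;
 *
 *	TAILQ_INIT(&pglist);
 *	error = uvm_pglistalloc((psize_t)npages << PAGE_SHIFT, 0,
 *	    ~(paddr_t)0, PAGE_SIZE, 0, &pglist, npages, 1);
 *	if (error)
 *		goto fail0;
 *	error = bus_dmamap_create(dmat, (bus_size_t)npages << PAGE_SHIFT,
 *	    npages, PAGE_SIZE, 0, BUS_DMA_WAITOK, &map);
 *	if (error)
 *		goto fail1;
 *	error = bus_dmamap_load_pglist(dmat, map, &pglist,
 *	    (bus_size_t)npages << PAGE_SHIFT, BUS_DMA_WAITOK);
 *	if (error)
 *		goto fail2;
 *
 *	... DMA to/from the mapped pages ...
 *
 *	bus_dmamap_unload(dmat, map);
 *
 * Every page becomes one PAGE_SIZE segment, so the map must allow at
 * least npages segments with a maximum segment size of at least
 * PAGE_SIZE.
 */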

#endif	/* _DRM_BUS_DMA_HACKS_H_ */