/*	$NetBSD: linux_idr.c,v 1.5 2015/01/01 01:15:43 mrg Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_idr.c,v 1.5 2015/01/01 01:15:43 mrg Exp $");
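
/*
 * NetBSD implementation of the Linux "idr" API: a map from nonnegative
 * integer ids to caller-supplied data pointers, backed by rb_tree(3).
 * Ids are handed out by idr_alloc, which must be bracketed by
 * idr_preload/idr_preload_end (see the comment above idr_preload).
 */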

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/rbtree.h>

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/slab.h>

struct idr_node {
	rb_node_t in_rb_node;
	int in_index;
	void *in_data;
	SIMPLEQ_ENTRY(idr_node) in_list;
};
SIMPLEQ_HEAD(idr_head, idr_node);
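
/*
 * Global cache of idr_node structures: idr_preload parks preallocated
 * nodes on preloaded_nodes for idr_alloc to consume; nodes that a
 * failed idr_alloc could not use are parked on discarded_nodes and
 * freed by idr_preload_end.
 */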
static struct {
	kmutex_t lock;
	struct idr_head preloaded_nodes;
	struct idr_head discarded_nodes;
} idr_cache __cacheline_aligned;

int
linux_idr_module_init(void)
{

	mutex_init(&idr_cache.lock, MUTEX_DEFAULT, IPL_VM);
	SIMPLEQ_INIT(&idr_cache.preloaded_nodes);
	SIMPLEQ_INIT(&idr_cache.discarded_nodes);
	return 0;
}

void
linux_idr_module_fini(void)
{

	KASSERT(SIMPLEQ_EMPTY(&idr_cache.discarded_nodes));
	KASSERT(SIMPLEQ_EMPTY(&idr_cache.preloaded_nodes));
	mutex_destroy(&idr_cache.lock);
}

static signed int idr_tree_compare_nodes(void *, const void *, const void *);
static signed int idr_tree_compare_key(void *, const void *, const void *);

static const rb_tree_ops_t idr_rb_ops = {
	.rbto_compare_nodes = &idr_tree_compare_nodes,
	.rbto_compare_key = &idr_tree_compare_key,
	.rbto_node_offset = offsetof(struct idr_node, in_rb_node),
	.rbto_context = NULL,
};

static signed int
idr_tree_compare_nodes(void *ctx __unused, const void *na, const void *nb)
{
	const int a = ((const struct idr_node *)na)->in_index;
	const int b = ((const struct idr_node *)nb)->in_index;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

static signed int
idr_tree_compare_key(void *ctx __unused, const void *n, const void *key)
{
	const int a = ((const struct idr_node *)n)->in_index;
	const int b = *(const int *)key;

	if (a < b)
		return -1;
	else if (b < a)
		return +1;
	else
		return 0;
}

void
idr_init(struct idr *idr)
{

	mutex_init(&idr->idr_lock, MUTEX_DEFAULT, IPL_VM);
	rb_tree_init(&idr->idr_tree, &idr_rb_ops);
}

void
idr_destroy(struct idr *idr)
{

#if 0				/* XXX No rb_tree_destroy? */
	rb_tree_destroy(&idr->idr_tree);
#endif
	mutex_destroy(&idr->idr_lock);
}

bool
idr_is_empty(struct idr *idr)
{

	return (RB_TREE_MIN(&idr->idr_tree) == NULL);
}

void *
idr_find(struct idr *idr, int id)
{
	const struct idr_node *node;
	void *data;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	data = (node == NULL? NULL : node->in_data);
	mutex_spin_exit(&idr->idr_lock);

	return data;
}
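
/*
 * idr_replace swaps in a new data pointer for an existing id and
 * returns the old pointer, or ERR_PTR(-ENOENT) if the id has no entry.
 * A minimal caller sketch, assuming the usual IS_ERR/PTR_ERR helpers
 * from <linux/err.h> (names are illustrative only):
 *
 *	void *old = idr_replace(idr, new_data, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 */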
void *
idr_replace(struct idr *idr, void *replacement, int id)
{
	struct idr_node *node;
	void *result;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	if (node == NULL) {
		result = ERR_PTR(-ENOENT);
	} else {
		result = node->in_data;
		node->in_data = replacement;
	}
	mutex_spin_exit(&idr->idr_lock);

	return result;
}

void
idr_remove(struct idr *idr, int id)
{
	struct idr_node *node;

	mutex_spin_enter(&idr->idr_lock);
	node = rb_tree_find_node(&idr->idr_tree, &id);
	KASSERTMSG((node != NULL), "idr %p has no entry for id %d", idr, id);
	rb_tree_remove_node(&idr->idr_tree, node);
	mutex_spin_exit(&idr->idr_lock);
	kfree(node);
}
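
/*
 * idr_alloc draws its tree node from the global preload cache rather
 * than allocating one itself, so each call must be bracketed by
 * idr_preload and idr_preload_end.  A minimal sketch of the expected
 * sequence (caller-chosen names; end = 0 means no upper bound on the
 * id):
 *
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(idr, data, 1, 0, GFP_KERNEL);
 *	idr_preload_end();
 *	if (id < 0)
 *		...handle -EINVAL or -ENOSPC...
 */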
void
idr_preload(gfp_t gfp)
{
	struct idr_node *node;

	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();

	node = kmalloc(sizeof(*node), gfp);
	if (node == NULL)
		return;

	mutex_spin_enter(&idr_cache.lock);
	SIMPLEQ_INSERT_TAIL(&idr_cache.preloaded_nodes, node, in_list);
	mutex_spin_exit(&idr_cache.lock);
}

int
idr_alloc(struct idr *idr, void *data, int start, int end, gfp_t gfp)
{
	int maximum = (end <= 0? INT_MAX : (end - 1));
	struct idr_node *node, *search, *collision __diagused;
	int id = start;

	/* Sanity-check inputs. */
	if (ISSET(gfp, __GFP_WAIT))
		ASSERT_SLEEPABLE();
	if (__predict_false(start < 0))
		return -EINVAL;
	if (__predict_false(maximum < start))
		return -ENOSPC;

	/* Grab a node allocated by idr_preload. */
	mutex_spin_enter(&idr_cache.lock);
	KASSERTMSG(!SIMPLEQ_EMPTY(&idr_cache.preloaded_nodes),
	    "missing call to idr_preload");
	node = SIMPLEQ_FIRST(&idr_cache.preloaded_nodes);
	SIMPLEQ_REMOVE_HEAD(&idr_cache.preloaded_nodes, in_list);
	mutex_spin_exit(&idr_cache.lock);

	/* Find an id. */
	mutex_spin_enter(&idr->idr_lock);
	search = rb_tree_find_node_geq(&idr->idr_tree, &start);
	while ((search != NULL) && (search->in_index == id)) {
		if (maximum <= id) {
			id = -ENOSPC;
			goto out;
		}
		search = rb_tree_iterate(&idr->idr_tree, search, RB_DIR_RIGHT);
		id++;
	}
	node->in_index = id;
	node->in_data = data;
	collision = rb_tree_insert_node(&idr->idr_tree, node);
	KASSERT(collision == node);
out:	mutex_spin_exit(&idr->idr_lock);

	if (id < 0) {
		/* Discard the node on failure. */
		mutex_spin_enter(&idr_cache.lock);
		SIMPLEQ_INSERT_HEAD(&idr_cache.discarded_nodes, node, in_list);
		mutex_spin_exit(&idr_cache.lock);
	}
	return id;
}

void
idr_preload_end(void)
{
	struct idr_head temp = SIMPLEQ_HEAD_INITIALIZER(temp);
	struct idr_node *node, *next;

	mutex_spin_enter(&idr_cache.lock);
	SIMPLEQ_FOREACH_SAFE(node, &idr_cache.discarded_nodes, in_list, next) {
		SIMPLEQ_REMOVE_HEAD(&idr_cache.discarded_nodes, in_list);
		SIMPLEQ_INSERT_HEAD(&temp, node, in_list);
	}
	mutex_spin_exit(&idr_cache.lock);

	SIMPLEQ_FOREACH_SAFE(node, &temp, in_list, next) {
		SIMPLEQ_REMOVE_HEAD(&temp, in_list);
		kfree(node);
	}
}
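
/*
 * idr_for_each walks the tree in increasing id order, calling (*proc)
 * on each (id, data, arg) triple, and stops at the first nonzero
 * return value, which is handed back to the caller.  A sketch with a
 * purely hypothetical callback:
 *
 *	static int
 *	count_entry(int id, void *data, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	(void)idr_for_each(idr, &count_entry, &n);
 */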
int
idr_for_each(struct idr *idr, int (*proc)(int, void *, void *), void *arg)
{
	struct idr_node *node;
	int error = 0;

	/* XXX Caller must exclude modifications. */
	membar_consumer();
	RB_TREE_FOREACH(node, &idr->idr_tree) {
		error = (*proc)(node->in_index, node->in_data, arg);
		if (error)
			break;
	}

	return error;
}