1 | /************************************************************************** |
2 | * |
3 | * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. |
4 | * All Rights Reserved. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the |
8 | * "Software"), to deal in the Software without restriction, including |
9 | * without limitation the rights to use, copy, modify, merge, publish, |
10 | * distribute, sub license, and/or sell copies of the Software, and to |
11 | * permit persons to whom the Software is furnished to do so, subject to |
12 | * the following conditions: |
13 | * |
14 | * The above copyright notice and this permission notice (including the |
15 | * next paragraph) shall be included in all copies or substantial portions |
16 | * of the Software. |
17 | * |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * |
26 | * |
27 | **************************************************************************/ |
28 | |
29 | /* |
30 | * Generic simple memory manager implementation. Intended to be used as a base |
31 | * class implementation for more advanced memory managers. |
32 | * |
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is just
 * an unordered stack of free regions. This could easily be improved by using an
 * RB-tree instead, at least if heavy fragmentation is expected.
37 | * |
38 | * Aligned allocations can also see improvement. |
39 | * |
40 | * Authors: |
41 | * Thomas Hellström <thomas-at-tungstengraphics-dot-com> |
42 | */ |
43 | |
44 | #include <drm/drmP.h> |
45 | #include <drm/drm_mm.h> |
46 | #include <linux/slab.h> |
47 | #include <linux/seq_file.h> |
48 | #include <linux/export.h> |
49 | #include <linux/printk.h> |
50 | #include <asm/bug.h> |
51 | |
52 | /** |
53 | * DOC: Overview |
54 | * |
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of drm_mm
 * is that it lives in the DRM core, which means it is easier to extend for
 * some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, and allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
65 | * |
66 | * The range allocator also supports reservation of preallocated blocks. This is |
67 | * useful for taking over initial mode setting configurations from the firmware, |
68 | * where an object needs to be created which exactly matches the firmware's |
69 | * scanout target. As long as the range is still free it can be inserted anytime |
70 | * after the allocator is initialized, which helps with avoiding looped |
 * dependencies in the driver load sequence.
72 | * |
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since graphics thrashing is a
 * fairly steep performance cliff anyway it is not a real concern. Removing a
 * node again is O(1).
79 | * |
80 | * drm_mm supports a few features: Alignment and range restrictions can be |
 * supplied. Furthermore, every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
83 | * to implement sophisticated placement restrictions. The i915 DRM driver uses |
84 | * this to implement guard pages between incompatible caching domains in the |
85 | * graphics TT. |
86 | * |
87 | * Two behaviors are supported for searching and allocating: bottom-up and top-down. |
88 | * The default is bottom-up. Top-down allocation can be used if the memory area |
89 | * has different restrictions, or just to reduce fragmentation. |
90 | * |
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
93 | */ |
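
/*
 * A rough usage sketch (not taken from any particular driver; my_device,
 * my_buffer and the VRAM naming are made-up assumptions): the driver embeds a
 * struct drm_mm_node in its buffer object, initializes the manager once for
 * the range it owns and then inserts/removes nodes as buffers are bound and
 * unbound. Both the drm_mm and the node are assumed to be zero-initialized
 * (e.g. kzalloc'ed) before first use.
 *
 *	struct my_buffer {
 *		struct drm_mm_node vram_node;	// embedded, no separate kmalloc
 *		// ... driver-specific members ...
 *	};
 *
 *	void my_device_vram_init(struct my_device *dev)
 *	{
 *		// manage the byte range [0, vram_size)
 *		drm_mm_init(&dev->vram_mm, 0, dev->vram_size);
 *	}
 *
 *	int my_buffer_bind(struct my_device *dev, struct my_buffer *buf,
 *			   unsigned long size)
 *	{
 *		// default search (first suitable hole), page-aligned, no color
 *		return drm_mm_insert_node_generic(&dev->vram_mm,
 *						  &buf->vram_node,
 *						  size, PAGE_SIZE, 0,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	}
 *
 *	void my_buffer_unbind(struct my_buffer *buf)
 *	{
 *		drm_mm_remove_node(&buf->vram_node);
 *	}
 */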
94 | |
95 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
96 | unsigned long size, |
97 | unsigned alignment, |
98 | unsigned long color, |
99 | enum drm_mm_search_flags flags); |
100 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, |
101 | unsigned long size, |
102 | unsigned alignment, |
103 | unsigned long color, |
104 | unsigned long start, |
105 | unsigned long end, |
106 | enum drm_mm_search_flags flags); |
107 | |
108 | static void drm_mm_insert_helper(struct drm_mm_node *hole_node, |
109 | struct drm_mm_node *node, |
110 | unsigned long size, unsigned alignment, |
111 | unsigned long color, |
112 | enum drm_mm_allocator_flags flags) |
113 | { |
114 | struct drm_mm *mm = hole_node->mm; |
115 | unsigned long hole_start = drm_mm_hole_node_start(hole_node); |
116 | unsigned long hole_end = drm_mm_hole_node_end(hole_node); |
117 | unsigned long adj_start = hole_start; |
118 | unsigned long adj_end = hole_end; |
119 | |
120 | BUG_ON(node->allocated); |
121 | |
122 | if (mm->color_adjust) |
123 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); |
124 | |
125 | if (flags & DRM_MM_CREATE_TOP) |
126 | adj_start = adj_end - size; |
127 | |
128 | if (alignment) { |
129 | unsigned tmp = adj_start % alignment; |
130 | if (tmp) { |
131 | if (flags & DRM_MM_CREATE_TOP) |
132 | adj_start -= tmp; |
133 | else |
134 | adj_start += alignment - tmp; |
135 | } |
136 | } |
137 | |
138 | BUG_ON(adj_start < hole_start); |
139 | BUG_ON(adj_end > hole_end); |
140 | |
141 | if (adj_start == hole_start) { |
142 | hole_node->hole_follows = 0; |
143 | list_del(&hole_node->hole_stack); |
144 | } |
145 | |
146 | node->start = adj_start; |
147 | node->size = size; |
148 | node->mm = mm; |
149 | node->color = color; |
150 | node->allocated = 1; |
151 | |
152 | INIT_LIST_HEAD(&node->hole_stack); |
153 | list_add(&node->node_list, &hole_node->node_list); |
154 | |
155 | BUG_ON(node->start + node->size > adj_end); |
156 | |
157 | node->hole_follows = 0; |
158 | if (__drm_mm_hole_node_start(node) < hole_end) { |
159 | list_add(&node->hole_stack, &mm->hole_stack); |
160 | node->hole_follows = 1; |
161 | } |
162 | } |
163 | |
164 | /** |
 * drm_mm_reserve_node - insert a pre-initialized node
166 | * @mm: drm_mm allocator to insert @node into |
167 | * @node: drm_mm_node to insert |
168 | * |
 * This function inserts an already set-up drm_mm_node into the allocator,
170 | * meaning that start, size and color must be set by the caller. This is useful |
171 | * to initialize the allocator with preallocated objects which must be set-up |
172 | * before the range allocator can be set-up, e.g. when taking over a firmware |
173 | * framebuffer. |
174 | * |
175 | * Returns: |
176 | * 0 on success, -ENOSPC if there's no hole where @node is. |
177 | */ |
178 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) |
179 | { |
180 | struct drm_mm_node *hole; |
181 | unsigned long end = node->start + node->size; |
182 | unsigned long hole_start; |
183 | unsigned long hole_end; |
184 | |
185 | BUG_ON(node == NULL); |
186 | |
187 | /* Find the relevant hole to add our node to */ |
188 | drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { |
189 | if (hole_start > node->start || hole_end < end) |
190 | continue; |
191 | |
192 | node->mm = mm; |
193 | node->allocated = 1; |
194 | |
195 | INIT_LIST_HEAD(&node->hole_stack); |
196 | list_add(&node->node_list, &hole->node_list); |
197 | |
198 | if (node->start == hole_start) { |
199 | hole->hole_follows = 0; |
200 | list_del_init(&hole->hole_stack); |
201 | } |
202 | |
203 | node->hole_follows = 0; |
204 | if (end != hole_end) { |
205 | list_add(&node->hole_stack, &mm->hole_stack); |
206 | node->hole_follows = 1; |
207 | } |
208 | |
209 | return 0; |
210 | } |
211 | |
212 | return -ENOSPC; |
213 | } |
214 | EXPORT_SYMBOL(drm_mm_reserve_node); |
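
/*
 * A hedged sketch of the firmware-takeover case mentioned above (fb_base,
 * fb_size, fw_fb_node and my_device are illustrative assumptions): the driver
 * knows where the firmware placed its scanout buffer and reserves exactly that
 * range before anything else can be allocated there. The node is assumed to be
 * zero-initialized apart from the fields set below.
 *
 *	int my_device_reserve_fw_fb(struct my_device *dev)
 *	{
 *		dev->fw_fb_node.start = dev->fb_base;
 *		dev->fw_fb_node.size = dev->fb_size;
 *		dev->fw_fb_node.color = 0;
 *
 *		// fails with -ENOSPC if something already overlaps the range
 *		return drm_mm_reserve_node(&dev->vram_mm, &dev->fw_fb_node);
 *	}
 */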
215 | |
216 | /** |
217 | * drm_mm_insert_node_generic - search for space and insert @node |
218 | * @mm: drm_mm to allocate from |
 * @node: preallocated node to insert
220 | * @size: size of the allocation |
221 | * @alignment: alignment of the allocation |
222 | * @color: opaque tag value to use for this node |
223 | * @sflags: flags to fine-tune the allocation search |
224 | * @aflags: flags to fine-tune the allocation behavior |
225 | * |
226 | * The preallocated node must be cleared to 0. |
227 | * |
228 | * Returns: |
229 | * 0 on success, -ENOSPC if there's no suitable hole. |
230 | */ |
231 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, |
232 | unsigned long size, unsigned alignment, |
233 | unsigned long color, |
234 | enum drm_mm_search_flags sflags, |
235 | enum drm_mm_allocator_flags aflags) |
236 | { |
237 | struct drm_mm_node *hole_node; |
238 | |
239 | hole_node = drm_mm_search_free_generic(mm, size, alignment, |
240 | color, sflags); |
241 | if (!hole_node) |
242 | return -ENOSPC; |
243 | |
244 | drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags); |
245 | return 0; |
246 | } |
247 | EXPORT_SYMBOL(drm_mm_insert_node_generic); |
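
/*
 * As a sketch of the search flags (same made-up driver structures as in the
 * overview example): DRM_MM_SEARCH_BEST scans all holes and picks the smallest
 * one that fits, trading search time for tighter packing, while the default
 * simply takes the first suitable hole from the stack of recently freed ranges.
 *
 *	ret = drm_mm_insert_node_generic(&dev->vram_mm, &buf->vram_node,
 *					 size, 0, 0,
 *					 DRM_MM_SEARCH_BEST,
 *					 DRM_MM_CREATE_DEFAULT);
 *	if (ret == -ENOSPC)
 *		// no hole big enough; the caller must evict or give up
 *		return ret;
 */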
248 | |
249 | static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, |
250 | struct drm_mm_node *node, |
251 | unsigned long size, unsigned alignment, |
252 | unsigned long color, |
253 | unsigned long start, unsigned long end, |
254 | enum drm_mm_allocator_flags flags) |
255 | { |
256 | struct drm_mm *mm = hole_node->mm; |
257 | unsigned long hole_start = drm_mm_hole_node_start(hole_node); |
258 | unsigned long hole_end = drm_mm_hole_node_end(hole_node); |
259 | unsigned long adj_start = hole_start; |
260 | unsigned long adj_end = hole_end; |
261 | |
262 | BUG_ON(!hole_node->hole_follows || node->allocated); |
263 | |
264 | if (adj_start < start) |
265 | adj_start = start; |
266 | if (adj_end > end) |
267 | adj_end = end; |
268 | |
269 | if (flags & DRM_MM_CREATE_TOP) |
270 | adj_start = adj_end - size; |
271 | |
272 | if (mm->color_adjust) |
273 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); |
274 | |
275 | if (alignment) { |
276 | unsigned tmp = adj_start % alignment; |
277 | if (tmp) { |
278 | if (flags & DRM_MM_CREATE_TOP) |
279 | adj_start -= tmp; |
280 | else |
281 | adj_start += alignment - tmp; |
282 | } |
283 | } |
284 | |
285 | if (adj_start == hole_start) { |
286 | hole_node->hole_follows = 0; |
287 | list_del(&hole_node->hole_stack); |
288 | } |
289 | |
290 | node->start = adj_start; |
291 | node->size = size; |
292 | node->mm = mm; |
293 | node->color = color; |
294 | node->allocated = 1; |
295 | |
296 | INIT_LIST_HEAD(&node->hole_stack); |
297 | list_add(&node->node_list, &hole_node->node_list); |
298 | |
299 | BUG_ON(node->start < start); |
300 | BUG_ON(node->start < adj_start); |
301 | BUG_ON(node->start + node->size > adj_end); |
302 | BUG_ON(node->start + node->size > end); |
303 | |
304 | node->hole_follows = 0; |
305 | if (__drm_mm_hole_node_start(node) < hole_end) { |
306 | list_add(&node->hole_stack, &mm->hole_stack); |
307 | node->hole_follows = 1; |
308 | } |
309 | } |
310 | |
311 | /** |
312 | * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node |
313 | * @mm: drm_mm to allocate from |
 * @node: preallocated node to insert
315 | * @size: size of the allocation |
316 | * @alignment: alignment of the allocation |
317 | * @color: opaque tag value to use for this node |
318 | * @start: start of the allowed range for this node |
319 | * @end: end of the allowed range for this node |
320 | * @sflags: flags to fine-tune the allocation search |
321 | * @aflags: flags to fine-tune the allocation behavior |
322 | * |
323 | * The preallocated node must be cleared to 0. |
324 | * |
325 | * Returns: |
326 | * 0 on success, -ENOSPC if there's no suitable hole. |
327 | */ |
328 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, |
329 | unsigned long size, unsigned alignment, |
330 | unsigned long color, |
331 | unsigned long start, unsigned long end, |
332 | enum drm_mm_search_flags sflags, |
333 | enum drm_mm_allocator_flags aflags) |
334 | { |
335 | struct drm_mm_node *hole_node; |
336 | |
337 | hole_node = drm_mm_search_free_in_range_generic(mm, |
338 | size, alignment, color, |
339 | start, end, sflags); |
340 | if (!hole_node) |
341 | return -ENOSPC; |
342 | |
343 | drm_mm_insert_helper_range(hole_node, node, |
344 | size, alignment, color, |
345 | start, end, aflags); |
346 | return 0; |
347 | } |
348 | EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); |
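
/*
 * A sketch of a range-restricted, top-down allocation (mappable_end and the
 * surrounding driver structures are assumptions for illustration): the object
 * must land in the CPU-mappable part of VRAM, and DRM_MM_SEARCH_BELOW plus
 * DRM_MM_CREATE_TOP place it as high in that window as possible, leaving the
 * bottom free for objects with stricter placement requirements.
 *
 *	ret = drm_mm_insert_node_in_range_generic(&dev->vram_mm,
 *						  &buf->vram_node,
 *						  size, PAGE_SIZE, 0,
 *						  0, dev->mappable_end,
 *						  DRM_MM_SEARCH_BELOW,
 *						  DRM_MM_CREATE_TOP);
 */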
349 | |
350 | /** |
351 | * drm_mm_remove_node - Remove a memory node from the allocator. |
352 | * @node: drm_mm_node to remove |
353 | * |
354 | * This just removes a node from its drm_mm allocator. The node does not need to |
355 | * be cleared again before it can be re-inserted into this or any other drm_mm |
 * allocator. It is a bug to call this function on an unallocated node.
357 | */ |
358 | void drm_mm_remove_node(struct drm_mm_node *node) |
359 | { |
360 | struct drm_mm *mm = node->mm; |
361 | struct drm_mm_node *prev_node; |
362 | |
363 | if (WARN_ON(!node->allocated)) |
364 | return; |
365 | |
366 | BUG_ON(node->scanned_block || node->scanned_prev_free |
367 | || node->scanned_next_free); |
368 | |
369 | prev_node = |
370 | list_entry(node->node_list.prev, struct drm_mm_node, node_list); |
371 | |
372 | if (node->hole_follows) { |
373 | BUG_ON(__drm_mm_hole_node_start(node) == |
374 | __drm_mm_hole_node_end(node)); |
375 | list_del(&node->hole_stack); |
376 | } else |
377 | BUG_ON(__drm_mm_hole_node_start(node) != |
378 | __drm_mm_hole_node_end(node)); |
379 | |
380 | |
381 | if (!prev_node->hole_follows) { |
382 | prev_node->hole_follows = 1; |
383 | list_add(&prev_node->hole_stack, &mm->hole_stack); |
384 | } else |
385 | list_move(&prev_node->hole_stack, &mm->hole_stack); |
386 | |
387 | list_del(&node->node_list); |
388 | node->allocated = 0; |
389 | } |
390 | EXPORT_SYMBOL(drm_mm_remove_node); |
391 | |
392 | static int check_free_hole(unsigned long start, unsigned long end, |
393 | unsigned long size, unsigned alignment) |
394 | { |
395 | if (end - start < size) |
396 | return 0; |
397 | |
398 | if (alignment) { |
399 | unsigned tmp = start % alignment; |
400 | if (tmp) |
401 | start += alignment - tmp; |
402 | } |
403 | |
404 | return end >= start + size; |
405 | } |
406 | |
407 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
408 | unsigned long size, |
409 | unsigned alignment, |
410 | unsigned long color, |
411 | enum drm_mm_search_flags flags) |
412 | { |
413 | struct drm_mm_node *entry; |
414 | struct drm_mm_node *best; |
415 | unsigned long adj_start; |
416 | unsigned long adj_end; |
417 | unsigned long best_size; |
418 | |
419 | BUG_ON(mm->scanned_blocks); |
420 | |
421 | best = NULL; |
422 | best_size = ~0UL; |
423 | |
424 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, |
425 | flags & DRM_MM_SEARCH_BELOW) { |
426 | unsigned long hole_size = adj_end - adj_start; |
427 | |
428 | if (mm->color_adjust) { |
429 | mm->color_adjust(entry, color, &adj_start, &adj_end); |
430 | if (adj_end <= adj_start) |
431 | continue; |
432 | } |
433 | |
434 | if (!check_free_hole(adj_start, adj_end, size, alignment)) |
435 | continue; |
436 | |
437 | if (!(flags & DRM_MM_SEARCH_BEST)) |
438 | return entry; |
439 | |
440 | if (hole_size < best_size) { |
441 | best = entry; |
442 | best_size = hole_size; |
443 | } |
444 | } |
445 | |
446 | return best; |
447 | } |
448 | |
449 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, |
450 | unsigned long size, |
451 | unsigned alignment, |
452 | unsigned long color, |
453 | unsigned long start, |
454 | unsigned long end, |
455 | enum drm_mm_search_flags flags) |
456 | { |
457 | struct drm_mm_node *entry; |
458 | struct drm_mm_node *best; |
459 | unsigned long adj_start; |
460 | unsigned long adj_end; |
461 | unsigned long best_size; |
462 | |
463 | BUG_ON(mm->scanned_blocks); |
464 | |
465 | best = NULL; |
466 | best_size = ~0UL; |
467 | |
468 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, |
469 | flags & DRM_MM_SEARCH_BELOW) { |
470 | unsigned long hole_size = adj_end - adj_start; |
471 | |
472 | if (adj_start < start) |
473 | adj_start = start; |
474 | if (adj_end > end) |
475 | adj_end = end; |
476 | |
477 | if (mm->color_adjust) { |
478 | mm->color_adjust(entry, color, &adj_start, &adj_end); |
479 | if (adj_end <= adj_start) |
480 | continue; |
481 | } |
482 | |
483 | if (!check_free_hole(adj_start, adj_end, size, alignment)) |
484 | continue; |
485 | |
486 | if (!(flags & DRM_MM_SEARCH_BEST)) |
487 | return entry; |
488 | |
489 | if (hole_size < best_size) { |
490 | best = entry; |
491 | best_size = hole_size; |
492 | } |
493 | } |
494 | |
495 | return best; |
496 | } |
497 | |
498 | /** |
499 | * drm_mm_replace_node - move an allocation from @old to @new |
500 | * @old: drm_mm_node to remove from the allocator |
501 | * @new: drm_mm_node which should inherit @old's allocation |
502 | * |
 * This is useful when drivers embed the drm_mm_node structure and hence
504 | * can't move allocations by reassigning pointers. It's a combination of remove |
505 | * and insert with the guarantee that the allocation start will match. |
506 | */ |
507 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) |
508 | { |
509 | list_replace(&old->node_list, &new->node_list); |
510 | list_replace(&old->hole_stack, &new->hole_stack); |
511 | new->hole_follows = old->hole_follows; |
512 | new->mm = old->mm; |
513 | new->start = old->start; |
514 | new->size = old->size; |
515 | new->color = old->color; |
516 | |
517 | old->allocated = 0; |
518 | new->allocated = 1; |
519 | } |
520 | EXPORT_SYMBOL(drm_mm_replace_node); |
521 | |
522 | /** |
 * DOC: lru scan roster
524 | * |
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
531 | * |
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
538 | * |
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is in
 * scan mode no other operation is allowed.
542 | * |
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the overall
 * complexity is O(scanned_objects). So, like the free stack which needs to be
 * walked before a scan operation even begins, this is linear in the number of
 * objects. It doesn't seem to hurt badly.
548 | */ |
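
/*
 * A rough sketch of an eviction loop built on top of the scan interface (the
 * LRU list, my_buffer, the scan/evict list links and my_buffer_unbind() are
 * assumptions made for illustration; real drivers structure this differently):
 *
 *	struct my_buffer *buf, *next;
 *	LIST_HEAD(scan_list);
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(&dev->vram_mm, size, 0, 0);
 *
 *	// add objects from the LRU until a suitable hole is found
 *	list_for_each_entry(buf, &dev->lru, lru_link) {
 *		list_add(&buf->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&buf->vram_node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// remove blocks in exactly the reverse order of addition to restore
 *	// the allocator state; collect the ones flagged for eviction
 *	list_for_each_entry_safe(buf, next, &scan_list, scan_link) {
 *		list_del(&buf->scan_link);
 *		if (drm_mm_scan_remove_block(&buf->vram_node))
 *			list_add(&buf->evict_link, &evict_list);
 *	}
 *
 *	// only now, with the scan list empty, may nodes actually be freed;
 *	// if !found the evict list stays empty and the caller reports -ENOSPC
 *	list_for_each_entry_safe(buf, next, &evict_list, evict_link)
 *		my_buffer_unbind(buf);	// calls drm_mm_remove_node()
 */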
549 | |
550 | /** |
551 | * drm_mm_init_scan - initialize lru scanning |
552 | * @mm: drm_mm to scan |
553 | * @size: size of the allocation |
554 | * @alignment: alignment of the allocation |
555 | * @color: opaque tag value to use for the allocation |
556 | * |
557 | * This simply sets up the scanning routines with the parameters for the desired |
558 | * hole. Note that there's no need to specify allocation flags, since they only |
559 | * change the place a node is allocated from within a suitable hole. |
560 | * |
561 | * Warning: |
562 | * As long as the scan list is non-empty, no other operations than |
563 | * adding/removing nodes to/from the scan list are allowed. |
564 | */ |
565 | void drm_mm_init_scan(struct drm_mm *mm, |
566 | unsigned long size, |
567 | unsigned alignment, |
568 | unsigned long color) |
569 | { |
570 | mm->scan_color = color; |
571 | mm->scan_alignment = alignment; |
572 | mm->scan_size = size; |
573 | mm->scanned_blocks = 0; |
574 | mm->scan_hit_start = 0; |
575 | mm->scan_hit_end = 0; |
576 | mm->scan_check_range = 0; |
577 | mm->prev_scanned_node = NULL; |
578 | } |
579 | EXPORT_SYMBOL(drm_mm_init_scan); |
580 | |
581 | /** |
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
583 | * @mm: drm_mm to scan |
584 | * @size: size of the allocation |
585 | * @alignment: alignment of the allocation |
586 | * @color: opaque tag value to use for the allocation |
587 | * @start: start of the allowed range for the allocation |
588 | * @end: end of the allowed range for the allocation |
589 | * |
590 | * This simply sets up the scanning routines with the parameters for the desired |
591 | * hole. Note that there's no need to specify allocation flags, since they only |
592 | * change the place a node is allocated from within a suitable hole. |
593 | * |
594 | * Warning: |
595 | * As long as the scan list is non-empty, no other operations than |
596 | * adding/removing nodes to/from the scan list are allowed. |
597 | */ |
598 | void drm_mm_init_scan_with_range(struct drm_mm *mm, |
599 | unsigned long size, |
600 | unsigned alignment, |
601 | unsigned long color, |
602 | unsigned long start, |
603 | unsigned long end) |
604 | { |
605 | mm->scan_color = color; |
606 | mm->scan_alignment = alignment; |
607 | mm->scan_size = size; |
608 | mm->scanned_blocks = 0; |
609 | mm->scan_hit_start = 0; |
610 | mm->scan_hit_end = 0; |
611 | mm->scan_start = start; |
612 | mm->scan_end = end; |
613 | mm->scan_check_range = 1; |
614 | mm->prev_scanned_node = NULL; |
615 | } |
616 | EXPORT_SYMBOL(drm_mm_init_scan_with_range); |
617 | |
618 | /** |
619 | * drm_mm_scan_add_block - add a node to the scan list |
620 | * @node: drm_mm_node to add |
621 | * |
622 | * Add a node to the scan list that might be freed to make space for the desired |
623 | * hole. |
624 | * |
625 | * Returns: |
626 | * True if a hole has been found, false otherwise. |
627 | */ |
628 | bool drm_mm_scan_add_block(struct drm_mm_node *node) |
629 | { |
630 | struct drm_mm *mm = node->mm; |
631 | struct drm_mm_node *prev_node; |
632 | unsigned long hole_start, hole_end; |
633 | unsigned long adj_start, adj_end; |
634 | |
635 | mm->scanned_blocks++; |
636 | |
637 | BUG_ON(node->scanned_block); |
638 | node->scanned_block = 1; |
639 | |
640 | prev_node = list_entry(node->node_list.prev, struct drm_mm_node, |
641 | node_list); |
642 | |
643 | node->scanned_preceeds_hole = prev_node->hole_follows; |
644 | prev_node->hole_follows = 1; |
645 | list_del(&node->node_list); |
646 | node->node_list.prev = &prev_node->node_list; |
647 | node->node_list.next = &mm->prev_scanned_node->node_list; |
648 | mm->prev_scanned_node = node; |
649 | |
650 | adj_start = hole_start = drm_mm_hole_node_start(prev_node); |
651 | adj_end = hole_end = drm_mm_hole_node_end(prev_node); |
652 | |
653 | if (mm->scan_check_range) { |
654 | if (adj_start < mm->scan_start) |
655 | adj_start = mm->scan_start; |
656 | if (adj_end > mm->scan_end) |
657 | adj_end = mm->scan_end; |
658 | } |
659 | |
660 | if (mm->color_adjust) |
661 | mm->color_adjust(prev_node, mm->scan_color, |
662 | &adj_start, &adj_end); |
663 | |
664 | if (check_free_hole(adj_start, adj_end, |
665 | mm->scan_size, mm->scan_alignment)) { |
666 | mm->scan_hit_start = hole_start; |
667 | mm->scan_hit_end = hole_end; |
668 | return true; |
669 | } |
670 | |
671 | return false; |
672 | } |
673 | EXPORT_SYMBOL(drm_mm_scan_add_block); |
674 | |
675 | /** |
676 | * drm_mm_scan_remove_block - remove a node from the scan list |
677 | * @node: drm_mm_node to remove |
678 | * |
679 | * Nodes _must_ be removed in the exact same order from the scan list as they |
680 | * have been added, otherwise the internal state of the memory manager will be |
681 | * corrupted. |
682 | * |
683 | * When the scan list is empty, the selected memory nodes can be freed. An |
 * immediately following allocation without DRM_MM_SEARCH_BEST will then return
 * the just freed block (because it's at the top of the hole_stack list).
686 | * |
687 | * Returns: |
688 | * True if this block should be evicted, false otherwise. Will always |
689 | * return false when no hole has been found. |
690 | */ |
691 | bool drm_mm_scan_remove_block(struct drm_mm_node *node) |
692 | { |
693 | struct drm_mm *mm = node->mm; |
694 | struct drm_mm_node *prev_node; |
695 | |
696 | mm->scanned_blocks--; |
697 | |
698 | BUG_ON(!node->scanned_block); |
699 | node->scanned_block = 0; |
700 | |
701 | prev_node = list_entry(node->node_list.prev, struct drm_mm_node, |
702 | node_list); |
703 | |
704 | prev_node->hole_follows = node->scanned_preceeds_hole; |
705 | list_add(&node->node_list, &prev_node->node_list); |
706 | |
707 | return (drm_mm_hole_node_end(node) > mm->scan_hit_start && |
708 | node->start < mm->scan_hit_end); |
709 | } |
710 | EXPORT_SYMBOL(drm_mm_scan_remove_block); |
711 | |
712 | /** |
713 | * drm_mm_clean - checks whether an allocator is clean |
714 | * @mm: drm_mm allocator to check |
715 | * |
716 | * Returns: |
717 | * True if the allocator is completely free, false if there's still a node |
718 | * allocated in it. |
719 | */ |
720 | bool drm_mm_clean(struct drm_mm * mm) |
721 | { |
722 | struct list_head *head = &mm->head_node.node_list; |
723 | |
724 | return (head->next->next == head); |
725 | } |
726 | EXPORT_SYMBOL(drm_mm_clean); |
727 | |
728 | /** |
729 | * drm_mm_init - initialize a drm-mm allocator |
730 | * @mm: the drm_mm structure to initialize |
731 | * @start: start of the range managed by @mm |
 * @size: size of the range managed by @mm
733 | * |
734 | * Note that @mm must be cleared to 0 before calling this function. |
735 | */ |
736 | void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) |
737 | { |
738 | INIT_LIST_HEAD(&mm->hole_stack); |
739 | mm->scanned_blocks = 0; |
740 | |
741 | /* Clever trick to avoid a special case in the free hole tracking. */ |
742 | INIT_LIST_HEAD(&mm->head_node.node_list); |
743 | INIT_LIST_HEAD(&mm->head_node.hole_stack); |
744 | mm->head_node.hole_follows = 1; |
745 | mm->head_node.scanned_block = 0; |
746 | mm->head_node.scanned_prev_free = 0; |
747 | mm->head_node.scanned_next_free = 0; |
748 | mm->head_node.mm = mm; |
749 | mm->head_node.start = start + size; |
750 | mm->head_node.size = start - mm->head_node.start; |
751 | list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); |
752 | |
753 | mm->color_adjust = NULL; |
754 | } |
755 | EXPORT_SYMBOL(drm_mm_init); |
756 | |
757 | /** |
758 | * drm_mm_takedown - clean up a drm_mm allocator |
759 | * @mm: drm_mm allocator to clean up |
760 | * |
761 | * Note that it is a bug to call this function on an allocator which is not |
762 | * clean. |
763 | */ |
764 | void drm_mm_takedown(struct drm_mm * mm) |
765 | { |
766 | WARN(!list_empty(&mm->head_node.node_list), |
	     "Memory manager not clean during takedown.\n");
768 | } |
769 | EXPORT_SYMBOL(drm_mm_takedown); |
770 | |
771 | static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, |
772 | const char *prefix) |
773 | { |
774 | unsigned long hole_start, hole_end, hole_size; |
775 | |
776 | if (entry->hole_follows) { |
777 | hole_start = drm_mm_hole_node_start(entry); |
778 | hole_end = drm_mm_hole_node_end(entry); |
779 | hole_size = hole_end - hole_start; |
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
781 | prefix, hole_start, hole_end, |
782 | hole_size); |
783 | return hole_size; |
784 | } |
785 | |
786 | return 0; |
787 | } |
788 | |
789 | /** |
790 | * drm_mm_debug_table - dump allocator state to dmesg |
791 | * @mm: drm_mm allocator to dump |
792 | * @prefix: prefix to use for dumping to dmesg |
793 | */ |
794 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) |
795 | { |
796 | struct drm_mm_node *entry; |
797 | unsigned long total_used = 0, total_free = 0, total = 0; |
798 | |
799 | total_free += drm_mm_debug_hole(&mm->head_node, prefix); |
800 | |
801 | drm_mm_for_each_node(entry, mm) { |
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
803 | prefix, entry->start, entry->start + entry->size, |
804 | entry->size); |
805 | total_used += entry->size; |
806 | total_free += drm_mm_debug_hole(entry, prefix); |
807 | } |
808 | total = total_free + total_used; |
809 | |
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
811 | total_used, total_free); |
812 | } |
813 | EXPORT_SYMBOL(drm_mm_debug_table); |
814 | |
815 | #if defined(CONFIG_DEBUG_FS) |
816 | static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) |
817 | { |
818 | unsigned long hole_start, hole_end, hole_size; |
819 | |
820 | if (entry->hole_follows) { |
821 | hole_start = drm_mm_hole_node_start(entry); |
822 | hole_end = drm_mm_hole_node_end(entry); |
823 | hole_size = hole_end - hole_start; |
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
825 | hole_start, hole_end, hole_size); |
826 | return hole_size; |
827 | } |
828 | |
829 | return 0; |
830 | } |
831 | |
832 | /** |
833 | * drm_mm_dump_table - dump allocator state to a seq_file |
834 | * @m: seq_file to dump to |
835 | * @mm: drm_mm allocator to dump |
836 | */ |
837 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) |
838 | { |
839 | struct drm_mm_node *entry; |
840 | unsigned long total_used = 0, total_free = 0, total = 0; |
841 | |
842 | total_free += drm_mm_dump_hole(m, &mm->head_node); |
843 | |
844 | drm_mm_for_each_node(entry, mm) { |
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
846 | entry->start, entry->start + entry->size, |
847 | entry->size); |
848 | total_used += entry->size; |
849 | total_free += drm_mm_dump_hole(m, entry); |
850 | } |
851 | total = total_free + total_used; |
852 | |
	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
854 | return 0; |
855 | } |
856 | EXPORT_SYMBOL(drm_mm_dump_table); |
857 | #endif |
858 | |