/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/drmP.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/export.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

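/*
 * A single memory accounting zone. All sizes are in bytes.
 *
 * @kobj:       sysfs anchor for the per-zone attributes (Linux only).
 * @glob:       the global accounting state this zone belongs to.
 * @name:       zone name used for sysfs and in log messages.
 * @zone_mem:   total memory managed by the zone; upper bound for the limits.
 * @emer_mem:   limit that only privileged allocations may push usage up to.
 * @max_mem:    limit for ordinary allocations.
 * @swap_limit: usage threshold above which the swap-out worker is queued.
 * @used_mem:   memory currently accounted to the zone; protected by
 *              glob->lock, as are the limits above.
 */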
struct ttm_mem_zone {
#ifndef __NetBSD__
        struct kobject kobj;
#endif
        struct ttm_mem_global *glob;
        const char *name;
        uint64_t zone_mem;
        uint64_t emer_mem;
        uint64_t max_mem;
        uint64_t swap_limit;
        uint64_t used_mem;
};

#ifndef __NetBSD__
static struct attribute ttm_mem_sys = {
        .name = "zone_memory",
        .mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
        .name = "emergency_memory",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
        .name = "available_memory",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
        .name = "swap_limit",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
        .name = "used_memory",
        .mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);

        pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
                zone->name, (unsigned long long)zone->used_mem >> 10);
        kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
                                 struct attribute *attr,
                                 char *buffer)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
        uint64_t val = 0;

        spin_lock(&zone->glob->lock);
        if (attr == &ttm_mem_sys)
                val = zone->zone_mem;
        else if (attr == &ttm_mem_emer)
                val = zone->emer_mem;
        else if (attr == &ttm_mem_max)
                val = zone->max_mem;
        else if (attr == &ttm_mem_swap)
                val = zone->swap_limit;
        else if (attr == &ttm_mem_used)
                val = zone->used_mem;
        spin_unlock(&zone->glob->lock);

        return snprintf(buffer, PAGE_SIZE, "%llu\n",
                        (unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

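/*
 * Store handler for the writable zone attributes. Values are parsed as
 * kiB, converted to bytes and clamped to the total zone size. Lowering
 * emer_mem drags max_mem down with it, and raising max_mem drags
 * emer_mem up, so that max_mem <= emer_mem always holds.
 */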
static ssize_t ttm_mem_zone_store(struct kobject *kobj,
                                  struct attribute *attr,
                                  const char *buffer,
                                  size_t size)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
        int chars;
        unsigned long val;
        uint64_t val64;

        chars = sscanf(buffer, "%lu", &val);
        if (chars == 0)
                return size;

        val64 = val;
        val64 <<= 10;

        spin_lock(&zone->glob->lock);
        if (val64 > zone->zone_mem)
                val64 = zone->zone_mem;
        if (attr == &ttm_mem_emer) {
                zone->emer_mem = val64;
                if (zone->max_mem > val64)
                        zone->max_mem = val64;
        } else if (attr == &ttm_mem_max) {
                zone->max_mem = val64;
                if (zone->emer_mem < val64)
                        zone->emer_mem = val64;
        } else if (attr == &ttm_mem_swap)
                zone->swap_limit = val64;
        spin_unlock(&zone->glob->lock);

        ttm_check_swapping(zone->glob);

        return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
        &ttm_mem_sys,
        &ttm_mem_emer,
        &ttm_mem_max,
        &ttm_mem_swap,
        &ttm_mem_used,
        NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
        .show = &ttm_mem_zone_show,
        .store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
        .release = &ttm_mem_zone_kobj_release,
        .sysfs_ops = &ttm_mem_zone_ops,
        .default_attrs = ttm_mem_zone_attrs,
};

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
        struct ttm_mem_global *glob =
                container_of(kobj, struct ttm_mem_global, kobj);

        kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
        .release = &ttm_mem_global_kobj_release,
};
#endif

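/*
 * Check whether any zone exceeds its current swap target. The target
 * depends on the caller: the swap-out worker compares against
 * swap_limit, privileged callers against emer_mem, and everyone else
 * against max_mem. @extra is headroom that must also fit below the
 * target; if it cannot, the target collapses to zero and swapping is
 * forced. Called with glob->lock held.
 */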
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
                                        bool from_wq, uint64_t extra)
{
        unsigned int i;
        struct ttm_mem_zone *zone;
        uint64_t target;

        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];

                if (from_wq)
                        target = zone->swap_limit;
#ifdef __NetBSD__
                else if (DRM_SUSER())
#else
                else if (capable(CAP_SYS_ADMIN))
#endif
                        target = zone->emer_mem;
                else
                        target = zone->max_mem;

                target = (extra > target) ? 0ULL : target;

                if (zone->used_mem > target)
                        return true;
        }
        return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
                       uint64_t extra)
{
        int ret;
        struct ttm_mem_shrink *shrink;

        spin_lock(&glob->lock);
        if (glob->shrink == NULL)
                goto out;

        while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
                shrink = glob->shrink;
                spin_unlock(&glob->lock);
                ret = shrink->do_shrink(shrink);
                spin_lock(&glob->lock);
                if (unlikely(ret != 0))
                        goto out;
        }
out:
        spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
        struct ttm_mem_global *glob =
                container_of(work, struct ttm_mem_global, work);

        ttm_shrink(glob, true, 0ULL);
}

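/*
 * Set up the "kernel" zone covering lowmem. Default limits: max_mem is
 * half of the zone, emer_mem three quarters, and the swap limit sits
 * one eighth of the zone below max_mem (i.e. at 3/8 of the zone). The
 * highmem and dma32 zones below use the same fractions.
 */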
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
                                    const struct sysinfo *si)
{
        struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        uint64_t mem;
#ifndef __NetBSD__
        int ret;
#endif

        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram - si->totalhigh;
        mem *= si->mem_unit;

        zone->name = "kernel";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_kernel = zone;
#ifndef __NetBSD__
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
#endif
        glob->zones[glob->num_zones++] = zone;
        return 0;
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
                                     const struct sysinfo *si)
{
        struct ttm_mem_zone *zone;
        uint64_t mem;
#ifndef __NetBSD__
        int ret;
#endif

        if (si->totalhigh == 0)
                return 0;

        zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram;
        mem *= si->mem_unit;

        zone->name = "highmem";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_highmem = zone;
#ifndef __NetBSD__
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
#endif
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
                                   const struct sysinfo *si)
{
        struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        uint64_t mem;
#ifndef __NetBSD__
        int ret;
#endif

        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram;
        mem *= si->mem_unit;

        /**
         * No special dma32 zone needed.
         */

        if (mem <= ((uint64_t) 1ULL << 32)) {
                kfree(zone);
                return 0;
        }

        /*
         * Limit max dma32 memory to 4GB for now
         * until we can figure out how big this
         * zone really is.
         */

        mem = ((uint64_t) 1ULL << 32);
        zone->name = "dma32";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_dma32 = zone;
#ifndef __NetBSD__
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
#endif
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
#endif

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
        struct sysinfo si;
        int ret;
        int i;
        struct ttm_mem_zone *zone;

        spin_lock_init(&glob->lock);
        glob->swap_queue = create_singlethread_workqueue("ttm_swap");
        INIT_WORK(&glob->work, ttm_shrink_work);
#ifndef __NetBSD__
        ret = kobject_init_and_add(
                &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(),
                "memory_accounting");
        if (unlikely(ret != 0)) {
                kobject_put(&glob->kobj);
                return ret;
        }
#endif

        si_meminfo(&si);

        ret = ttm_mem_init_kernel_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#ifdef CONFIG_HIGHMEM
        ret = ttm_mem_init_highmem_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#else
        ret = ttm_mem_init_dma32_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#endif
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
                        zone->name, (unsigned long long)zone->max_mem >> 10);
        }
        ttm_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
        ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
        return 0;
out_no_zone:
        ttm_mem_global_release(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
        unsigned int i;
        struct ttm_mem_zone *zone;

        /* Let the page allocator first stop the shrink work. */
        ttm_page_alloc_fini();
        ttm_dma_page_alloc_fini();

        flush_workqueue(glob->swap_queue);
        destroy_workqueue(glob->swap_queue);
        glob->swap_queue = NULL;
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
#ifdef __NetBSD__
                kfree(zone);
#else
                kobject_del(&zone->kobj);
                kobject_put(&zone->kobj);
#endif
        }
#ifdef __NetBSD__
        kfree(glob);
#else
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
#endif
}
EXPORT_SYMBOL(ttm_mem_global_release);

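/*
 * Queue the swap-out worker if any zone has crossed its swap limit.
 */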
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
        bool needs_swapping = false;
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (zone->used_mem > zone->swap_limit) {
                        needs_swapping = true;
                        break;
                }
        }
        spin_unlock(&glob->lock);

        if (unlikely(needs_swapping))
                (void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
                                     struct ttm_mem_zone *single_zone,
                                     uint64_t amount)
{
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (single_zone && zone != single_zone)
                        continue;
                zone->used_mem -= amount;
        }
        spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
                         uint64_t amount)
{
        return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

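/*
 * Try to account @amount bytes against all zones, or against
 * @single_zone only. This is check-then-commit under glob->lock: if
 * any relevant zone is already above its limit (emer_mem for
 * privileged callers, max_mem otherwise), fail with -ENOMEM and
 * commit nothing.
 */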
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
                                  struct ttm_mem_zone *single_zone,
                                  uint64_t amount, bool reserve)
{
        uint64_t limit;
        int ret = -ENOMEM;
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (single_zone && zone != single_zone)
                        continue;

#ifdef __NetBSD__
                limit = DRM_SUSER() ?
                        zone->emer_mem : zone->max_mem;
#else
                limit = (capable(CAP_SYS_ADMIN)) ?
                        zone->emer_mem : zone->max_mem;
#endif

                if (zone->used_mem > limit)
                        goto out_unlock;
        }

        if (reserve) {
                for (i = 0; i < glob->num_zones; ++i) {
                        zone = glob->zones[i];
                        if (single_zone && zone != single_zone)
                                continue;
                        zone->used_mem += amount;
                }
        }

        ret = 0;
out_unlock:
        spin_unlock(&glob->lock);
        ttm_check_swapping(glob);

        return ret;
}

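/*
 * Reserve @memory bytes, retrying up to TTM_MEMORY_ALLOC_RETRIES times.
 * On each failure, synchronously shrink with a headroom of 25% of the
 * request plus 16 bytes before trying again.
 */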
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
                                     struct ttm_mem_zone *single_zone,
                                     uint64_t memory,
                                     bool no_wait, bool interruptible)
{
        int count = TTM_MEMORY_ALLOC_RETRIES;

        while (unlikely(ttm_mem_global_reserve(glob,
                                               single_zone,
                                               memory, true)
                        != 0)) {
                if (no_wait)
                        return -ENOMEM;
                if (unlikely(count-- == 0))
                        return -ENOMEM;
                ttm_shrink(glob, false, memory + (memory >> 2) + 16);
        }

        return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
                         bool no_wait, bool interruptible)
{
        /**
         * Normal allocations of kernel memory are registered in
         * all zones.
         */

        return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
                                         interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
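
/*
 * Hypothetical usage sketch (not part of this file): a driver object
 * would typically account its size before allocating backing storage
 * and drop the accounting again on destruction, along the lines of:
 *
 *      ret = ttm_mem_global_alloc(glob, acc_size, false, true);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ...
 *      ttm_mem_global_free(glob, acc_size);
 *
 * Here "acc_size" is an assumed name for the object's accounted size;
 * see ttm_round_pot() below for how such sizes are commonly rounded.
 */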

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
                              struct page *page,
                              bool no_wait, bool interruptible)
{
        struct ttm_mem_zone *zone = NULL;

        /**
         * Page allocations may be registered in a single zone
         * only if highmem or !dma32.
         */

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page) && glob->zone_highmem != NULL)
                zone = glob->zone_highmem;
#else
        if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
                zone = glob->zone_kernel;
#endif
        return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
                                         interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
        struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page) && glob->zone_highmem != NULL)
                zone = glob->zone_highmem;
#else
        if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
                zone = glob->zone_kernel;
#endif
        ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}

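/*
 * Round an allocation size up to a power of two; sizes above PAGE_SIZE
 * are rounded up to a whole number of pages instead. For example,
 * ttm_round_pot(100) returns 128, while ttm_round_pot(PAGE_SIZE + 1)
 * returns 2 * PAGE_SIZE.
 */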
size_t ttm_round_pot(size_t size)
{
        if ((size & (size - 1)) == 0)
                return size;
        else if (size > PAGE_SIZE)
                return PAGE_ALIGN(size);
        else {
                size_t tmp_size = 4;

                while (tmp_size < size)
                        tmp_size <<= 1;

                return tmp_size;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_round_pot);