/*	$NetBSD: ttm_memory.c,v 1.6 2020/02/14 14:34:59 maya Exp $	*/

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_memory.c,v 1.6 2020/02/14 14:34:59 maya Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/drmP.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

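/*
 * Per-zone accounting state.  All sizes are in bytes: zone_mem is the
 * total memory the zone covers, max_mem is the normal allocation limit,
 * emer_mem is the higher limit available to CAP_SYS_ADMIN callers,
 * swap_limit is the threshold above which the swap-out work is queued,
 * and used_mem is what is currently accounted against the zone.
 */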
struct ttm_mem_zone {
#ifndef __NetBSD__
	struct kobject kobj;
#endif
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

#ifndef __NetBSD__
static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
	.release = &ttm_mem_global_kobj_release,
};
#endif

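/*
 * Return true if any zone is accounted above its current target: the
 * swap limit when called from the shrink workqueue, otherwise the
 * emergency limit for CAP_SYS_ADMIN callers or the normal maximum.
 * "extra" is the size of the pending request; if it already exceeds the
 * target, the target is treated as zero so shrinking continues.
 */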
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;

	spin_lock(&glob->lock);
	if (glob->shrink == NULL)
		goto out;

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		shrink = glob->shrink;
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->lock);
}


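/*
 * Workqueue callback: swap buffers out until every zone is back below
 * its swap limit.
 */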
static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_mem_global *glob =
	    container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL);
}

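/*
 * Every zone is sized with the same policy relative to the memory it
 * covers: max_mem is 1/2 of the zone, emer_mem is 3/4 (1/2 + 1/4), and
 * swap_limit is max_mem minus 1/8 of the zone, i.e. 3/8.  For a 16 GiB
 * kernel zone that works out to max_mem = 8 GiB, emer_mem = 12 GiB and
 * swap_limit = 6 GiB.
 */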
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/**
	 * No special dma32 zone needed.
	 */

	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */

	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

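/*
 * Set up the global memory accounting object: create the swap workqueue,
 * query system memory, register the kernel zone plus either a highmem or
 * a dma32 zone, then initialize the page allocators with a pool limit of
 * half the kernel zone's max_mem worth of pages.  Callers are expected
 * to pair this with ttm_mem_global_release().
 */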
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}
#endif

	si_meminfo(&si);

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
#ifdef __NetBSD__
		kfree(zone);
#else
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
#endif
	}
	spin_lock_destroy(&glob->lock);
#ifdef __NetBSD__
	kfree(glob);
#else
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
#endif
}
EXPORT_SYMBOL(ttm_mem_global_release);

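/*
 * If any zone has been accounted above its swap limit, queue the shrink
 * work to start swapping buffers out.
 */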
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}

	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);

}

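/*
 * Return "amount" bytes of accounting to every zone, or only to
 * "single_zone" when one is given.
 */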
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

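/*
 * Check every relevant zone against its limit (the emergency limit for
 * CAP_SYS_ADMIN callers, the normal maximum otherwise).  If a zone is
 * already over its limit, fail with -ENOMEM; otherwise, when "reserve"
 * is set, charge "amount" bytes to each zone considered.  The swap check
 * runs on both paths.
 */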
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

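/*
 * Reserve "memory" bytes against the given zone (or all zones), swapping
 * buffers out and retrying up to TTM_MEMORY_ALLOC_RETRIES times when the
 * reservation fails.  Each retry asks ttm_shrink() for the request plus
 * a quarter of it and 16 bytes of slack.  The "interruptible" argument
 * is currently unused here.
 */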
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     bool no_wait, bool interruptible)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (no_wait)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/**
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */

	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
					 interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

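/*
 * Page-sized accounting helpers used by the TTM page allocators.  A page
 * is normally charged to all zones; a highmem page is charged to the
 * highmem zone only, and on !CONFIG_HIGHMEM configurations a page above
 * the dma32 boundary (4 GiB with 4 KiB pages) is charged to the kernel
 * zone only.  Each successful ttm_mem_global_alloc_page() is expected to
 * be balanced by a ttm_mem_global_free_page() on the same page.
 */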
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page,
			      bool no_wait, bool interruptible)
{

	struct ttm_mem_zone *zone = NULL;

	/**
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
					 interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}

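/*
 * Round an accounting size up: sizes up to PAGE_SIZE go to the next
 * power of two (e.g. 100 becomes 128), larger sizes are rounded up to a
 * whole number of pages.
 */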
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
636