/* $NetBSD: ttm_bo_manager.c,v 1.6 2021/12/19 12:37:43 riastradh Exp $ */

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_manager.c,v 1.6 2021/12/19 12:37:43 riastradh Exp $");

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
/**
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */

struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};

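/*
 * Allocate a range of pages for @bo from the manager's drm_mm address
 * space, honoring the fpfn/lpfn bounds and alignment in @place.  On
 * success the new node is recorded in @mem; if no suitable hole
 * exists, @mem->mm_node is left NULL so the caller can fall back to
 * eviction.
 */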
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn;
	int ret;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&rman->lock);
	ret = drm_mm_insert_node_in_range(mm, node,
					  mem->num_pages,
					  mem->page_alignment, 0,
					  place->fpfn, lpfn, mode);
	spin_unlock(&rman->lock);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		mem->mm_node = node;
		mem->start = node->start;
	}

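	/*
	 * A failed insertion is deliberately not reported as an error:
	 * returning 0 with mem->mm_node still NULL tells the caller
	 * (ttm_bo_mem_space) that this manager is merely full, so it
	 * may evict buffers and retry instead of failing outright.
	 */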
	return 0;
}

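/*
 * Return @mem's range to the manager's drm_mm address space and free
 * the node allocated in ttm_bo_man_get_node().
 */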
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_remove_node(mem->mm_node);
		spin_unlock(&rman->lock);

		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}

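/*
 * Set up a range manager spanning @p_size pages and hang it off
 * man->priv.
 */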
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	drm_mm_init(&rman->mm, 0, p_size);
	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}

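/*
 * Tear the range manager down.  Fails with -EBUSY while any
 * allocations are still outstanding in the drm_mm address space.
 */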
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	spin_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		spin_lock_destroy(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);
	return -EBUSY;
}

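/*
 * Dump the manager's drm_mm allocation map through @printer for
 * debugging.
 */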
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     struct drm_printer *printer)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, printer);
	spin_unlock(&rman->lock);
}

const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
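
/*
 * Usage sketch, not part of this file: a driver typically selects this
 * manager from its init_mem_type hook and then sizes it via
 * ttm_bo_init_mm(), which calls ttm_bo_man_init() above with the page
 * count.  The "mydrv" names below are hypothetical, and the memory
 * type flags and caching choices vary per device.
 *
 *	static int mydrv_init_mem_type(struct ttm_bo_device *bdev,
 *				       uint32_t type,
 *				       struct ttm_mem_type_manager *man)
 *	{
 *		switch (type) {
 *		case TTM_PL_VRAM:
 *			man->func = &ttm_bo_manager_func;
 *			man->flags = TTM_MEMTYPE_FLAG_FIXED |
 *				     TTM_MEMTYPE_FLAG_MAPPABLE;
 *			man->available_caching = TTM_PL_FLAG_UNCACHED |
 *						 TTM_PL_FLAG_WC;
 *			man->default_caching = TTM_PL_FLAG_WC;
 *			break;
 *		default:
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 *
 *	// later, during device initialization:
 *	ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 */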