/*	$NetBSD: drm_vma_manager.c,v 1.3 2021/12/18 23:44:57 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_vma_manager.c,v 1.3 2021/12/18 23:44:57 riastradh Exp $");

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, to size them appropriately and not to
 * confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means object sizes and offsets
 * must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles access
 * management. For every open-file context that is allowed to access a given
 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
 * open-file with the offset of the node will fail with -EACCES. To revoke
 * access again, use drm_vma_node_revoke(). However, the caller is responsible
 * for destroying already existing mappings, if required. A rough lifecycle
 * sketch follows this comment.
 */
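
/*
 * A minimal lifecycle sketch (not part of the upstream documentation) of how
 * a driver might combine these helpers. The object type "struct my_obj", its
 * "vma_node" member, the page-aligned byte size "obj->size" and the manager
 * pointer "mgr" are purely illustrative assumptions:
 *
 *     // object creation: reserve a fake mmap offset, sized in pages
 *     ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->size >> PAGE_SHIFT);
 *
 *     // per open-file that may map the object
 *     ret = drm_vma_node_allow(&obj->vma_node, file_priv);
 *
 *     // byte-based offset to hand to user-space for mmap()
 *     offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *     // on handle close: drop this open-file's access again
 *     drm_vma_node_revoke(&obj->vma_node, file_priv);
 *
 *     // object destruction: release the offset
 *     drm_vma_offset_remove(mgr, &obj->vma_node);
 */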

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * to the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
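
/*
 * A hedged sketch of typical setup and teardown. The range below is the one
 * commonly used by the GEM core; if DRM_FILE_PAGE_OFFSET_START and
 * DRM_FILE_PAGE_OFFSET_SIZE are not available in your tree, substitute any
 * suitable page-based start and size:
 *
 *     drm_vma_offset_manager_init(mgr, DRM_FILE_PAGE_OFFSET_START,
 *                                 DRM_FILE_PAGE_OFFSET_SIZE);
 *     ...
 *     drm_vma_offset_manager_destroy(mgr);
 */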

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	drm_mm_takedown(&mgr->vm_addr_space_mm);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Note that before lookup the vma offset manager lookup lock must be acquired
 * with drm_vma_offset_lock_lookup(). See there for an example. This can then
 * be used to implement weakly referenced lookups using kref_get_unless_zero().
 *
 * Example:
 *
 * ::
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *     if (node)
 *         kref_get_unless_zero(container_of(node, sth, entr));
 *     drm_vma_offset_unlock_lookup(mgr);
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_mm_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_mm_node, rb);
		offset = node->start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->start + best->size;
		if (offset < start + pages)
			best = NULL;
	}

	if (!best)
		return NULL;

	return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
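
/*
 * A hedged sketch of how a fault or mmap path might use this lookup. Since
 * vm_pgoff is already page-based it can be passed through directly;
 * "struct my_obj" with its "vma_node" and "refcount" members is purely
 * illustrative:
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, vma->vm_pgoff,
 *                                         vma_pages(vma));
 *     if (node && !kref_get_unless_zero(
 *                     &container_of(node, struct my_obj, vma_node)->refcount))
 *         node = NULL;
 *     drm_vma_offset_unlock_lookup(mgr);
 */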

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove() anyway; no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret = 0;

	write_lock(&mgr->vm_lock);

	if (!drm_mm_node_allocated(&node->vm_node))
		ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
					 &node->vm_node, pages);

	write_unlock(&mgr->vm_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);
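
/*
 * A hedged sketch of the page-count conversion and the failure semantics
 * described above; "obj->size" is an illustrative, page-aligned byte size:
 *
 *     ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->size >> PAGE_SHIFT);
 *     if (ret)
 *             return ret;    // nothing was inserted, no cleanup needed
 */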

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to allow
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (tag == entry->vm_tag) {
			entry->vm_count++;
			goto unlock;
		} else if (tag > entry->vm_tag) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_tag = tag;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag.
 *
 * This is locked against concurrent access internally.
 *
 * If @tag is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (tag > entry->vm_tag) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);
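
/*
 * Revoking access does not tear down mappings that already exist. A hedged
 * sketch of how a driver might do that as well, assuming "mapping" is the
 * struct address_space the offsets were exposed through and using the
 * drm_vma_node_unmap() helper declared in drm_vma_manager.h:
 *
 *     drm_vma_node_revoke(&obj->vma_node, file_priv);
 *     drm_vma_node_unmap(&obj->vma_node, mapping);
 */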

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search the list in @node to check whether @tag is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true if @tag is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag)
			break;
		else if (tag > entry->vm_tag)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
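
/*
 * A hedged sketch of the access check a driver's mmap() path might perform;
 * "node" would be looked up from the fake offset as shown earlier:
 *
 *     if (!drm_vma_node_is_allowed(node, file_priv))
 *             return -EACCES;
 */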
395