/*	$OpenBSD: drm_vma_manager.c,v 1.2 2016/04/05 20:50:44 kettenis Exp $	*/
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm_mm.h"
#include "drm_vma_manager.h"
#include "drm_linux_rbtree.h"

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, to size them appropriately and to
 * avoid confusing mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear. Please use VM_NONLINEAR in that case and implement
 * your own offset managers.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means object sizes and offsets
 * must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles access
 * management. For every open-file context that is allowed to access a given
 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
 * open-file with the offset of the node will fail with -EACCES. To revoke
 * access again, use drm_vma_node_revoke(). However, the caller is responsible
 * for destroying already existing mappings, if required.
 */
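
/*
 * Example: typical manager/node lifecycle, as a minimal sketch. The object
 * type "struct my_obj", its size "obj_size" and the DRM_FILE_PAGE_OFFSET_*
 * range constants (from drmP.h) are illustrative choices, not mandated by
 * this API. The object is zero-initialized so that drm_vma_offset_add()
 * sees the embedded node as not yet allocated:
 *
 *	struct drm_vma_offset_manager mgr;
 *	struct my_obj { struct drm_vma_offset_node vma_node; } obj = { 0 };
 *	int ret;
 *
 *	drm_vma_offset_manager_init(&mgr, DRM_FILE_PAGE_OFFSET_START,
 *				    DRM_FILE_PAGE_OFFSET_SIZE);
 *	ret = drm_vma_offset_add(&mgr, &obj.vma_node, obj_size >> PAGE_SHIFT);
 *	if (ret == 0) {
 *		// hand drm_vma_node_offset_addr(&obj.vma_node) to user-space,
 *		// then tear down when the object goes away:
 *		drm_vma_offset_remove(&mgr, &obj.vma_node);
 *	}
 *	drm_vma_offset_manager_destroy(&mgr);
 */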

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction are left
 * to the caller. While calling into the vma-manager, a given node must always
 * be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rw_init(&mgr->vm_lock, "drmvmo");
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	write_lock(&mgr->vm_lock);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
						  unsigned long start,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;

	read_lock(&mgr->vm_lock);
	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	read_unlock(&mgr->vm_lock);

	return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);
139 
140 /**
141  * drm_vma_offset_lookup_locked() - Find node in offset space
142  * @mgr: Manager object
143  * @start: Start address for object (page-based)
144  * @pages: Size of object (page-based)
145  *
146  * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
147  * manually. See drm_vma_offset_lock_lookup() for an example.
148  *
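 * A minimal sketch of the locked-lookup pattern; "struct my_obj", its
 * "refcount" kref and the container_of() placement are illustrative
 * assumptions, not part of this API:
 *
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *	if (node) {
 *		obj = container_of(node, struct my_obj, vma_node);
 *		kref_get_unless_zero(&obj->refcount);
 *	}
 *	drm_vma_offset_unlock_lookup(mgr);
 *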
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_vma_offset_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_rb.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
		offset = node->vm_node.start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->vm_node.start + best->vm_node.size;
		if (offset < start + pages)
			best = NULL;
	}

	return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove() anyway; no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
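 * Example (a sketch of how a GEM-style driver might publish an mmap offset;
 * "obj", its "size" member and the embedded "vma_node" are illustrative):
 *
 *	ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	offset = drm_vma_node_offset_addr(&obj->vma_node);
 *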
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto out_unlock;

	_drm_vma_offset_add_rb(mgr, node);

out_unlock:
	write_unlock(&mgr->vm_lock);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @filp: Open file to add
 *
 * Add @filp to the list of allowed open-files for this node. If @filp is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
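 * Example (a sketch of the usual pairing; the surrounding open/close driver
 * hooks and "obj" are illustrative):
 *
 *	// in the driver's open hook:
 *	ret = drm_vma_node_allow(&obj->vma_node, filp);
 *	...
 *	// in the matching close hook:
 *	drm_vma_node_revoke(&obj->vma_node, filp);
 *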
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (filp == entry->vm_filp) {
			entry->vm_count++;
			goto unlock;
		} else if (filp > entry->vm_filp) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_filp = filp;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	/* ownership moved to the tree; clear so the kfree below is a no-op */
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @filp: Open file to remove
 *
 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @filp from the list. You must call
 * this once for every drm_vma_node_allow() on @filp.
 *
 * This is locked against concurrent access internally.
 *
 * If @filp is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (filp > entry->vm_filp) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @filp: Open-file to check for
 *
 * Search the list in @node to check whether @filp is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
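 * Example (a sketch of the access check in a driver's mmap path; "obj" is
 * illustrative, and -EACCES matches the behaviour described in the DOC
 * section above):
 *
 *	if (!drm_vma_node_is_allowed(&obj->vma_node, filp))
 *		return -EACCES;
 *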
 * RETURNS:
 * true iff @filp is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp)
			break;
		else if (filp > entry->vm_filp)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	/* iter is non-NULL iff the loop found @filp */
	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
432