/* $NetBSD: drm_gem.h,v 1.12 2021/12/19 10:38:23 riastradh Exp $ */

#ifndef __DRM_GEM_H__
#define __DRM_GEM_H__

/*
 * GEM Graphics Execution Manager Driver Interfaces
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef __NetBSD__
#include <uvm/uvm.h>
#endif

#include <linux/types.h>
#include <linux/kref.h>
#include <linux/dma-resv.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
#undef free

struct drm_gem_object;
struct xarray;

/**
 * struct drm_gem_object_funcs - GEM object functions
 */
struct drm_gem_object_funcs {
	/**
	 * @free:
	 *
	 * Deconstructor for drm_gem_objects.
	 *
	 * This callback is mandatory.
	 */
	void (*free)(struct drm_gem_object *obj);

	/**
	 * @open:
	 *
	 * Called upon GEM handle creation.
	 *
	 * This callback is optional.
	 */
	int (*open)(struct drm_gem_object *obj, struct drm_file *file);

	/**
	 * @close:
	 *
	 * Called upon GEM handle release.
	 *
	 * This callback is optional.
	 */
	void (*close)(struct drm_gem_object *obj, struct drm_file *file);

	/**
	 * @print_info:
	 *
	 * If driver subclasses struct &drm_gem_object, it can implement this
	 * optional hook for printing additional driver specific info.
	 *
	 * drm_printf_indent() should be used in the callback passing it the
	 * indent argument.
	 *
	 * This callback is called from drm_gem_print_info().
	 *
	 * This callback is optional.
	 */
	void (*print_info)(struct drm_printer *p, unsigned int indent,
			   const struct drm_gem_object *obj);

	/**
	 * @export:
	 *
	 * Export backing buffer as a &dma_buf.
	 * If this is not set drm_gem_prime_export() is used.
	 *
	 * This callback is optional.
	 */
	struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);

	/**
	 * @pin:
	 *
	 * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper.
	 *
	 * This callback is optional.
	 */
	int (*pin)(struct drm_gem_object *obj);

	/**
	 * @unpin:
	 *
	 * Unpin backing buffer. Used by the drm_gem_map_detach() helper.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct drm_gem_object *obj);

	/**
	 * @get_sg_table:
	 *
	 * Returns a Scatter-Gather table representation of the buffer.
	 * Used when exporting a buffer by the drm_gem_map_dma_buf() helper.
	 * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table()
	 * in drm_gem_unmap_dma_buf(), therefore these helpers and this callback
	 * here cannot be used for sg tables pointing at driver private memory
	 * ranges.
	 *
	 * See also drm_prime_pages_to_sg().
	 */
	struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);

	/**
	 * @vmap:
	 *
	 * Returns a virtual address for the buffer. Used by the
	 * drm_gem_dmabuf_vmap() helper.
	 *
	 * This callback is optional.
	 */
	void *(*vmap)(struct drm_gem_object *obj);

	/**
	 * @vunmap:
	 *
	 * Releases the address previously returned by @vmap. Used by the
	 * drm_gem_dmabuf_vunmap() helper.
	 *
	 * This callback is optional.
	 */
	void (*vunmap)(struct drm_gem_object *obj, void *vaddr);

	/**
	 * @mmap:
	 *
	 * Handle mmap() of the gem object, setup vma accordingly.
	 *
	 * This callback is optional.
	 *
	 * The callback is used by both drm_gem_mmap_obj() and
	 * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not
	 * used, the @mmap callback must set vma->vm_ops instead.
	 */
#ifdef __NetBSD__
	int (*mmap)(struct drm_gem_object *, off_t *, size_t, int,
	    int *, int *, struct uvm_object **, int *);
#else
	int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
#endif

	/**
	 * @vm_ops:
	 *
	 * Virtual memory operations used with mmap.
	 *
	 * This is optional but necessary for mmap support.
	 */
	const struct vm_operations_struct *vm_ops;
};
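
/*
 * Illustrative sketch (not part of this header): a driver typically
 * provides one static funcs table and points each object's ->funcs at
 * it right after initializing the object.  The mydrv_* names below are
 * hypothetical placeholders.
 *
 *	static const struct drm_gem_object_funcs mydrv_gem_funcs = {
 *		.free = mydrv_gem_free,
 *		.print_info = mydrv_gem_print_info,
 *		.get_sg_table = mydrv_gem_get_sg_table,
 *		.vmap = mydrv_gem_vmap,
 *		.vunmap = mydrv_gem_vunmap,
 *	};
 */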

/**
 * struct drm_gem_object - GEM buffer object
 *
 * This structure defines the generic parts for GEM buffer objects, which are
 * mostly around handling mmap and userspace handles.
 *
 * Buffer objects are often abbreviated to BO.
 */
struct drm_gem_object {
	/**
	 * @refcount:
	 *
	 * Reference count of this object
	 *
	 * Please use drm_gem_object_get() to acquire and drm_gem_object_put()
	 * or drm_gem_object_put_unlocked() to release a reference to a GEM
	 * buffer object.
	 */
	struct kref refcount;

	/**
	 * @handle_count:
	 *
	 * This is the GEM file_priv handle count of this object.
	 *
	 * Each handle also holds a reference. Note that when the handle_count
	 * drops to 0 any global names (e.g. the id in the flink namespace) will
	 * be cleared.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	unsigned handle_count;

	/**
	 * @dev: DRM dev this object belongs to.
	 */
	struct drm_device *dev;

#ifdef __NetBSD__
	/* UVM anonymous object for shared memory mappings. */
	struct uvm_object *filp;

	/* UVM object with custom pager ops for device memory mappings. */
	struct uvm_object gemo_uvmobj;
#else
	/**
	 * @filp:
	 *
	 * SHMEM file node used as backing storage for swappable buffer objects.
	 * GEM also supports driver private objects with driver-specific backing
	 * storage (contiguous CMA memory, special reserved blocks). In this
	 * case @filp is NULL.
	 */
	struct file *filp;
#endif

	/**
	 * @vma_node:
	 *
	 * Mapping info for this object to support mmap. Drivers are supposed to
	 * allocate the mmap offset using drm_gem_create_mmap_offset(). The
	 * offset itself can be retrieved using drm_vma_node_offset_addr().
	 *
	 * Memory mapping itself is handled by drm_gem_mmap(), which also checks
	 * that userspace is allowed to access the object.
	 */
	struct drm_vma_offset_node vma_node;

	/**
	 * @size:
	 *
	 * Size of the object, in bytes. Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/**
	 * @name:
	 *
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by &drm_device.object_name_lock. This is used by
	 * the GEM_FLINK and GEM_OPEN ioctls.
	 */
	int name;

	/**
	 * @dma_buf:
	 *
	 * dma-buf associated with this GEM object.
	 *
	 * Pointer to the dma-buf associated with this gem object (either
	 * through importing or exporting). We break the resulting reference
	 * loop when the last gem handle for this object is released.
	 *
	 * Protected by &drm_device.object_name_lock.
	 */
	struct dma_buf *dma_buf;

	/**
	 * @import_attach:
	 *
	 * dma-buf attachment backing this object.
	 *
	 * Any foreign dma_buf imported as a gem object has this set to the
	 * attachment point for the device. This is invariant over the lifetime
	 * of a gem object.
	 *
	 * The &drm_driver.gem_free_object callback is responsible for cleaning
	 * up the dma_buf attachment and references acquired at import time.
	 *
	 * Note that the drm gem/prime core does not depend upon drivers setting
	 * this field any more. So for drivers where this doesn't make sense
	 * (e.g. virtual devices or a displaylink behind a USB bus) they can
	 * simply leave it as NULL.
	 */
	struct dma_buf_attachment *import_attach;

	/**
	 * @resv:
	 *
	 * Pointer to reservation object associated with this GEM object.
	 *
	 * Normally (@resv == &@_resv) except for imported GEM objects.
	 */
	struct dma_resv *resv;

	/**
	 * @_resv:
	 *
	 * A reservation object for this GEM object.
	 *
	 * This is unused for imported GEM objects.
	 */
	struct dma_resv _resv;

	/**
	 * @funcs:
	 *
	 * Optional GEM object functions. If this is set, it will be used instead of the
	 * corresponding &drm_driver GEM callbacks.
	 *
	 * New drivers should use this.
	 *
	 */
	const struct drm_gem_object_funcs *funcs;
};
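
/*
 * Drivers normally embed struct drm_gem_object as a member of their own
 * buffer object and convert back with container_of().  A minimal sketch,
 * with hypothetical mydrv_* names (not part of this header):
 *
 *	struct mydrv_gem_object {
 *		struct drm_gem_object base;
 *		// driver private state follows
 *	};
 *
 *	#define to_mydrv_gem(o) container_of(o, struct mydrv_gem_object, base)
 */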

/**
 * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
 * @name: name for the generated structure
 *
 * This macro autogenerates a suitable &struct file_operations for GEM based
 * drivers, which can be assigned to &drm_driver.fops. Note that this structure
 * cannot be shared between drivers, because it contains a reference to the
 * current module using THIS_MODULE.
 *
 * Note that the declaration is already marked as static - if you need a
 * non-static version of this you're probably doing it wrong and will break the
 * THIS_MODULE reference by accident.
 */
#define DEFINE_DRM_GEM_FOPS(name) \
	static const struct file_operations name = {\
		.owner		= THIS_MODULE,\
		.open		= drm_open,\
		.release	= drm_release,\
		.unlocked_ioctl	= drm_ioctl,\
		.compat_ioctl	= drm_compat_ioctl,\
		.poll		= drm_poll,\
		.read		= drm_read,\
		.llseek		= noop_llseek,\
		.mmap		= drm_gem_mmap,\
	}
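
/*
 * Usage sketch for DEFINE_DRM_GEM_FOPS() (illustrative only; the mydrv_*
 * names are hypothetical):
 *
 *	DEFINE_DRM_GEM_FOPS(mydrv_fops);
 *
 *	static struct drm_driver mydrv_driver = {
 *		.fops = &mydrv_fops,
 *		// remaining driver callbacks elided
 *	};
 */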

void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
#ifdef __NetBSD__
void drm_gem_pager_reference(struct uvm_object *);
void drm_gem_pager_detach(struct uvm_object *);
int drm_gem_mmap_object(struct drm_device *, off_t, size_t, int,
    struct uvm_object **, voff_t *, struct file *);
int drm_gem_or_legacy_mmap_object(struct drm_device *, off_t, size_t, int,
    struct uvm_object **, voff_t *, struct file *);
#else
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
#endif

/**
 * drm_gem_object_get - acquire a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This function acquires an additional reference to @obj. It is illegal to
 * call this without already holding a reference. No locks required.
 */
static inline void drm_gem_object_get(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}

/**
 * __drm_gem_object_put - raw function to release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This function is meant to be used by drivers which are not encumbered with
 * &drm_device.struct_mutex legacy locking and which are using the
 * gem_free_object_unlocked callback. It avoids all the locking checks and
 * locking overhead of drm_gem_object_put() and drm_gem_object_put_unlocked().
 *
 * Drivers should never call this directly in their code. Instead they should
 * wrap it up into a ``driver_gem_object_put(struct driver_gem_object *obj)``
 * wrapper function, and use that. Shared code should never call this, to
 * avoid breaking drivers by accident which still depend upon
 * &drm_device.struct_mutex locking.
 */
static inline void
__drm_gem_object_put(struct drm_gem_object *obj)
{
	kref_put(&obj->refcount, drm_gem_object_free);
}
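
/*
 * Example of the driver-side wrapper described above (sketch only; the
 * mydrv_* names and the `base` member are hypothetical):
 *
 *	static inline void mydrv_gem_object_put(struct mydrv_gem_object *obj)
 *	{
 *		__drm_gem_object_put(&obj->base);
 *	}
 */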

void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
void drm_gem_object_put(struct drm_gem_object *obj);

int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);


void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed);

int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
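
/*
 * Typical handle-lookup pattern in an ioctl handler (sketch only;
 * drm_gem_object_lookup() returns a new reference the caller must drop):
 *
 *	struct drm_gem_object *obj;
 *
 *	obj = drm_gem_object_lookup(file_priv, handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	// use obj
 *	drm_gem_object_put_unlocked(obj);
 */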
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout);
int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			      struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
				 struct ww_acquire_ctx *acquire_ctx);
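
/*
 * drm_gem_lock_reservations()/drm_gem_unlock_reservations() are used as a
 * pair around code that touches the reservation objects of several BOs at
 * once.  A minimal sketch (caller-side error handling abridged):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *	// operate on objs[0..count-1]->resv
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */
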
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence);
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset);
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle);

#define free(addr, type) kern_free(addr)

#endif /* __DRM_GEM_H__ */