/*	$NetBSD: drm_gem.h,v 1.5 2018/08/27 15:22:54 riastradh Exp $	*/

#ifndef __DRM_GEM_H__
#define __DRM_GEM_H__

/*
 * GEM Graphics Execution Manager Driver Interfaces
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef __NetBSD__
#include <uvm/uvm.h>
#endif

/**
 * This structure defines the GEM buffer object, which will be used by the
 * DRM as the base for its driver buffer objects.
 */
struct drm_gem_object {
	/** Reference count of this object */
	struct kref refcount;

	/**
	 * handle_count - gem file_priv handle count of this object
	 *
	 * Each handle also holds a reference. Note that when the handle_count
	 * drops to 0 any global names (e.g. the id in the flink namespace) will
	 * be cleared.
	 *
	 * Protected by dev->object_name_lock.
	 */
	unsigned handle_count;

	/** Related drm device */
	struct drm_device *dev;

#ifdef __NetBSD__
	/* UVM anonymous object for shared memory mappings.  */
	struct uvm_object *filp;

	/* UVM object with custom pager ops for device memory mappings.  */
	struct uvm_object gemo_uvmobj;
#else
	/** File representing the shmem storage */
	struct file *filp;
#endif

	/* Mapping info for this object */
	struct drm_vma_offset_node vma_node;

	/**
	 * Size of the object, in bytes.  Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/**
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by the object_name_lock in the related drm_device
	 */
	int name;

	/**
	 * Memory domains. These monitor which caches contain read/write data
	 * related to the object. When transitioning from one set of domains
	 * to another, the driver is called to ensure that caches are suitably
	 * flushed and invalidated
	 */
	uint32_t read_domains;
	uint32_t write_domain;

	/**
	 * While validating an exec operation, the
	 * new read/write domain values are computed here.
	 * They will be transferred to the above values
	 * at the point that any cache flushing occurs
	 */
	uint32_t pending_read_domains;
	uint32_t pending_write_domain;
	/**
	 * dma_buf - dma buf associated with this GEM object
	 *
	 * Pointer to the dma-buf associated with this gem object (either
	 * through importing or exporting). We break the resulting reference
	 * loop when the last gem handle for this object is released.
	 *
	 * Protected by dev->object_name_lock.
	 */
	struct dma_buf *dma_buf;
	/**
	 * import_attach - dma buf attachment backing this object
	 *
	 * Any foreign dma_buf imported as a gem object has this set to the
	 * attachment point for the device. This is invariant over the lifetime
	 * of a gem object.
	 *
	 * The driver's ->gem_free_object callback is responsible for cleaning
	 * up the dma_buf attachment and references acquired at import time.
	 *
	 * Note that the drm gem/prime core does not depend upon drivers setting
	 * this field any more, so for drivers where this doesn't make sense
	 * (e.g. virtual devices or a DisplayLink device behind a USB bus) they
	 * can simply leave it as NULL.
	 */
	struct dma_buf_attachment *import_attach;
};

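/*
 * Example (illustrative sketch, not part of this header): drivers normally
 * embed struct drm_gem_object as the first member of their own buffer
 * object and recover it with container_of().  The driver type and helper
 * below are hypothetical.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		. driver-private state, e.g. backing pages, GPU address .
 *	};
 *
 *	static inline struct foo_bo *
 *	to_foo_bo(struct drm_gem_object *obj)
 *	{
 *		return container_of(obj, struct foo_bo, base);
 *	}
 */
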
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
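
/*
 * Example (illustrative sketch, Linux side): allocating and initializing
 * an embedded GEM object with shmem backing; drm_gem_private_object_init()
 * is the variant for objects without shmem backing (e.g. imported
 * dma-bufs).  foo_bo and the ERR_PTR error style are hypothetical.
 *
 *	struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	if (bo == NULL)
 *		return ERR_PTR(-ENOMEM);
 *	ret = drm_gem_object_init(dev, &bo->base, size);
 *	if (ret) {
 *		kfree(bo);
 *		return ERR_PTR(ret);
 *	}
 */
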
#ifdef __NetBSD__
void drm_gem_pager_reference(struct uvm_object *);
void drm_gem_pager_detach(struct uvm_object *);
int drm_gem_mmap_object(struct drm_device *, off_t, size_t, int,
    struct uvm_object **, voff_t *, struct file *);
int drm_gem_or_legacy_mmap_object(struct drm_device *, off_t, size_t, int,
    struct uvm_object **, voff_t *, struct file *);
#else
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
#endif

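/*
 * Example (illustrative sketch, Linux side): drm_gem_mmap() is normally
 * installed directly as the driver's mmap file operation; it looks up the
 * object from the fake mmap offset and sets up the VMA.  foo_fops is a
 * hypothetical driver symbol.
 *
 *	static const struct file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *	};
 */
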
static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}

static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
	if (obj != NULL) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}

static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;
	if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
		mutex_unlock(&dev->struct_mutex);
	else
		might_lock(&dev->struct_mutex);
}

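/*
 * Example (illustrative sketch): drm_gem_object_unreference() may only be
 * called with dev->struct_mutex held, since drm_gem_object_free() expects
 * that lock; use the _unlocked variant everywhere else.
 *
 *	mutex_lock(&dev->struct_mutex);
 *	drm_gem_object_unreference(obj);
 *	mutex_unlock(&dev->struct_mutex);
 */
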
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);

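/*
 * Example (illustrative sketch): publishing a freshly created object to
 * userspace.  drm_gem_handle_create() takes its own reference, so the
 * creator drops the initial one regardless of success.  foo_bo_create()
 * and the args layout are hypothetical.
 *
 *	struct foo_bo *bo = foo_bo_create(dev, args->size);
 *	u32 handle;
 *	int ret;
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_unreference_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 *	return 0;
 */
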
void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

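/*
 * Example (illustrative sketch): a dumb_map_offset implementation returns
 * the fake mmap offset that userspace later passes to mmap(2).  The offset
 * is read from obj->vma_node with drm_vma_node_offset_addr() from
 * <drm/drm_vma_manager.h>.
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 */
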
struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);

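/*
 * Example (illustrative sketch, Linux side): pinning the shmem backing
 * store.  drm_gem_get_pages() returns an array of one page pointer per
 * page of obj->size; the array must be returned with drm_gem_put_pages(),
 * indicating whether the pages were written (dirty) and used (accessed).
 *
 *	struct page **pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	. access the pages .
 *	drm_gem_put_pages(obj, pages, true, true);
 */
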
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
					     struct drm_file *filp,
					     u32 handle);
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle);

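/*
 * Example (illustrative sketch): the usual ioctl pattern.  A successful
 * drm_gem_object_lookup() returns a new reference, which the caller must
 * drop when done.
 *
 *	struct drm_gem_object *obj;
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	. operate on obj .
 *	drm_gem_object_unreference_unlocked(obj);
 *	return 0;
 */
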
#endif /* __DRM_GEM_H__ */