/*	$NetBSD: vmwgfx_prime.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors:
 *     Thomas Hellstrom <thellstrom@vmware.com>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_prime.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");

#include "vmwgfx_drv.h"
#include <linux/dma-buf.h>
#include <drm/ttm/ttm_object.h>

/*
 * DMA-BUF attach and mapping methods. No need to implement
 * these until other virtual devices use them.
 */
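/*
 * Until then the callbacks below are stubs: attach, map_dma_buf and
 * mmap fail with -ENOSYS, the kmap/vmap variants return NULL, and the
 * corresponding teardown hooks are no-ops.
 */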

static int vmw_prime_map_attach(struct dma_buf *dma_buf,
				struct device *target_dev,
				struct dma_buf_attachment *attach)
{
	return -ENOSYS;
}

static void vmw_prime_map_detach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
}

static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
					      enum dma_data_direction dir)
{
	return ERR_PTR(-ENOSYS);
}

static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
				    struct sg_table *sgb,
				    enum dma_data_direction dir)
{
}

static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
{
	return NULL;
}

static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
}

static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{
}

static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{
}

static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
				 struct vm_area_struct *vma)
{
	WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
	return -ENOSYS;
}

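/*
 * dma_buf operations used for buffers exported by vmwgfx.  Cross-device
 * sharing is not supported, so most entries point at the stubs above.
 */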
const struct dma_buf_ops vmw_prime_dmabuf_ops = {
	.attach = vmw_prime_map_attach,
	.detach = vmw_prime_map_detach,
	.map_dma_buf = vmw_prime_map_dma_buf,
	.unmap_dma_buf = vmw_prime_unmap_dma_buf,
	.release = NULL,
	.kmap = vmw_prime_dmabuf_kmap,
	.kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
	.kunmap = vmw_prime_dmabuf_kunmap,
	.kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
	.mmap = vmw_prime_dmabuf_mmap,
	.vmap = vmw_prime_dmabuf_vmap,
	.vunmap = vmw_prime_dmabuf_vunmap,
};

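/*
 * vmw_prime_fd_to_handle - Convert a prime file descriptor into a
 * buffer object handle for the calling client by delegating to the
 * TTM prime layer with the client's ttm_object_file.
 */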
int vmw_prime_fd_to_handle(struct drm_device *dev,
			   struct drm_file *file_priv,
			   int fd, u32 *handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_prime_fd_to_handle(tfile, fd, handle);
}

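/*
 * vmw_prime_handle_to_fd - Export the buffer object identified by
 * handle as a prime file descriptor, again by delegating to the TTM
 * prime layer.
 */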
int vmw_prime_handle_to_fd(struct drm_device *dev,
			   struct drm_file *file_priv,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
}
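
/*
 * For reference, these two functions are the driver's PRIME ioctl entry
 * points; the drm_driver hookup (see vmwgfx_drv.c) looks roughly like:
 *
 *	.prime_fd_to_handle = vmw_prime_fd_to_handle,
 *	.prime_handle_to_fd = vmw_prime_handle_to_fd,
 */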
143