/*-
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $
 */

/** @file drm_vm.c
 * Support code for mmapping of DRM maps.
 */

#include <sys/conf.h>
#include <sys/mutex2.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <drm/drmP.h>
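
/*
 * How this entry point is reached (illustrative sketch, not part of the
 * original file): under the legacy map scheme, userspace obtains a map
 * handle (e.g. via libdrm's drmAddMap()) and passes it as the mmap(2)
 * offset on the DRM device node; drm_mmap() below decodes that offset.
 * Roughly, with fd, regs_base and size as placeholders:
 *
 *	drm_handle_t handle;
 *	drmAddMap(fd, regs_base, size, DRM_REGISTERS, 0, &handle);
 *	void *va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, (off_t)handle);
 *
 * The kernel-side contract is only that the high bits of the offset
 * select the map (DRM_MAP_HANDLE_SHIFT) and the low bits give the page
 * offset within it.
 */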

int drm_mmap(struct dev_mmap_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	vm_offset_t offset = ap->a_offset;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_file *file_priv = NULL;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;

	enum drm_map_type type;
	vm_paddr_t phys;

	/* d_mmap gets called twice; we can only reference file_priv during
	 * the first call.  We need to assume that if error is EBADF the
	 * call was successful and the client is authenticated.
	 */
	DRM_LOCK(dev);
	file_priv = drm_find_file_by_proc(dev, curthread);
	DRM_UNLOCK(dev);

	if (!file_priv) {
		DRM_ERROR("Could not find authenticator!\n");
		return EINVAL;
	}

	if (!file_priv->authenticated)
		return EACCES;

	DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset);
	if (dev->dma && offset < ptoa(dev->dma->page_count)) {
		drm_device_dma_t *dma = dev->dma;

		spin_lock(&dev->dma_lock);

		if (dma->pagelist != NULL) {
			unsigned long page = offset >> PAGE_SHIFT;
			unsigned long phys = dma->pagelist[page];

			spin_unlock(&dev->dma_lock);
			// XXX *paddr = phys;
			ap->a_result = phys;
			return 0;
		} else {
			spin_unlock(&dev->dma_lock);
			return -1;
		}
	}

	/* A sequential search of a linked list is fine here because:
	 * 1) there will only be about 5-10 entries in the list, and
	 * 2) a DRI client only has to do this mapping once, so it
	 * doesn't have to be optimized for performance, even if the
	 * list were a bit longer.
	 */
	DRM_LOCK(dev);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map && offset >> DRM_MAP_HANDLE_SHIFT ==
		    (unsigned long)r_list->map->handle >> DRM_MAP_HANDLE_SHIFT) {
			map = r_list->map;
			break;
		}
	}

	if (map == NULL) {
		DRM_DEBUG("Can't find map, request offset = %016jx\n",
		    (uintmax_t)offset);
		list_for_each_entry(r_list, &dev->maplist, head) {
			DRM_DEBUG("map offset = %016lx, handle = %016lx\n",
			    r_list->map->offset, (unsigned long)r_list->map->handle);
		}
		DRM_UNLOCK(dev);
		return -1;
	}
	if ((map->flags & _DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC)) {
		DRM_UNLOCK(dev);
		DRM_DEBUG("restricted map\n");
		return -1;
	}
	type = map->type;
	DRM_UNLOCK(dev);

	offset = offset & ((1ULL << DRM_MAP_HANDLE_SHIFT) - 1);

	switch (type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_AGP:
#if 0	/* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_REGISTERS:
		phys = map->offset + offset;
		break;
	case _DRM_SCATTER_GATHER:
#if 0	/* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_CONSISTENT:
	case _DRM_SHM:
		phys = vtophys((char *)map->virtual + offset);
		break;
	default:
		DRM_ERROR("bad map type %d\n", type);
		return -1;	/* This should never happen. */
	}

	ap->a_result = atop(phys);
	return 0;
}
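
/*
 * Worked example of the offset decoding above (illustrative; assumes the
 * usual 64-bit definition where DRM_MAP_HANDLE_SHIFT is 40; check drmP.h
 * for the authoritative value).  An mmap offset of 0x0000010000004000
 * selects the map whose handle has 0x1 in its top bits
 * (offset >> DRM_MAP_HANDLE_SHIFT) and maps it starting at byte 0x4000
 * (offset & ((1ULL << DRM_MAP_HANDLE_SHIFT) - 1)).  Note that
 * ap->a_result carries a page index, not an address, hence the atop()
 * conversion of the physical address.
 */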

/* XXX The following is just a temporary hack to replace the
 * vm_phys_fictitious functions available on FreeBSD.
 */
#define VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx = MTX_INITIALIZER;

MALLOC_DEFINE(M_FICT_PAGES, "fict_pages", "Fictitious VM pages");

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;

	page_count = (end - start) / PAGE_SIZE;

	fp = kmalloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		fp[i].flags &= ~(PG_BUSY | PG_UNMANAGED);
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	kfree(fp, M_FICT_PAGES);
	return (EBUSY);
}
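
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver registers the physical range backing a device aperture once,
 * then resolves faulting physical addresses to the fake pages.  The
 * names bus_base, bus_size and fault_offset are placeholders:
 *
 *	if (vm_phys_fictitious_reg_range(bus_base, bus_base + bus_size,
 *	    VM_MEMATTR_WRITE_COMBINING) != 0)
 *		return (ENOMEM);
 *	...
 *	vm_page_t m = vm_phys_fictitious_to_vm_page(bus_base + fault_offset);
 */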

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			kfree(fp, M_FICT_PAGES);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering an unregistered fictitious range"));
}
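
/*
 * Teardown sketch (hypothetical, pairing with the registration example
 * above): the driver must unregister exactly the [start, end) range it
 * registered, typically at detach time, or the KASSERT above fires:
 *
 *	vm_phys_fictitious_unreg_range(bus_base, bus_base + bus_size);
 */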
239