/*	$NetBSD: drm_vm.c,v 1.2 2018/08/27 04:58:19 riastradh Exp $	*/

/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_vm.c,v 1.2 2018/08/27 04:58:19 riastradh Exp $");

#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_internal.h"
#include "drm_legacy.h"

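/*
 * Bookkeeping entry for one userspace mapping of the device.  Entries
 * live on drm_device::vmalist and are enumerated by drm_vma_info().
 */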
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

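/*
 * Choose the page protection for an I/O map.  Register maps are mapped
 * uncached unless _DRM_WRITE_COMBINING was requested; frame buffers use
 * write combining where the architecture supports it, and architectures
 * without that support fall back to uncached.
 */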
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

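/*
 * Choose the page protection for consistent and scatter-gather maps;
 * only non-cache-coherent PowerPC configurations need to force uncached
 * access here.
 */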
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
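/* Stub used when AGP support is not compiled in: any fault fails. */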
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && (map->flags & _DRM_REMOVABLE)) {
		/* Check to see if we are in the maplist; if we are not,
		 * then we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

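/*
 * Thin wrappers adapting the per-map-type handlers above to the
 * vm_operations_struct fault signature.
 */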
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct drm_device *dev,
		struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

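/**
 * Variant of drm_vm_close() for callers that already hold
 * drm_device::struct_mutex: unlink and free the \p vma entry from
 * drm_device::vmalist.
 */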
void drm_vm_close_locked(struct drm_device *dev,
		struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search drm_device::vmalist for the \p vma entry, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

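/*
 * Extra offset added to a map's bus address before remapping; only
 * Alpha needs one, to reach registers through its dense memory space.
 */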
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/**
 * mmap memory (all legacy map types).
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::map_hash, the restricted flag is checked, the virtual memory
 * operations are set according to the map type and the pages are remapped.
 * Finally drm_vm_open_locked() is called.
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms the CPU cannot access the AGP
			 * aperture directly, so for _DRM_AGP maps we sort
			 * out the real physical pages and mappings in
			 * fault().
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
	/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
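
/*
 * Usage sketch (illustrative, not part of this file): a legacy driver
 * typically wires drm_legacy_mmap into its file_operations, along the
 * lines of:
 *
 *	static const struct file_operations foo_legacy_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_legacy_mmap,
 *		.poll = drm_poll,
 *	};
 *
 * "foo_legacy_fops" is a made-up name; the drm_* callbacks are the
 * standard DRM file operations.
 */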

void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}

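/**
 * Dump the list of tracked userspace mappings for the DRM "vma" debug
 * info file.
 */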
int drm_vma_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_vma_entry *pt;
	struct vm_area_struct *vma;
	unsigned long vma_count = 0;
#if defined(__i386__)
	unsigned int pgprot;
#endif

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(pt, &dev->vmalist, head)
		vma_count++;

	seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
		   vma_count, high_memory,
		   (void *)(unsigned long)virt_to_phys(high_memory));

	list_for_each_entry(pt, &dev->vmalist, head) {
		vma = pt->vma;
		if (!vma)
			continue;
		seq_printf(m,
			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
			   pt->pid,
			   (void *)vma->vm_start, (void *)vma->vm_end,
			   vma->vm_flags & VM_READ ? 'r' : '-',
			   vma->vm_flags & VM_WRITE ? 'w' : '-',
			   vma->vm_flags & VM_EXEC ? 'x' : '-',
			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
			   vma->vm_flags & VM_IO ? 'i' : '-',
			   vma->vm_pgoff);

#if defined(__i386__)
		pgprot = pgprot_val(vma->vm_page_prot);
		seq_printf(m, " %c%c%c%c%c%c%c%c%c",
			   pgprot & _PAGE_PRESENT ? 'p' : '-',
			   pgprot & _PAGE_RW ? 'w' : 'r',
			   pgprot & _PAGE_USER ? 'u' : 's',
			   pgprot & _PAGE_PWT ? 't' : 'b',
			   pgprot & _PAGE_PCD ? 'u' : 'c',
			   pgprot & _PAGE_ACCESSED ? 'a' : '-',
			   pgprot & _PAGE_DIRTY ? 'd' : '-',
			   pgprot & _PAGE_PSE ? 'm' : 'k',
			   pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
		seq_printf(m, "\n");
	}
	mutex_unlock(&dev->struct_mutex);
	return 0;
}