/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

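/*
 * Bookkeeping for one userspace mapping of a legacy map; an entry is
 * kept on drm_device::vmalist for every VMA opened against the device.
 */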
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

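/*
 * Choose the page protection for an I/O map: registers are mapped
 * uncached unless _DRM_WRITE_COMBINING was requested; frame buffers
 * are write-combined where the architecture supports it.
 */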
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__) || defined(__loongarch__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

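/*
 * Choose the page protection for DMA memory.  Only PowerPC with a
 * non-cache-coherent cache needs anything beyond the default.
 */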
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}

/*
 * \c fault method for AGP virtual memory.
 *
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS; on success the faulted-in
 * page is stored in \p vmf->page.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.  Faults are only
 * resolved this way when the CPU cannot address the aperture directly
 * (drm_agp_head::cant_use_aperture); otherwise the whole range was remapped
 * at mmap time and any fault is an error.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct vm_page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/*
 * \c fault method for shared virtual memory.
 *
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS; on success the faulted-in
 * page is stored in \p vmf->page.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct vm_page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/*
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
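	/*
	 * Count every mapping of this map (our own included) while
	 * unlinking our vmalist entry.
	 */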
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dma_free_coherent(dev->dev,
						  map->size,
						  map->handle,
						  map->offset);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/*
 * \c fault method for DMA virtual memory.
 *
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS; on success the faulted-in
 * page is stored in \p vmf->page.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct vm_page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/*
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS; on success the faulted-in
 * page is stored in \p vmf->page.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct vm_page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

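	/*
	 * map->offset for a scatter-gather map is based at
	 * dev->sg->virtual, so subtracting that yields the page index
	 * into entry->pagelist.
	 */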
	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

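/*
 * Record a new mapping on drm_device::vmalist.  An allocation failure
 * is deliberately ignored, since the list is bookkeeping only.  The
 * caller must hold dev->struct_mutex.
 */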
static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

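/*
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */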
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

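/*
 * Unlink and free the vmalist entry for \p vma.  The caller must hold
 * dev->struct_mutex.
 */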
static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/*
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * registers the mapping with drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);

	drm_vm_open_locked(dev, vma);
	return 0;
}

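/*
 * Return the extra bus offset added to a register or frame buffer
 * map's offset before remapping; only Alpha needs a non-zero one.
 */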
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/*
 * mmap a DRM map.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() handles it. Otherwise the map is looked up in
 * drm_device::maplist, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type and the pages are remapped.
 * Finally the mapping is registered with drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		fallthrough;	/* to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		fallthrough;	/* to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);

	drm_vm_open_locked(dev, vma);
	return 0;
}

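/*
 * mmap() entry point for legacy DRM character devices: reject
 * unplugged devices, then run drm_mmap_locked() under
 * dev->struct_mutex.
 */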
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif
666