/*	$NetBSD: mm.c,v 1.18 2014/03/16 05:20:26 dholland Exp $	*/

/*-
 * Copyright (c) 2002, 2008, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christos Zoulas, Joerg Sonnenberger and Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Special /dev/{mem,kmem,zero,null} memory devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mm.c,v 1.18 2014/03/16 05:20:26 dholland Exp $");

#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/termios.h>

#include <dev/mm.h>

#include <uvm/uvm_extern.h>

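/*
 * dev_zero_page: a shared, read-only page of zeroes backing reads from
 * /dev/zero.  dev_mem_lock serializes the temporary kernel mapping used
 * for /dev/mem I/O, and dev_mem_addr is the fixed KVA reserved for that
 * mapping on platforms without cache aliasing (see dev_mem_getva()).
 */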
static void *		dev_zero_page	__read_mostly;
static kmutex_t		dev_mem_lock	__cacheline_aligned;
static vaddr_t		dev_mem_addr	__read_mostly;

static dev_type_read(mm_readwrite);
static dev_type_ioctl(mm_ioctl);
static dev_type_mmap(mm_mmap);

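/*
 * Character device switch for /dev/mem and friends.  The individual
 * devices are distinguished by minor number; D_MPSAFE marks the driver
 * as safe to enter without the kernel lock.
 */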
const struct cdevsw mem_cdevsw = {
#ifdef __HAVE_MM_MD_OPEN
	.d_open = mm_md_open,
#else
	.d_open = nullopen,
#endif
	.d_close = nullclose,
	.d_read = mm_readwrite,
	.d_write = mm_readwrite,
	.d_ioctl = mm_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = mm_mmap,
	.d_kqfilter = nokqfilter,
	.d_flag = D_MPSAFE
};

#ifdef pmax	/* XXX */
const struct cdevsw mem_ultrix_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_read = mm_readwrite,
	.d_write = mm_readwrite,
	.d_ioctl = mm_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = mm_mmap,
	.d_kqfilter = nokqfilter,
	.d_flag = D_MPSAFE
};
#endif

/*
 * mm_init: initialize memory device driver.
 */
void
mm_init(void)
{
	vaddr_t pg;

	mutex_init(&dev_mem_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Read-only zero-page. */
	pg = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	KASSERT(pg != 0);
	pmap_protect(pmap_kernel(), pg, pg + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());
	dev_zero_page = (void *)pg;

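	/*
	 * On platforms with virtually-indexed caches, a fixed KVA could
	 * alias other mappings of the same physical page, so the VA is
	 * instead allocated per request in dev_mem_getva(), with a
	 * matching cache color.
	 */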
#ifndef __HAVE_MM_MD_CACHE_ALIASING
	/* KVA for mappings during I/O. */
	dev_mem_addr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY|UVM_KMF_WAITVA);
	KASSERT(dev_mem_addr != 0);
#else
	dev_mem_addr = 0;
#endif
}

/*
 * dev_mem_getva: get a special virtual address.  If architecture requires,
 * allocate VA according to PA, which avoids cache-aliasing issues.  Use a
 * constant, general mapping address otherwise.
 */
static inline vaddr_t
dev_mem_getva(paddr_t pa)
{
#ifdef __HAVE_MM_MD_CACHE_ALIASING
	return uvm_km_alloc(kernel_map, PAGE_SIZE,
	    atop(pa) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
#else
	return dev_mem_addr;
#endif
}

static inline void
dev_mem_relva(paddr_t pa, vaddr_t va)
{
#ifdef __HAVE_MM_MD_CACHE_ALIASING
	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_VAONLY);
#else
	KASSERT(dev_mem_addr == va);
#endif
}

/*
 * dev_mem_readwrite: helper for DEV_MEM (/dev/mem) case of R/W.
 */
static int
dev_mem_readwrite(struct uio *uio, struct iovec *iov)
{
	paddr_t paddr;
	vaddr_t vaddr;
	vm_prot_t prot;
	size_t len, offset;
	bool have_direct;
	int error;

	/* Check for wrap around. */
	if ((intptr_t)uio->uio_offset != uio->uio_offset) {
		return EFAULT;
	}
	paddr = uio->uio_offset & ~PAGE_MASK;
	prot = (uio->uio_rw == UIO_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	error = mm_md_physacc(paddr, prot);
	if (error) {
		return error;
	}
	offset = uio->uio_offset & PAGE_MASK;
	len = MIN(uio->uio_resid, PAGE_SIZE - offset);
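	/* At most one page is transferred per call; mm_readwrite() loops. */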

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	/* Is physical address directly mapped?  Return VA. */
	have_direct = mm_md_direct_mapped_phys(paddr, &vaddr);
#else
	vaddr = 0;
	have_direct = false;
#endif
	if (!have_direct) {
		/* Get a special virtual address. */
		const vaddr_t va = dev_mem_getva(paddr);

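		/*
		 * Without cache aliasing, all requests share the single
		 * dev_mem_addr KVA, so the lock must be held across the
		 * map/copy/unmap sequence.
		 */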
		/* Map selected KVA to physical address. */
		mutex_enter(&dev_mem_lock);
		pmap_kenter_pa(va, paddr, prot, 0);
		pmap_update(pmap_kernel());

		/* Perform I/O. */
		vaddr = va + offset;
		error = uiomove((void *)vaddr, len, uio);

		/* Unmap, flush before unlock. */
		pmap_kremove(va, PAGE_SIZE);
		pmap_update(pmap_kernel());
		mutex_exit(&dev_mem_lock);

		/* "Release" the virtual address. */
		dev_mem_relva(paddr, va);
	} else {
		/* Direct map, just perform I/O. */
		vaddr += offset;
		error = uiomove((void *)vaddr, len, uio);
	}
	return error;
}

/*
 * dev_kmem_readwrite: helper for DEV_KMEM (/dev/kmem) case of R/W.
 */
static int
dev_kmem_readwrite(struct uio *uio, struct iovec *iov)
{
	void *addr;
	size_t len, offset;
	vm_prot_t prot;
	int error;
	bool md_kva;

	/* Check for wrap around. */
	addr = (void *)(intptr_t)uio->uio_offset;
	if ((uintptr_t)addr != uio->uio_offset) {
		return EFAULT;
	}
	/*
	 * Handle a non-page-aligned offset; otherwise we operate on
	 * a page-by-page basis.
	 */
	offset = uio->uio_offset & PAGE_MASK;
	len = MIN(uio->uio_resid, PAGE_SIZE - offset);
	prot = (uio->uio_rw == UIO_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;

	md_kva = false;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_IO
	paddr_t paddr;
	/* MD case: is this a directly mapped address? */
	if (mm_md_direct_mapped_io(addr, &paddr)) {
		/* If so, validate the physical address. */
		error = mm_md_physacc(paddr, prot);
		if (error) {
			return error;
		}
		md_kva = true;
	}
#endif
	if (!md_kva) {
		bool checked = false;

#ifdef __HAVE_MM_MD_KERNACC
		/* MD check for the address. */
		error = mm_md_kernacc(addr, prot, &checked);
		if (error) {
			return error;
		}
#endif
		/* UVM check for the address (unless the MD check already did). */
		if (!checked && !uvm_kernacc(addr, len, prot)) {
			return EFAULT;
		}
	}
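	/*
	 * Since len was clamped above, the transfer cannot cross a page
	 * boundary, so the checks above cover the whole range.
	 */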
	error = uiomove(addr, len, uio);
	return error;
}

/*
 * dev_zero_readwrite: helper for DEV_ZERO (/dev/zero) case of R/W.
 */
static inline int
dev_zero_readwrite(struct uio *uio, struct iovec *iov)
{
	size_t len;

	/* Nothing to do for the write case; the data is discarded. */
	if (uio->uio_rw == UIO_WRITE) {
		uio->uio_resid = 0;
		return 0;
	}
	/*
	 * Read on a page-by-page basis; the caller will iterate.
	 * Trim the length for the single/last-iteration cases.
	 */
	len = MIN(iov->iov_len, PAGE_SIZE);
	return uiomove(dev_zero_page, len, uio);
}

/*
 * mm_readwrite: general memory R/W function.
 */
static int
mm_readwrite(dev_t dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error;

#ifdef __HAVE_MM_MD_READWRITE
	/* If defined, there are extra MD cases. */
	switch (minor(dev)) {
	case DEV_MEM:
	case DEV_KMEM:
	case DEV_NULL:
	case DEV_ZERO:
#if defined(COMPAT_16) && defined(__arm)
	case _DEV_ZERO_oARM:
#endif
		break;
	default:
		return mm_md_readwrite(dev, uio);
	}
#endif
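	/*
	 * Main loop: each iteration either advances to the next I/O
	 * vector or hands at most one page to a per-device helper.
	 */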
	error = 0;
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* Processed; next I/O vector. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			KASSERT(uio->uio_iovcnt >= 0);
			continue;
		}
		/* Helper functions will process on a page-by-page basis. */
		switch (minor(dev)) {
		case DEV_MEM:
			error = dev_mem_readwrite(uio, iov);
			break;
		case DEV_KMEM:
			error = dev_kmem_readwrite(uio, iov);
			break;
		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE) {
				uio->uio_resid = 0;
			}
			/*
			 * Break directly out of the loop: writes are
			 * discarded and reads leave uio_resid intact,
			 * which read(2) reports as EOF.
			 */
			return 0;
#if defined(COMPAT_16) && defined(__arm)
		case _DEV_ZERO_oARM:
#endif
		case DEV_ZERO:
			error = dev_zero_readwrite(uio, iov);
			break;
		default:
			error = ENXIO;
			break;
		}
	}
	return error;
}

/*
 * mm_mmap: general mmap() handler.
 */
static paddr_t
mm_mmap(dev_t dev, off_t off, int acc)
{
	vm_prot_t prot;

#ifdef __HAVE_MM_MD_MMAP
	/* If defined, there are extra mmap() MD cases. */
	switch (minor(dev)) {
	case DEV_MEM:
	case DEV_KMEM:
	case DEV_NULL:
#if defined(COMPAT_16) && defined(__arm)
	case _DEV_ZERO_oARM:
#endif
	case DEV_ZERO:
		break;
	default:
		return mm_md_mmap(dev, off, acc);
	}
#endif
	/*
	 * Only /dev/mem is mappable here: mapping /dev/null makes no
	 * sense, /dev/kmem is too volatile and /dev/zero is handled
	 * by mmap(2) itself (as anonymous memory).
	 */
	if (minor(dev) != DEV_MEM) {
		return -1;
	}

	prot = 0;
	if (acc & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;
	if (acc & PROT_READ)
		prot |= VM_PROT_READ;
	if (acc & PROT_WRITE)
		prot |= VM_PROT_WRITE;

	/* Validate the physical address. */
	if (mm_md_physacc(off, prot) != 0) {
		return -1;
	}
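	/*
	 * The return value is the physical page frame number for this
	 * offset; the device pager converts it back to a physical
	 * address when entering the mapping.
	 */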
	return off >> PGSHIFT;
}

static int
mm_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{

	switch (cmd) {
	case FIONBIO:
		/* We never block anyway. */
		return 0;

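	/*
	 * Terminal ioctls: these devices are not ttys, so answer
	 * ENOTTY (this is what isatty(3) relies on).
	 */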
	case FIOSETOWN:
	case FIOGETOWN:
	case TIOCGPGRP:
	case TIOCSPGRP:
	case TIOCGETA:
		return ENOTTY;

	case FIOASYNC:
		if ((*(int *)data) == 0) {
			return 0;
		}
		/* FALLTHROUGH */
	default:
		return EOPNOTSUPP;
	}
}
423