/*	$OpenBSD: mem.c,v 1.39 2024/12/30 02:46:00 guenther Exp $	*/
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)mem.c	8.3 (Berkeley) 1/12/94
 */

/*
 * Memory special file
 */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/atomic.h>

#include <machine/cpu.h>

#include <uvm/uvm_extern.h>

caddr_t zeropage;
extern int start, end, etext;

/* open counter for aperture */
#ifdef APERTURE
static int ap_open_count = 0;
extern int allowaperture;

#define VGA_START 0xA0000
#define BIOS_END 0xFFFFF
#endif

#ifdef MTRR
struct mem_range_softc mem_range_softc;
int mem_ioctl(dev_t, u_long, caddr_t, int, struct proc *);
int mem_range_attr_get(struct mem_range_desc *, int *);
int mem_range_attr_set(struct mem_range_desc *, int *);
#endif


int
mmopen(dev_t dev, int flag, int mode, struct proc *p)
{
	extern int allowkmem;

	switch (minor(dev)) {
	case 0:
	case 1:
		if ((int)atomic_load_int(&securelevel) <= 0 ||
		    atomic_load_int(&allowkmem))
			break;
		return (EPERM);
	case 2:
	case 12:
		break;
#ifdef APERTURE
	case 4:
		if (suser(p) != 0 || !allowaperture)
			return (EPERM);

		/* authorize only one simultaneous open() unless
		 * allowaperture=3 */
		if (ap_open_count > 0 && allowaperture < 3)
			return (EPERM);
		ap_open_count++;
		break;
#endif
	default:
		return (ENXIO);
	}
	return (0);
}

int
mmclose(dev_t dev, int flag, int mode, struct proc *p)
{
#ifdef APERTURE
	if (minor(dev) == 4)
		ap_open_count = 0;
#endif
	return (0);
}

int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	extern vaddr_t kern_end;
	vaddr_t v;
	size_t c;
	struct iovec *iov;
	int error = 0;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		/* minor device 0 is physical memory */
		case 0:
			v = PMAP_DIRECT_MAP(uio->uio_offset);
			error = uiomove((caddr_t)v, uio->uio_resid, uio);
			continue;

		/* minor device 1 is kernel memory */
		case 1:
			v = uio->uio_offset;
			c = ulmin(iov->iov_len, MAXPHYS);
			if (v >= (vaddr_t)&start && v < kern_end - c) {
				if (v < (vaddr_t)&etext - c &&
				    uio->uio_rw == UIO_WRITE)
					return EFAULT;
			} else if ((!uvm_kernacc((caddr_t)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) &&
			    (v < PMAP_DIRECT_BASE || v > PMAP_DIRECT_END - c))
				return (EFAULT);
			error = uiomove((caddr_t)v, c, uio);
			continue;

		/* minor device 2 is /dev/null */
		case 2:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

		/* minor device 12 is /dev/zero */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zeropage == NULL)
				zeropage =
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK|M_ZERO);
			c = ulmin(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			continue;

		default:
			return (ENXIO);
		}
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}

	return (error);
}

paddr_t
mmmmap(dev_t dev, off_t off, int prot)
{
	struct proc *p = curproc;	/* XXX */

	switch (minor(dev)) {
	/* minor device 0 is physical memory */
	case 0:
		if (suser(p) != 0 && amd64_pa_used(off))
			return -1;
		return off;

#ifdef APERTURE
	/* minor device 4 is aperture driver */
	case 4:
		/* Check if a write combining mapping is requested. */
		if (off >= MEMRANGE_WC_RANGE)
			off = (off - MEMRANGE_WC_RANGE) | PMAP_WC;

		switch (allowaperture) {
		case 1:
			/* Allow mapping of the VGA framebuffer & BIOS only */
			if ((off >= VGA_START && off <= BIOS_END) ||
			    !amd64_pa_used(off))
				return off;
			else
				return -1;
		case 2:
		case 3:
			/* Allow mapping of the whole 1st megabyte
			   for x86emu */
			if (off <= BIOS_END || !amd64_pa_used(off))
				return off;
			else
				return -1;
		default:
			return -1;
		}

#endif
	default:
		return -1;
	}
}

int
mmioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	switch (cmd) {
	case FIOASYNC:
		/* handled by fd layer */
		return 0;
	}

#ifdef MTRR
	switch (minor(dev)) {
	case 0:
	case 4:
		return mem_ioctl(dev, cmd, data, flags, p);
	}
#endif
	return (ENOTTY);
}

#ifdef MTRR
/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
int
mem_ioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	int nd, error = 0;
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);
	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	switch (cmd) {
	case MEMRANGE_GET:
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd > 0) {
			md = mallocarray(nd, sizeof(struct mem_range_desc),
			    M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
				    nd * sizeof(struct mem_range_desc));
			free(md, M_MEMDESC, nd * sizeof(struct mem_range_desc));
		} else {
			nd = mem_range_softc.mr_ndesc;
		}
		mo->mo_arg[0] = nd;
		break;

	case MEMRANGE_SET:
		md = malloc(sizeof(struct mem_range_desc), M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		free(md, M_MEMDESC, sizeof(struct mem_range_desc));
		break;
	}
	return (error);
}

/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	if (*arg == 0) {
		*arg = mem_range_softc.mr_ndesc;
	} else {
		memcpy(mrd, mem_range_softc.mr_desc,
		    (*arg) * sizeof(struct mem_range_desc));
	}
	return (0);
}

int
mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
{
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
}
#endif /* MTRR */