/*	$NetBSD: mm.c,v 1.25 2024/08/25 11:29:38 uwe Exp $	*/

/*-
 * Copyright (c) 2002, 2008, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christos Zoulas, Joerg Sonnenberger and Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Special /dev/{mem,kmem,zero,null} memory devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mm.c,v 1.25 2024/08/25 11:29:38 uwe Exp $");

#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/termios.h>

#include <dev/mm.h>

#include <uvm/uvm_extern.h>

static void *		dev_zero_page	__read_mostly;
static kmutex_t		dev_mem_lock	__cacheline_aligned;
static vaddr_t		dev_mem_addr	__read_mostly;

static dev_type_open(mm_open);
static dev_type_read(mm_readwrite);
static dev_type_mmap(mm_mmap);
static dev_type_ioctl(mm_ioctl);

const struct cdevsw mem_cdevsw = {
	.d_open = mm_open,
	.d_close = nullclose,
	.d_read = mm_readwrite,
	.d_write = mm_readwrite,
	.d_ioctl = mm_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = mm_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_MPSAFE
};

#ifdef pmax	/* XXX */
const struct cdevsw mem_ultrix_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_read = mm_readwrite,
	.d_write = mm_readwrite,
	.d_ioctl = mm_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = mm_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_MPSAFE
};
#endif
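
/*
 * mm_open: open a memory device.  Run the optional MD open hook first;
 * on success, mark the process (PK_KMEM) as having a memory device open.
 */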
static int
mm_open(dev_t dev, int flag, int mode, struct lwp *l)
{
#ifdef __HAVE_MM_MD_OPEN
	int error;
	if ((error = mm_md_open(dev, flag, mode, l)) != 0)
		return error;
#endif
	l->l_proc->p_flag |= PK_KMEM;
	return 0;
}

/*
 * mm_init: initialize the memory device driver.
 */
void
mm_init(void)
{
	vaddr_t pg;

	mutex_init(&dev_mem_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Read-only zero-page. */
	pg = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	KASSERT(pg != 0);
	pmap_protect(pmap_kernel(), pg, pg + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());
	dev_zero_page = (void *)pg;

#ifndef __HAVE_MM_MD_CACHE_ALIASING
	/* KVA for mappings during I/O. */
	dev_mem_addr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY|UVM_KMF_WAITVA);
	KASSERT(dev_mem_addr != 0);
#else
	dev_mem_addr = 0;
#endif
}

/*
 * dev_mem_getva: get a special virtual address.  If the architecture
 * requires it, allocate the VA according to the PA, which avoids
 * cache-aliasing issues.  Otherwise, use a constant, general mapping
 * address.
 */
static inline vaddr_t
dev_mem_getva(paddr_t pa, int color)
{
#ifdef __HAVE_MM_MD_CACHE_ALIASING
	return uvm_km_alloc(kernel_map, PAGE_SIZE,
	    color & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
#else
	return dev_mem_addr;
#endif
}
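
/*
 * dev_mem_relva: release the special virtual address obtained via
 * dev_mem_getva() once the I/O is complete.
 */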
static inline void
dev_mem_relva(paddr_t pa, vaddr_t va)
{
#ifdef __HAVE_MM_MD_CACHE_ALIASING
	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_VAONLY);
#else
	KASSERT(dev_mem_addr == va);
#endif
}

/*
 * dev_mem_readwrite: helper for the DEV_MEM (/dev/mem) case of R/W.
 */
static int
dev_mem_readwrite(struct uio *uio, struct iovec *iov)
{
	paddr_t paddr;
	vaddr_t vaddr;
	vm_prot_t prot;
	size_t len, offset;
	bool have_direct;
	int error;
	int color = 0;

	/* Check for wrap around. */
	if ((uintptr_t)uio->uio_offset != uio->uio_offset) {
		return EFAULT;
	}
	paddr = uio->uio_offset & ~PAGE_MASK;
	prot = (uio->uio_rw == UIO_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	error = mm_md_physacc(paddr, prot);
	if (error) {
		return error;
	}
	offset = uio->uio_offset & PAGE_MASK;
	len = MIN(uio->uio_resid, PAGE_SIZE - offset);

#ifdef __HAVE_MM_MD_CACHE_ALIASING
	have_direct = mm_md_page_color(paddr, &color);
#else
	have_direct = true;
	color = 0;
#endif

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	/* Is the physical address directly mapped?  If so, get the VA. */
	if (have_direct)
		have_direct = mm_md_direct_mapped_phys(paddr, &vaddr);
#else
	vaddr = 0;
	have_direct = false;
#endif
	if (!have_direct) {
		/* Get a special virtual address. */
		const vaddr_t va = dev_mem_getva(paddr, color);

		/* Map the selected KVA to the physical address. */
		mutex_enter(&dev_mem_lock);
		pmap_kenter_pa(va, paddr, prot, 0);
		pmap_update(pmap_kernel());

		/* Perform I/O. */
		vaddr = va + offset;
		error = uiomove((void *)vaddr, len, uio);

		/* Unmap and flush before unlocking. */
		pmap_kremove(va, PAGE_SIZE);
		pmap_update(pmap_kernel());
		mutex_exit(&dev_mem_lock);

		/* "Release" the virtual address. */
		dev_mem_relva(paddr, va);
	} else {
		/* Directly mapped; just perform the I/O. */
		vaddr += offset;
		error = uiomove((void *)vaddr, len, uio);
	}
	return error;
}

/*
 * dev_kmem_readwrite: helper for the DEV_KMEM (/dev/kmem) case of R/W.
 */
static int
dev_kmem_readwrite(struct uio *uio, struct iovec *iov)
{
	void *addr;
	size_t len, offset;
	vm_prot_t prot;
	int error;
	bool md_kva;

	/* Check for wrap around. */
	addr = (void *)(intptr_t)uio->uio_offset;
	if ((uintptr_t)addr != uio->uio_offset) {
		return EFAULT;
	}
	/*
	 * Handle a non-page-aligned offset.  Otherwise, we operate on
	 * a page-by-page basis.
	 */
	offset = uio->uio_offset & PAGE_MASK;
	len = MIN(uio->uio_resid, PAGE_SIZE - offset);
	prot = (uio->uio_rw == UIO_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;

	md_kva = false;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_IO
	paddr_t paddr;
	/* MD case: is this a directly mapped address? */
	if (mm_md_direct_mapped_io(addr, &paddr)) {
		/* If so, validate the physical address. */
		error = mm_md_physacc(paddr, prot);
		if (error) {
			return error;
		}
		md_kva = true;
	}
#endif
	if (!md_kva) {
		bool checked = false;

#ifdef __HAVE_MM_MD_KERNACC
		/* MD check for the address. */
		error = mm_md_kernacc(addr, prot, &checked);
		if (error) {
			return error;
		}
#endif
		/* UVM check for the address (unless the MD check covered it). */
		if (!checked && !uvm_kernacc(addr, len, prot)) {
			return EFAULT;
		}
	}
	error = uiomove(addr, len, uio);
	return error;
}
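
/*
 * Note that reads of /dev/full are also served by dev_zero_readwrite(),
 * via the fall-through in mm_readwrite() below.
 */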

/*
 * dev_zero_readwrite: helper for the DEV_ZERO (/dev/zero) case of R/W.
 */
static inline int
dev_zero_readwrite(struct uio *uio, struct iovec *iov)
{
	size_t len;

	/* Nothing to do for the write case. */
	if (uio->uio_rw == UIO_WRITE) {
		uio->uio_resid = 0;
		return 0;
	}
	/*
	 * Read on a page-by-page basis; the caller will continue.
	 * Truncate appropriately for the single/last-iteration cases.
	 */
	len = MIN(iov->iov_len, PAGE_SIZE);
	return uiomove(dev_zero_page, len, uio);
}

/*
 * mm_readwrite: general memory R/W function.
 */
static int
mm_readwrite(dev_t dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error;

#ifdef __HAVE_MM_MD_READWRITE
	/* If defined, there are extra MD cases. */
	switch (minor(dev)) {
	case DEV_MEM:
	case DEV_KMEM:
	case DEV_NULL:
	case DEV_ZERO:
#if defined(COMPAT_16) && defined(__arm)
	case _DEV_ZERO_oARM:
#endif
		break;
	default:
		return mm_md_readwrite(dev, uio);
	}
#endif
	error = 0;
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* Processed; next I/O vector. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			KASSERT(uio->uio_iovcnt >= 0);
			continue;
		}
		/* Helper functions will process on a page-by-page basis. */
		switch (minor(dev)) {
		case DEV_MEM:
			error = dev_mem_readwrite(uio, iov);
			break;
		case DEV_KMEM:
			error = dev_kmem_readwrite(uio, iov);
			break;
		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE) {
				uio->uio_resid = 0;
			}
			/* Break directly out of the loop. */
			return 0;
		case DEV_FULL:
			if (uio->uio_rw == UIO_WRITE) {
				return ENOSPC;
			}
#if defined(COMPAT_16) && defined(__arm)
			/* FALLTHROUGH */
		case _DEV_ZERO_oARM:
#endif
			/* FALLTHROUGH */
		case DEV_ZERO:
			error = dev_zero_readwrite(uio, iov);
			break;
		default:
			error = ENXIO;
			break;
		}
	}
	return error;
}

/*
 * mm_mmap: general mmap() handler.
 */
static paddr_t
mm_mmap(dev_t dev, off_t off, int acc)
{
	vm_prot_t prot;

#ifdef __HAVE_MM_MD_MMAP
	/* If defined, there are extra mmap() MD cases. */
	switch (minor(dev)) {
	case DEV_MEM:
	case DEV_KMEM:
	case DEV_NULL:
#if defined(COMPAT_16) && defined(__arm)
	case _DEV_ZERO_oARM:
#endif
	case DEV_ZERO:
		break;
	default:
		return mm_md_mmap(dev, off, acc);
	}
#endif
	/*
	 * /dev/null does not make sense, /dev/kmem is volatile, and
	 * /dev/zero is handled in mmap already.
	 */
	if (minor(dev) != DEV_MEM) {
		return -1;
	}

	prot = 0;
	if (acc & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;
	if (acc & PROT_READ)
		prot |= VM_PROT_READ;
	if (acc & PROT_WRITE)
		prot |= VM_PROT_WRITE;

	/* Validate the physical address. */
	if (mm_md_physacc(off, prot) != 0) {
		return -1;
	}
	return off >> PGSHIFT;
}
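
/*
 * mm_ioctl: handle the few ioctls that make sense for memory devices;
 * reject everything else.
 */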
static int
mm_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{

	switch (cmd) {
	case FIONBIO:
		/* We never block anyway. */
		return 0;

	case FIOSETOWN:
	case FIOGETOWN:
	case TIOCGPGRP:
	case TIOCSPGRP:
	case TIOCGETA:
		return ENOTTY;

	case FIOASYNC:
		if ((*(int *)data) == 0) {
			return 0;
		}
		/* FALLTHROUGH */
	default:
		return EOPNOTSUPP;
	}
}