1 /* 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
 * $DragonFly: src/sys/kern/kern_subr.c,v 1.27 2007/01/29 20:44:02 tgen Exp $
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/sfbuf.h>
#include <sys/thread2.h>
#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

/*
 * Export the compile-time iovec limit as the read-only sysctl
 * kern.iov_max (backs userland sysconf(_SC_IOV_MAX)).
 */
SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

/*
 * UIO_READ:  copy the kernelspace cp to the user or kernelspace UIO
 * UIO_WRITE: copy the user or kernelspace UIO to the kernelspace cp
 *
 * For userspace UIO's, uio_td must be the current thread.
 *
 * The syscall interface is responsible for limiting the length to
 * ssize_t for things like read() or write() which return the bytes
 * read or written as ssize_t.  These functions work with unsigned
 * lengths.
76 */ 77 int 78 uiomove(caddr_t cp, size_t n, struct uio *uio) 79 { 80 struct iovec *iov; 81 size_t cnt; 82 int error = 0; 83 int save = 0; 84 int baseticks = ticks; 85 86 KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE, 87 ("uiomove: mode")); 88 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread, 89 ("uiomove proc")); 90 91 if (curproc) { 92 save = curproc->p_flag & P_DEADLKTREAT; 93 curproc->p_flag |= P_DEADLKTREAT; 94 } 95 96 while (n > 0 && uio->uio_resid) { 97 iov = uio->uio_iov; 98 cnt = iov->iov_len; 99 if (cnt == 0) { 100 uio->uio_iov++; 101 uio->uio_iovcnt--; 102 continue; 103 } 104 if (cnt > n) 105 cnt = n; 106 107 switch (uio->uio_segflg) { 108 109 case UIO_USERSPACE: 110 if (ticks - baseticks >= hogticks) { 111 uio_yield(); 112 baseticks = ticks; 113 } 114 if (uio->uio_rw == UIO_READ) 115 error = copyout(cp, iov->iov_base, cnt); 116 else 117 error = copyin(iov->iov_base, cp, cnt); 118 if (error) 119 break; 120 break; 121 122 case UIO_SYSSPACE: 123 if (uio->uio_rw == UIO_READ) 124 bcopy((caddr_t)cp, iov->iov_base, cnt); 125 else 126 bcopy(iov->iov_base, (caddr_t)cp, cnt); 127 break; 128 case UIO_NOCOPY: 129 break; 130 } 131 iov->iov_base = (char *)iov->iov_base + cnt; 132 iov->iov_len -= cnt; 133 uio->uio_resid -= cnt; 134 uio->uio_offset += cnt; 135 cp += cnt; 136 n -= cnt; 137 } 138 if (curproc) 139 curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save; 140 return (error); 141 } 142 143 /* 144 * Like uiomove() but copies zero-fill. Only allowed for UIO_READ, 145 * for obvious reasons. 
146 */ 147 int 148 uiomovez(size_t n, struct uio *uio) 149 { 150 struct iovec *iov; 151 size_t cnt; 152 int error = 0; 153 154 KASSERT(uio->uio_rw == UIO_READ, ("uiomovez: mode")); 155 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread, 156 ("uiomove proc")); 157 158 while (n > 0 && uio->uio_resid) { 159 iov = uio->uio_iov; 160 cnt = iov->iov_len; 161 if (cnt == 0) { 162 uio->uio_iov++; 163 uio->uio_iovcnt--; 164 continue; 165 } 166 if (cnt > n) 167 cnt = n; 168 169 switch (uio->uio_segflg) { 170 case UIO_USERSPACE: 171 error = copyout(ZeroPage, iov->iov_base, cnt); 172 if (error) 173 break; 174 break; 175 case UIO_SYSSPACE: 176 bzero(iov->iov_base, cnt); 177 break; 178 case UIO_NOCOPY: 179 break; 180 } 181 iov->iov_base = (char *)iov->iov_base + cnt; 182 iov->iov_len -= cnt; 183 uio->uio_resid -= cnt; 184 uio->uio_offset += cnt; 185 n -= cnt; 186 } 187 return (error); 188 } 189 190 /* 191 * Wrapper for uiomove() that validates the arguments against a known-good 192 * kernel buffer. 193 */ 194 int 195 uiomove_frombuf(void *buf, size_t buflen, struct uio *uio) 196 { 197 size_t offset; 198 199 offset = (size_t)uio->uio_offset; 200 if ((off_t)offset != uio->uio_offset) 201 return (EINVAL); 202 if (buflen == 0 || offset >= buflen) 203 return (0); 204 return (uiomove((char *)buf + offset, buflen - offset, uio)); 205 } 206 207 /* 208 * Give next character to user as result of read. 
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;
	char *iov_base;

again:
	/* the caller must not push a byte into an exhausted uio */
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		/* skip exhausted iovec entries */
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		/* subyte() returns < 0 on a user-address fault */
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		/*
		 * Staged through a local char * before the store;
		 * NOTE(review): presumably to sidestep a cast/warning on
		 * iov_base -- the store back is otherwise a no-op.
		 */
		iov_base = iov->iov_base;
		*iov_base = c;
		iov->iov_base = iov_base;
		break;

	case UIO_NOCOPY:
		break;
	}
	/* consume exactly one byte of the uio */
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

/*
 * General routine to allocate a hash table.  Make the hash table size a
 * power of 2 greater or equal to the number of elements requested, and
 * store the masking value in *hashmask.
 */
void *
hashinit(int elements, struct malloc_type *type, u_long *hashmask)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");
	/* round up to the next power of 2 (minimum 2) */
	for (hashsize = 2; hashsize < elements; hashsize <<= 1)
		continue;
	hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	/* power-of-2 size, so size - 1 serves as the bucket index mask */
	*hashmask = hashsize - 1;
	return (hashtbl);
}

/*
 * This is a newer version which allocates a hash table of structures.
 *
 * The returned array will be zero'd.  The caller is responsible for
 * initializing the structures.
276 */ 277 void * 278 hashinit_ext(int elements, size_t size, struct malloc_type *type, 279 u_long *hashmask) 280 { 281 long hashsize; 282 void *hashtbl; 283 284 if (elements <= 0) 285 panic("hashinit: bad elements"); 286 for (hashsize = 2; hashsize < elements; hashsize <<= 1) 287 continue; 288 hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO); 289 *hashmask = hashsize - 1; 290 return (hashtbl); 291 } 292 293 static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039, 294 2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653, 295 7159, 7673, 8191, 12281, 16381, 24571, 32749 }; 296 #define NPRIMES (sizeof(primes) / sizeof(primes[0])) 297 298 /* 299 * General routine to allocate a prime number sized hash table. 300 */ 301 void * 302 phashinit(int elements, struct malloc_type *type, u_long *nentries) 303 { 304 long hashsize; 305 LIST_HEAD(generic, generic) *hashtbl; 306 int i; 307 308 if (elements <= 0) 309 panic("phashinit: bad elements"); 310 for (i = 1, hashsize = primes[1]; hashsize <= elements;) { 311 i++; 312 if (i == NPRIMES) 313 break; 314 hashsize = primes[i]; 315 } 316 hashsize = primes[i - 1]; 317 hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK); 318 for (i = 0; i < hashsize; i++) 319 LIST_INIT(&hashtbl[i]); 320 *nentries = hashsize; 321 return (hashtbl); 322 } 323 324 /* 325 * This is a newer version which allocates a hash table of structures 326 * in a prime-number size. 327 * 328 * The returned array will be zero'd. The caller is responsible for 329 * initializing the structures. 
330 */ 331 void * 332 phashinit_ext(int elements, size_t size, struct malloc_type *type, 333 u_long *nentries) 334 { 335 long hashsize; 336 void *hashtbl; 337 int i; 338 339 if (elements <= 0) 340 panic("phashinit: bad elements"); 341 for (i = 1, hashsize = primes[1]; hashsize <= elements;) { 342 i++; 343 if (i == NPRIMES) 344 break; 345 hashsize = primes[i]; 346 } 347 hashsize = primes[i - 1]; 348 hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO); 349 *nentries = hashsize; 350 return (hashtbl); 351 } 352 353 /* 354 * Copyin an iovec. If the iovec array fits, use the preallocated small 355 * iovec structure. If it is too big, dynamically allocate an iovec array 356 * of sufficient size. 357 * 358 * MPSAFE 359 */ 360 int 361 iovec_copyin(struct iovec *uiov, struct iovec **kiov, struct iovec *siov, 362 size_t iov_cnt, size_t *iov_len) 363 { 364 struct iovec *iovp; 365 int error, i; 366 size_t len; 367 368 if (iov_cnt > UIO_MAXIOV) 369 return EMSGSIZE; 370 if (iov_cnt > UIO_SMALLIOV) { 371 MALLOC(*kiov, struct iovec *, sizeof(struct iovec) * iov_cnt, 372 M_IOV, M_WAITOK); 373 } else { 374 *kiov = siov; 375 } 376 error = copyin(uiov, *kiov, iov_cnt * sizeof(struct iovec)); 377 if (error == 0) { 378 *iov_len = 0; 379 for (i = 0, iovp = *kiov; i < iov_cnt; i++, iovp++) { 380 /* 381 * Check for both *iov_len overflows and out of 382 * range iovp->iov_len's. We limit to the 383 * capabilities of signed integers. 384 * 385 * GCC4 - overflow check opt requires assign/test. 386 */ 387 len = *iov_len + iovp->iov_len; 388 if (len < *iov_len) 389 error = EINVAL; 390 *iov_len = len; 391 } 392 } 393 394 /* 395 * From userland disallow iovec's which exceed the sized size 396 * limit as the system calls return ssize_t. 397 * 398 * NOTE: Internal kernel interfaces can handle the unsigned 399 * limit. 
400 */ 401 if (error == 0 && (ssize_t)*iov_len < 0) 402 error = EINVAL; 403 404 if (error) 405 iovec_free(kiov, siov); 406 return (error); 407 } 408 409 410 /* 411 * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu> 412 * Copyright (c) 1982, 1986, 1991, 1993 413 * The Regents of the University of California. All rights reserved. 414 * (c) UNIX System Laboratories, Inc. 415 * All or some portions of this file are derived from material licensed 416 * to the University of California by American Telephone and Telegraph 417 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 418 * the permission of UNIX System Laboratories, Inc. 419 * 420 * Redistribution and use in source and binary forms, with or without 421 * modification, are permitted provided that the following conditions 422 * are met: 423 * 1. Redistributions of source code must retain the above copyright 424 * notice, this list of conditions and the following disclaimer. 425 * 2. Redistributions in binary form must reproduce the above copyright 426 * notice, this list of conditions and the following disclaimer in the 427 * documentation and/or other materials provided with the distribution. 428 * 4. Neither the name of the University nor the names of its contributors 429 * may be used to endorse or promote products derived from this software 430 * without specific prior written permission. 431 * 432 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 433 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 434 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 435 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/i386/i386/uio_machdep.c,v 1.1 2004/03/21 20:28:36 alc Exp $
 */

/*
 * Implement uiomove(9) from physical memory using sf_bufs to reduce
 * the creation and destruction of ephemeral mappings.
 */
int
uiomove_fromphys(vm_page_t *ma, vm_offset_t offset, size_t n, struct uio *uio)
{
	struct sf_buf *sf;
	struct thread *td = curthread;
	struct iovec *iov;
	void *cp;
	vm_offset_t page_offset;
	vm_page_t m;
	size_t cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove_fromphys: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove_fromphys proc"));

	/*
	 * Set TDF_DEADLKTREAT for the duration of the copy, remembering
	 * whether it was already set so it is only cleared on the way out
	 * if we set it ourselves.  Flag updates are done inside a critical
	 * section.
	 */
	crit_enter();
	save = td->td_flags & TDF_DEADLKTREAT;
	td->td_flags |= TDF_DEADLKTREAT;
	crit_exit();

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* skip exhausted iovec entries */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		/* clamp the chunk to the remainder of the current page */
		page_offset = offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - page_offset);
		/* map the page via a cpu-private ephemeral sf_buf */
		m = ma[offset >> PAGE_SHIFT];
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		cp = (char *)sf_buf_kva(sf) + page_offset;
		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			/*
			 * note: removed uioyield (it was the wrong place to
			 * put it).
			 */
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error) {
				/* release the mapping before bailing out */
				sf_buf_free(sf);
				goto out;
			}
			break;
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		sf_buf_free(sf);
		/* advance the uio past the chunk just copied */
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		offset += cnt;
		n -= cnt;
	}
out:
	/* clear TDF_DEADLKTREAT only if it was not set on entry */
	if (save == 0) {
		crit_enter();
		td->td_flags &= ~TDF_DEADLKTREAT;
		crit_exit();
	}
	return (error);
}
