/* $NetBSD: netbsd32_fs.c,v 1.4 2001/04/09 10:22:01 jdolecek Exp $ */

/*
 * Copyright (c) 1998, 2001 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ktrace.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/proc.h>

#include <compat/netbsd32/netbsd32.h>
#include <compat/netbsd32/netbsd32_syscallargs.h>
#include <compat/netbsd32/netbsd32_conv.h>


static int dofilereadv32 __P((struct proc *, int, struct file *,
    struct netbsd32_iovec *, int, off_t *, int, register_t *));
static int dofilewritev32 __P((struct proc *, int, struct file *,
    struct netbsd32_iovec *, int, off_t *, int, register_t *));
static int change_utimes32 __P((struct vnode *, netbsd32_timevalp_t,
    struct proc *));
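/*
 * Note on a recurring idiom below: the netbsd32 pointer types
 * (netbsd32_charp, netbsd32_iovecp_t, netbsd32_timevalp_t, ...) are 32-bit
 * integers carrying user-space addresses, so each one is widened through
 * u_long and then cast to the native pointer type before being handed to
 * copyin()/copyout(), namei() and friends.  A hypothetical helper macro
 * (not used by this file, shown only to illustrate the idiom) could read:
 */
#if 0
#define NETBSD32_PTR(type, p32)	((type)(u_long)(p32))

        /* e.g. instead of (caddr_t)(u_long)SCARG(uap, buf): */
        sfsp = NETBSD32_PTR(caddr_t, SCARG(uap, buf));
#endif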
int
netbsd32_getfsstat(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_getfsstat_args /* {
                syscallarg(netbsd32_statfsp_t) buf;
                syscallarg(netbsd32_long) bufsize;
                syscallarg(int) flags;
        } */ *uap = v;
        struct mount *mp, *nmp;
        struct statfs *sp;
        struct netbsd32_statfs sb32;
        caddr_t sfsp;
        long count, maxcount, error;

        maxcount = SCARG(uap, bufsize) / sizeof(struct netbsd32_statfs);
        sfsp = (caddr_t)(u_long)SCARG(uap, buf);
        simple_lock(&mountlist_slock);
        count = 0;
        for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
                if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
                        nmp = mp->mnt_list.cqe_next;
                        continue;
                }
                if (sfsp && count < maxcount) {
                        sp = &mp->mnt_stat;
                        /*
                         * If MNT_NOWAIT or MNT_LAZY is specified, do not
                         * refresh the fsstat cache.  MNT_WAIT or MNT_LAZY
                         * overrides MNT_NOWAIT.
                         */
                        if (SCARG(uap, flags) != MNT_NOWAIT &&
                            SCARG(uap, flags) != MNT_LAZY &&
                            (SCARG(uap, flags) == MNT_WAIT ||
                            SCARG(uap, flags) == 0) &&
                            (error = VFS_STATFS(mp, sp, p)) != 0) {
                                simple_lock(&mountlist_slock);
                                nmp = mp->mnt_list.cqe_next;
                                vfs_unbusy(mp);
                                continue;
                        }
                        sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
                        sp->f_oflags = sp->f_flags & 0xffff;
                        netbsd32_from_statfs(sp, &sb32);
                        error = copyout(&sb32, sfsp, sizeof(sb32));
                        if (error) {
                                vfs_unbusy(mp);
                                return (error);
                        }
                        sfsp += sizeof(sb32);
                }
                count++;
                simple_lock(&mountlist_slock);
                nmp = mp->mnt_list.cqe_next;
                vfs_unbusy(mp);
        }
        simple_unlock(&mountlist_slock);
        if (sfsp && count > maxcount)
                *retval = maxcount;
        else
                *retval = count;
        return (0);
}
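/*
 * netbsd32_from_statfs() above is provided by
 * <compat/netbsd32/netbsd32_conv.h>.  As an illustration only (a sketch,
 * not the actual inline from that header), the conversion narrows the
 * native struct statfs into the 32-bit layout field by field:
 */
#if 0
static __inline void
netbsd32_from_statfs(struct statfs *sbp, struct netbsd32_statfs *sb32p)
{

        /* narrow each native long counter to a 32-bit netbsd32_long */
        sb32p->f_type = sbp->f_type;
        sb32p->f_bsize = (netbsd32_long)sbp->f_bsize;
        sb32p->f_iosize = (netbsd32_long)sbp->f_iosize;
        sb32p->f_blocks = (netbsd32_long)sbp->f_blocks;
        sb32p->f_bfree = (netbsd32_long)sbp->f_bfree;
        sb32p->f_bavail = (netbsd32_long)sbp->f_bavail;
        sb32p->f_files = (netbsd32_long)sbp->f_files;
        sb32p->f_ffree = (netbsd32_long)sbp->f_ffree;
        /* ... f_fsid, f_owner, f_flags and the remaining counters ... */

        /* the fixed-size name arrays copy across unchanged */
        strncpy(sb32p->f_fstypename, sbp->f_fstypename, MFSNAMELEN);
        strncpy(sb32p->f_mntonname, sbp->f_mntonname, MNAMELEN);
        strncpy(sb32p->f_mntfromname, sbp->f_mntfromname, MNAMELEN);
}
#endif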
int
netbsd32_readv(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_readv_args /* {
                syscallarg(int) fd;
                syscallarg(const netbsd32_iovecp_t) iovp;
                syscallarg(int) iovcnt;
        } */ *uap = v;
        int fd = SCARG(uap, fd);
        struct file *fp;
        struct filedesc *fdp = p->p_fd;

        if ((u_int)fd >= fdp->fd_nfiles ||
            (fp = fdp->fd_ofiles[fd]) == NULL ||
            (fp->f_flag & FREAD) == 0)
                return (EBADF);

        return (dofilereadv32(p, fd, fp,
            (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
            SCARG(uap, iovcnt), &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

/* Damn thing copies in the iovec! */
int
dofilereadv32(p, fd, fp, iovp, iovcnt, offset, flags, retval)
        struct proc *p;
        int fd;
        struct file *fp;
        struct netbsd32_iovec *iovp;
        int iovcnt;
        off_t *offset;
        int flags;
        register_t *retval;
{
        struct uio auio;
        struct iovec *iov;
        struct iovec *needfree;
        struct iovec aiov[UIO_SMALLIOV];
        long i, cnt, error = 0;
        u_int iovlen;
#ifdef KTRACE
        struct iovec *ktriov = NULL;
#endif

        /* note: can't use iovlen until iovcnt is validated */
        iovlen = iovcnt * sizeof(struct iovec);
        if ((u_int)iovcnt > UIO_SMALLIOV) {
                if ((u_int)iovcnt > IOV_MAX)
                        return (EINVAL);
                MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
                needfree = iov;
        } else if ((u_int)iovcnt > 0) {
                iov = aiov;
                needfree = NULL;
        } else
                return (EINVAL);

        auio.uio_iov = iov;
        auio.uio_iovcnt = iovcnt;
        auio.uio_rw = UIO_READ;
        auio.uio_segflg = UIO_USERSPACE;
        auio.uio_procp = p;
        error = netbsd32_to_iovecin(iovp, iov, iovcnt);
        if (error)
                goto done;
        auio.uio_resid = 0;
        for (i = 0; i < iovcnt; i++) {
                auio.uio_resid += iov->iov_len;
                /*
                 * Reads return ssize_t because -1 is returned on error.
                 * Therefore we must restrict the length to SSIZE_MAX to
                 * avoid garbage return values.
                 */
                if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
                        error = EINVAL;
                        goto done;
                }
                iov++;
        }
#ifdef KTRACE
        /*
         * if tracing, save a copy of iovec
         */
        if (KTRPOINT(p, KTR_GENIO)) {
                MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
                memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
        }
#endif
        cnt = auio.uio_resid;
        error = (*fp->f_ops->fo_read)(fp, offset, &auio, fp->f_cred, flags);
        if (error)
                if (auio.uio_resid != cnt && (error == ERESTART ||
                    error == EINTR || error == EWOULDBLOCK))
                        error = 0;
        cnt -= auio.uio_resid;
#ifdef KTRACE
        /* free the saved copy even on error, so it is not leaked */
        if (ktriov != NULL) {
                if (error == 0)
                        ktrgenio(p, fd, UIO_READ, ktriov, cnt, error);
                FREE(ktriov, M_TEMP);
        }
#endif
        *retval = cnt;
done:
        if (needfree)
                FREE(needfree, M_IOV);
        return (error);
}
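/*
 * The "copies in the iovec" work above is done by netbsd32_to_iovecin()
 * from <compat/netbsd32/netbsd32_conv.h>.  As an illustration only (a
 * sketch, not that header's actual inline): each 32-bit iovec is fetched
 * from user space and widened into a native struct iovec, so the resulting
 * array can back the struct uio handed to fo_read/fo_write.
 */
#if 0
static __inline int
netbsd32_to_iovecin(struct netbsd32_iovec *iov32p, struct iovec *iovp, int len)
{
        u_int32_t iov_base, iov_len;
        int i, error;

        for (i = 0; i < len; i++, iov32p++, iovp++) {
                /* copy the two 32-bit fields in from user space */
                error = copyin((caddr_t)&iov32p->iov_base, &iov_base,
                    sizeof(iov_base));
                if (error)
                        return (error);
                error = copyin((caddr_t)&iov32p->iov_len, &iov_len,
                    sizeof(iov_len));
                if (error)
                        return (error);
                /* widen to the native pointer and size types */
                iovp->iov_base = (void *)(u_long)iov_base;
                iovp->iov_len = (size_t)iov_len;
        }
        return (0);
}
#endif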
int
netbsd32_writev(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_writev_args /* {
                syscallarg(int) fd;
                syscallarg(const netbsd32_iovecp_t) iovp;
                syscallarg(int) iovcnt;
        } */ *uap = v;
        int fd = SCARG(uap, fd);
        struct file *fp;
        struct filedesc *fdp = p->p_fd;

        if ((u_int)fd >= fdp->fd_nfiles ||
            (fp = fdp->fd_ofiles[fd]) == NULL ||
            (fp->f_flag & FWRITE) == 0)
                return (EBADF);

        return (dofilewritev32(p, fd, fp,
            (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
            SCARG(uap, iovcnt), &fp->f_offset, FOF_UPDATE_OFFSET, retval));
}

int
dofilewritev32(p, fd, fp, iovp, iovcnt, offset, flags, retval)
        struct proc *p;
        int fd;
        struct file *fp;
        struct netbsd32_iovec *iovp;
        int iovcnt;
        off_t *offset;
        int flags;
        register_t *retval;
{
        struct uio auio;
        struct iovec *iov;
        struct iovec *needfree;
        struct iovec aiov[UIO_SMALLIOV];
        long i, cnt, error = 0;
        u_int iovlen;
#ifdef KTRACE
        struct iovec *ktriov = NULL;
#endif

        /* note: can't use iovlen until iovcnt is validated */
        iovlen = iovcnt * sizeof(struct iovec);
        if ((u_int)iovcnt > UIO_SMALLIOV) {
                if ((u_int)iovcnt > IOV_MAX)
                        return (EINVAL);
                MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
                needfree = iov;
        } else if ((u_int)iovcnt > 0) {
                iov = aiov;
                needfree = NULL;
        } else
                return (EINVAL);

        auio.uio_iov = iov;
        auio.uio_iovcnt = iovcnt;
        auio.uio_rw = UIO_WRITE;
        auio.uio_segflg = UIO_USERSPACE;
        auio.uio_procp = p;
        error = netbsd32_to_iovecin(iovp, iov, iovcnt);
        if (error)
                goto done;
        auio.uio_resid = 0;
        for (i = 0; i < iovcnt; i++) {
                auio.uio_resid += iov->iov_len;
                /*
                 * Writes return ssize_t because -1 is returned on error.
                 * Therefore we must restrict the length to SSIZE_MAX to
                 * avoid garbage return values.
                 */
                if (iov->iov_len > SSIZE_MAX || auio.uio_resid > SSIZE_MAX) {
                        error = EINVAL;
                        goto done;
                }
                iov++;
        }
#ifdef KTRACE
        /*
         * if tracing, save a copy of iovec
         */
        if (KTRPOINT(p, KTR_GENIO)) {
                MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
                memcpy((caddr_t)ktriov, (caddr_t)auio.uio_iov, iovlen);
        }
#endif
        cnt = auio.uio_resid;
        error = (*fp->f_ops->fo_write)(fp, offset, &auio, fp->f_cred, flags);
        if (error) {
                if (auio.uio_resid != cnt && (error == ERESTART ||
                    error == EINTR || error == EWOULDBLOCK))
                        error = 0;
                if (error == EPIPE)
                        psignal(p, SIGPIPE);
        }
        cnt -= auio.uio_resid;
#ifdef KTRACE
        /* free the saved copy even on error, so it is not leaked */
        if (ktriov != NULL) {
                if (error == 0)
                        ktrgenio(p, fd, UIO_WRITE, ktriov, cnt, error);
                FREE(ktriov, M_TEMP);
        }
#endif
        *retval = cnt;
done:
        if (needfree)
                FREE(needfree, M_IOV);
        return (error);
}

int
netbsd32_utimes(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_utimes_args /* {
                syscallarg(const netbsd32_charp) path;
                syscallarg(const netbsd32_timevalp_t) tptr;
        } */ *uap = v;
        int error;
        struct nameidata nd;

        NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE,
            (char *)(u_long)SCARG(uap, path), p);
        if ((error = namei(&nd)) != 0)
                return (error);

        error = change_utimes32(nd.ni_vp, SCARG(uap, tptr), p);

        vrele(nd.ni_vp);
        return (error);
}

/*
 * Common routine to set access and modification times given a vnode.
 */
static int
change_utimes32(vp, tptr, p)
        struct vnode *vp;
        netbsd32_timevalp_t tptr;
        struct proc *p;
{
        struct netbsd32_timeval tv32[2];
        struct timeval tv[2];
        struct vattr vattr;
        int error;

        VATTR_NULL(&vattr);
        if (tptr == NULL) {
                microtime(&tv[0]);
                tv[1] = tv[0];
                vattr.va_vaflags |= VA_UTIMES_NULL;
        } else {
                error = copyin((caddr_t)(u_long)tptr, tv32, sizeof(tv32));
                if (error)
                        return (error);
                netbsd32_to_timeval(&tv32[0], &tv[0]);
                netbsd32_to_timeval(&tv32[1], &tv[1]);
        }
        VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        vattr.va_atime.tv_sec = tv[0].tv_sec;
        vattr.va_atime.tv_nsec = tv[0].tv_usec * 1000;
        vattr.va_mtime.tv_sec = tv[1].tv_sec;
        vattr.va_mtime.tv_nsec = tv[1].tv_usec * 1000;
        error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
        VOP_UNLOCK(vp, 0);
        return (error);
}
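/*
 * netbsd32_to_timeval(), used by change_utimes32() above, also comes from
 * <compat/netbsd32/netbsd32_conv.h>.  Illustrative sketch only: the 32-bit
 * seconds and microseconds fields are simply widened to the native types.
 */
#if 0
static __inline void
netbsd32_to_timeval(struct netbsd32_timeval *tv32p, struct timeval *tvp)
{

        tvp->tv_sec = (long)tv32p->tv_sec;
        tvp->tv_usec = (long)tv32p->tv_usec;
}
#endif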
int
netbsd32_statfs(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_statfs_args /* {
                syscallarg(const netbsd32_charp) path;
                syscallarg(netbsd32_statfsp_t) buf;
        } */ *uap = v;
        struct mount *mp;
        struct statfs *sp;
        struct netbsd32_statfs s32;
        int error;
        struct nameidata nd;

        NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE,
            (char *)(u_long)SCARG(uap, path), p);
        if ((error = namei(&nd)) != 0)
                return (error);
        mp = nd.ni_vp->v_mount;
        sp = &mp->mnt_stat;
        vrele(nd.ni_vp);
        if ((error = VFS_STATFS(mp, sp, p)) != 0)
                return (error);
        sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
        netbsd32_from_statfs(sp, &s32);
        return (copyout(&s32, (caddr_t)(u_long)SCARG(uap, buf), sizeof(s32)));
}

int
netbsd32_fstatfs(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_fstatfs_args /* {
                syscallarg(int) fd;
                syscallarg(netbsd32_statfsp_t) buf;
        } */ *uap = v;
        struct file *fp;
        struct mount *mp;
        struct statfs *sp;
        struct netbsd32_statfs s32;
        int error;

        /* getvnode() will use the descriptor for us */
        if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
                return (error);
        mp = ((struct vnode *)fp->f_data)->v_mount;
        sp = &mp->mnt_stat;
        if ((error = VFS_STATFS(mp, sp, p)) != 0)
                goto out;
        sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
        netbsd32_from_statfs(sp, &s32);
        error = copyout(&s32, (caddr_t)(u_long)SCARG(uap, buf), sizeof(s32));
out:
        FILE_UNUSE(fp, p);
        return (error);
}

int
netbsd32_futimes(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_futimes_args /* {
                syscallarg(int) fd;
                syscallarg(const netbsd32_timevalp_t) tptr;
        } */ *uap = v;
        int error;
        struct file *fp;

        /* getvnode() will use the descriptor for us */
        if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
                return (error);

        error = change_utimes32((struct vnode *)fp->f_data,
            SCARG(uap, tptr), p);
        FILE_UNUSE(fp, p);
        return (error);
}

int
netbsd32_getdents(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_getdents_args /* {
                syscallarg(int) fd;
                syscallarg(netbsd32_charp) buf;
                syscallarg(netbsd32_size_t) count;
        } */ *uap = v;
        struct file *fp;
        int error, done;

        /* getvnode() will use the descriptor for us */
        if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
                return (error);
        if ((fp->f_flag & FREAD) == 0) {
                error = EBADF;
                goto out;
        }
        error = vn_readdir(fp, (caddr_t)(u_long)SCARG(uap, buf), UIO_USERSPACE,
            SCARG(uap, count), &done, p, 0, 0);
        *retval = done;
out:
        FILE_UNUSE(fp, p);
        return (error);
}

int
netbsd32_lutimes(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_lutimes_args /* {
                syscallarg(const netbsd32_charp) path;
                syscallarg(const netbsd32_timevalp_t) tptr;
        } */ *uap = v;
        int error;
        struct nameidata nd;

        NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE,
            (caddr_t)(u_long)SCARG(uap, path), p);
        if ((error = namei(&nd)) != 0)
                return (error);

        error = change_utimes32(nd.ni_vp, SCARG(uap, tptr), p);

        vrele(nd.ni_vp);
        return (error);
}

int
netbsd32___stat13(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32___stat13_args /* {
                syscallarg(const netbsd32_charp) path;
                syscallarg(netbsd32_statp_t) ub;
        } */ *uap = v;
        struct netbsd32_stat sb32;
        struct stat sb;
        int error;
        struct nameidata nd;
        caddr_t sg;
        const char *path;

        path = (char *)(u_long)SCARG(uap, path);
        sg = stackgap_init(p->p_emul);
        CHECK_ALT_EXIST(p, &sg, path);

        NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
        if ((error = namei(&nd)) != 0)
                return (error);
        error = vn_stat(nd.ni_vp, &sb, p);
        vput(nd.ni_vp);
        if (error)
                return (error);
        netbsd32_from___stat13(&sb, &sb32);
        error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, ub), sizeof(sb32));
        return (error);
}
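/*
 * netbsd32_from___stat13(), used by the stat/fstat/lstat wrappers, is
 * likewise supplied by <compat/netbsd32/netbsd32_conv.h>.  Illustrative
 * sketch only: the native struct stat is narrowed field by field into
 * struct netbsd32_stat, with the timespec members converted separately.
 */
#if 0
static __inline void
netbsd32_from___stat13(struct stat *sbp, struct netbsd32_stat *sb32p)
{

        sb32p->st_dev = sbp->st_dev;
        sb32p->st_ino = sbp->st_ino;
        sb32p->st_mode = sbp->st_mode;
        sb32p->st_nlink = sbp->st_nlink;
        sb32p->st_uid = sbp->st_uid;
        sb32p->st_gid = sbp->st_gid;
        sb32p->st_rdev = sbp->st_rdev;
        sb32p->st_size = sbp->st_size;
        /* ... st_blksize, st_blocks, st_flags, st_gen ... */

        /* each native timespec is narrowed to its 32-bit counterpart */
        sb32p->st_atimespec.tv_sec = (netbsd32_long)sbp->st_atimespec.tv_sec;
        sb32p->st_atimespec.tv_nsec = (netbsd32_long)sbp->st_atimespec.tv_nsec;
        /* ... likewise for st_mtimespec and st_ctimespec ... */
}
#endif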
int
netbsd32___fstat13(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32___fstat13_args /* {
                syscallarg(int) fd;
                syscallarg(netbsd32_statp_t) sb;
        } */ *uap = v;
        int fd = SCARG(uap, fd);
        struct filedesc *fdp = p->p_fd;
        struct file *fp;
        struct netbsd32_stat sb32;
        struct stat ub;
        int error = 0;

        if ((u_int)fd >= fdp->fd_nfiles ||
            (fp = fdp->fd_ofiles[fd]) == NULL)
                return (EBADF);

        FILE_USE(fp);
        error = (*fp->f_ops->fo_stat)(fp, &ub, p);
        FILE_UNUSE(fp, p);

        if (error == 0) {
                netbsd32_from___stat13(&ub, &sb32);
                error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, sb),
                    sizeof(sb32));
        }
        return (error);
}

int
netbsd32___lstat13(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32___lstat13_args /* {
                syscallarg(const netbsd32_charp) path;
                syscallarg(netbsd32_statp_t) ub;
        } */ *uap = v;
        struct netbsd32_stat sb32;
        struct stat sb;
        int error;
        struct nameidata nd;
        caddr_t sg;
        const char *path;

        path = (char *)(u_long)SCARG(uap, path);
        sg = stackgap_init(p->p_emul);
        CHECK_ALT_EXIST(p, &sg, path);

        /* lstat must not follow the final symbolic link */
        NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
        if ((error = namei(&nd)) != 0)
                return (error);
        error = vn_stat(nd.ni_vp, &sb, p);
        vput(nd.ni_vp);
        if (error)
                return (error);
        netbsd32_from___stat13(&sb, &sb32);
        error = copyout(&sb32, (caddr_t)(u_long)SCARG(uap, ub), sizeof(sb32));
        return (error);
}

int
netbsd32_preadv(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_preadv_args /* {
                syscallarg(int) fd;
                syscallarg(const netbsd32_iovecp_t) iovp;
                syscallarg(int) iovcnt;
                syscallarg(int) pad;
                syscallarg(off_t) offset;
        } */ *uap = v;
        struct filedesc *fdp = p->p_fd;
        struct file *fp;
        struct vnode *vp;
        off_t offset;
        int error, fd = SCARG(uap, fd);

        if ((u_int)fd >= fdp->fd_nfiles ||
            (fp = fdp->fd_ofiles[fd]) == NULL ||
            (fp->f_flag & FREAD) == 0)
                return (EBADF);

        vp = (struct vnode *)fp->f_data;
        if (fp->f_type != DTYPE_VNODE
            || vp->v_type == VFIFO)
                return (ESPIPE);

        offset = SCARG(uap, offset);

        /*
         * XXX This works because no file systems actually
         * XXX take any action on the seek operation.
         */
        if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
                return (error);

        return (dofilereadv32(p, fd, fp,
            (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
            SCARG(uap, iovcnt), &offset, 0, retval));
}

int
netbsd32_pwritev(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32_pwritev_args /* {
                syscallarg(int) fd;
                syscallarg(const netbsd32_iovecp_t) iovp;
                syscallarg(int) iovcnt;
                syscallarg(int) pad;
                syscallarg(off_t) offset;
        } */ *uap = v;
        struct filedesc *fdp = p->p_fd;
        struct file *fp;
        struct vnode *vp;
        off_t offset;
        int error, fd = SCARG(uap, fd);

        if ((u_int)fd >= fdp->fd_nfiles ||
            (fp = fdp->fd_ofiles[fd]) == NULL ||
            (fp->f_flag & FWRITE) == 0)
                return (EBADF);

        vp = (struct vnode *)fp->f_data;
        if (fp->f_type != DTYPE_VNODE
            || vp->v_type == VFIFO)
                return (ESPIPE);

        offset = SCARG(uap, offset);

        /*
         * XXX This works because no file systems actually
         * XXX take any action on the seek operation.
         */
        if ((error = VOP_SEEK(vp, fp->f_offset, offset, fp->f_cred)) != 0)
                return (error);

        return (dofilewritev32(p, fd, fp,
            (struct netbsd32_iovec *)(u_long)SCARG(uap, iovp),
            SCARG(uap, iovcnt), &offset, 0, retval));
}
/*
 * Find pathname of process's current directory.
 *
 * Use vfs vnode-to-name reverse cache; if that fails, fall back
 * to reading directory contents.
 */
int
getcwd_common __P((struct vnode *, struct vnode *,
    char **, char *, int, int, struct proc *));

int
netbsd32___getcwd(p, v, retval)
        struct proc *p;
        void *v;
        register_t *retval;
{
        struct netbsd32___getcwd_args /* {
                syscallarg(char *) bufp;
                syscallarg(size_t) length;
        } */ *uap = v;

        int error;
        char *path;
        char *bp, *bend;
        int len = (int)SCARG(uap, length);
        int lenused;

        if (len > MAXPATHLEN*4)
                len = MAXPATHLEN*4;
        else if (len < 2)
                return ERANGE;

        path = (char *)malloc(len, M_TEMP, M_WAITOK);
        if (!path)
                return ENOMEM;

        bp = &path[len];
        bend = bp;
        *(--bp) = '\0';

        /*
         * 5th argument here is "max number of vnodes to traverse".
         * Since each entry takes up at least 2 bytes in the output buffer,
         * limit it to N/2 vnodes for an N byte buffer.
         */
#define GETCWD_CHECK_ACCESS 0x0001
        error = getcwd_common(p->p_cwdi->cwdi_cdir, NULL, &bp, path, len/2,
            GETCWD_CHECK_ACCESS, p);

        if (error)
                goto out;
        lenused = bend - bp;
        *retval = lenused;
        /* put the result into user buffer */
        error = copyout(bp, (caddr_t)(u_long)SCARG(uap, bufp), lenused);

out:
        free(path, M_TEMP);
        return error;
}